// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <asm/page.h>
#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/irqreturn.h>
#include <linux/log2.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uacce.h>
#include <linux/uaccess.h>
#include <uapi/misc/uacce/hisi_qm.h>
#include <linux/hisi_acc_qm.h>

/* eq/aeq irq enable */
#define QM_VF_AEQ_INT_SOURCE		0x0
#define QM_VF_AEQ_INT_MASK		0x4
#define QM_VF_EQ_INT_SOURCE		0x8
#define QM_VF_EQ_INT_MASK		0xc
#define QM_IRQ_NUM_V1			1
#define QM_IRQ_NUM_PF_V2		4
#define QM_IRQ_NUM_VF_V2		2
#define QM_IRQ_NUM_VF_V3		3

#define QM_EQ_EVENT_IRQ_VECTOR		0
#define QM_AEQ_EVENT_IRQ_VECTOR		1
#define QM_CMD_EVENT_IRQ_VECTOR		2
#define QM_ABNORMAL_EVENT_IRQ_VECTOR	3

/* mailbox */
#define QM_MB_PING_ALL_VFS		0xffff
#define QM_MB_CMD_DATA_SHIFT		32
#define QM_MB_CMD_DATA_MASK		GENMASK(31, 0)

/* sqc shift */
#define QM_SQ_HOP_NUM_SHIFT		0
#define QM_SQ_PAGE_SIZE_SHIFT		4
#define QM_SQ_BUF_SIZE_SHIFT		8
#define QM_SQ_SQE_SIZE_SHIFT		12
#define QM_SQ_PRIORITY_SHIFT		0
#define QM_SQ_ORDERS_SHIFT		4
#define QM_SQ_TYPE_SHIFT		8
#define QM_QC_PASID_ENABLE		0x1
#define QM_QC_PASID_ENABLE_SHIFT	7

#define QM_SQ_TYPE_MASK			GENMASK(3, 0)
#define QM_SQ_TAIL_IDX(sqc)		((le16_to_cpu((sqc)->w11) >> 6) & 0x1)

/* cqc shift */
#define QM_CQ_HOP_NUM_SHIFT		0
#define QM_CQ_PAGE_SIZE_SHIFT		4
#define QM_CQ_BUF_SIZE_SHIFT		8
#define QM_CQ_CQE_SIZE_SHIFT		12
#define QM_CQ_PHASE_SHIFT		0
#define QM_CQ_FLAG_SHIFT		1

#define QM_CQE_PHASE(cqe)		(le16_to_cpu((cqe)->w7) & 0x1)
#define QM_QC_CQE_SIZE			4
#define QM_CQ_TAIL_IDX(cqc)		((le16_to_cpu((cqc)->w11) >> 6) & 0x1)

/* eqc shift */
#define QM_EQE_AEQE_SIZE		(2UL << 12)
#define QM_EQC_PHASE_SHIFT		16

#define QM_EQE_PHASE(eqe)		((le32_to_cpu((eqe)->dw0) >> 16) & 0x1)
#define QM_EQE_CQN_MASK			GENMASK(15, 0)

#define QM_AEQE_PHASE(aeqe)		((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1)
#define QM_AEQE_TYPE_SHIFT		17
#define QM_AEQE_CQN_MASK		GENMASK(15, 0)
#define QM_CQ_OVERFLOW			0
#define QM_EQ_OVERFLOW			1
#define QM_CQE_ERROR			2

#define QM_DOORBELL_CMD_SQ		0
#define QM_DOORBELL_CMD_CQ		1
#define QM_DOORBELL_CMD_EQ		2
#define QM_DOORBELL_CMD_AEQ		3

#define QM_DOORBELL_BASE_V1		0x340
#define QM_DB_CMD_SHIFT_V1		16
#define QM_DB_INDEX_SHIFT_V1		32
#define QM_DB_PRIORITY_SHIFT_V1		48
#define QM_QUE_ISO_CFG_V		0x0030
#define QM_PAGE_SIZE			0x0034
#define QM_QUE_ISO_EN			0x100154
#define QM_CAPBILITY			0x100158
#define QM_QP_NUN_MASK			GENMASK(10, 0)
#define QM_QP_DB_INTERVAL		0x10000

#define QM_MEM_START_INIT		0x100040
#define QM_MEM_INIT_DONE		0x100044
#define QM_VFT_CFG_RDY			0x10006c
#define QM_VFT_CFG_OP_WR		0x100058
#define QM_VFT_CFG_TYPE			0x10005c
#define QM_SQC_VFT			0x0
#define QM_CQC_VFT			0x1
#define QM_VFT_CFG			0x100060
#define QM_VFT_CFG_OP_ENABLE		0x100054
#define QM_PM_CTRL			0x100148
#define QM_IDLE_DISABLE			BIT(9)

#define QM_VFT_CFG_DATA_L		0x100064
#define QM_VFT_CFG_DATA_H		0x100068
#define QM_SQC_VFT_BUF_SIZE		(7ULL << 8)
#define QM_SQC_VFT_SQC_SIZE		(5ULL << 12)
#define QM_SQC_VFT_INDEX_NUMBER		(1ULL << 16)
#define QM_SQC_VFT_START_SQN_SHIFT	28
#define QM_SQC_VFT_VALID		(1ULL << 44)
#define QM_SQC_VFT_SQN_SHIFT		45
#define QM_CQC_VFT_BUF_SIZE		(7ULL << 8)
#define QM_CQC_VFT_SQC_SIZE		(5ULL << 12)
#define QM_CQC_VFT_INDEX_NUMBER		(1ULL << 16)
#define QM_CQC_VFT_VALID		(1ULL << 28)

#define QM_SQC_VFT_BASE_SHIFT_V2	28
#define QM_SQC_VFT_BASE_MASK_V2		GENMASK(15, 0)
#define QM_SQC_VFT_NUM_SHIFT_V2		45
#define QM_SQC_VFT_NUM_MASK_v2		GENMASK(9, 0)

#define QM_DFX_CNT_CLR_CE		0x100118

#define QM_ABNORMAL_INT_SOURCE		0x100000
#define QM_ABNORMAL_INT_SOURCE_CLR	GENMASK(14, 0)
#define QM_ABNORMAL_INT_MASK		0x100004
#define QM_ABNORMAL_INT_MASK_VALUE	0x7fff
#define QM_ABNORMAL_INT_STATUS		0x100008
#define QM_ABNORMAL_INT_SET		0x10000c
#define QM_ABNORMAL_INF00		0x100010
#define QM_FIFO_OVERFLOW_TYPE		0xc0
#define QM_FIFO_OVERFLOW_TYPE_SHIFT	6
#define QM_FIFO_OVERFLOW_VF		0x3f
#define QM_ABNORMAL_INF01		0x100014
#define QM_DB_TIMEOUT_TYPE		0xc0
#define QM_DB_TIMEOUT_TYPE_SHIFT	6
#define QM_DB_TIMEOUT_VF		0x3f
#define QM_RAS_CE_ENABLE		0x1000ec
#define QM_RAS_FE_ENABLE		0x1000f0
#define QM_RAS_NFE_ENABLE		0x1000f4
#define QM_RAS_CE_THRESHOLD		0x1000f8
#define QM_RAS_CE_TIMES_PER_IRQ		1
#define QM_RAS_MSI_INT_SEL		0x1040f4
#define QM_OOO_SHUTDOWN_SEL		0x1040f8

#define QM_RESET_WAIT_TIMEOUT		400
#define QM_PEH_VENDOR_ID		0x1000d8
#define ACC_VENDOR_ID_VALUE		0x5a5a
#define QM_PEH_DFX_INFO0		0x1000fc
#define QM_PEH_DFX_INFO1		0x100100
#define QM_PEH_DFX_MASK			(BIT(0) | BIT(2))
#define QM_PEH_MSI_FINISH_MASK		GENMASK(19, 16)
#define ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT	3
#define ACC_PEH_MSI_DISABLE		GENMASK(31, 0)
#define ACC_MASTER_GLOBAL_CTRL_SHUTDOWN	0x1
#define ACC_MASTER_TRANS_RETURN_RW	3
#define ACC_MASTER_TRANS_RETURN		0x300150
#define ACC_MASTER_GLOBAL_CTRL		0x300000
#define ACC_AM_CFG_PORT_WR_EN		0x30001c
#define QM_RAS_NFE_MBIT_DISABLE		~QM_ECC_MBIT
#define ACC_AM_ROB_ECC_INT_STS		0x300104
#define ACC_ROB_ECC_ERR_MULTPL		BIT(1)
#define QM_MSI_CAP_ENABLE		BIT(16)

/* interfunction communication */
#define QM_IFC_READY_STATUS		0x100128
#define QM_IFC_C_STS_M			0x10012C
#define QM_IFC_INT_SET_P		0x100130
#define QM_IFC_INT_CFG			0x100134
#define QM_IFC_INT_SOURCE_P		0x100138
#define QM_IFC_INT_SOURCE_V		0x0020
#define QM_IFC_INT_MASK			0x0024
#define QM_IFC_INT_STATUS		0x0028
#define QM_IFC_INT_SET_V		0x002C
#define QM_IFC_SEND_ALL_VFS		GENMASK(6, 0)
#define QM_IFC_INT_SOURCE_CLR		GENMASK(63, 0)
#define QM_IFC_INT_SOURCE_MASK		BIT(0)
#define QM_IFC_INT_DISABLE		BIT(0)
#define QM_IFC_INT_STATUS_MASK		BIT(0)
#define QM_IFC_INT_SET_MASK		BIT(0)
#define QM_WAIT_DST_ACK			10
#define QM_MAX_PF_WAIT_COUNT		10
#define QM_MAX_VF_WAIT_COUNT		40
#define QM_VF_RESET_WAIT_US		20000
#define QM_VF_RESET_WAIT_CNT		3000
#define QM_VF_RESET_WAIT_TIMEOUT_US	\
	(QM_VF_RESET_WAIT_US * QM_VF_RESET_WAIT_CNT)

#define QM_DFX_MB_CNT_VF		0x104010
#define QM_DFX_DB_CNT_VF		0x104020
#define QM_DFX_SQE_CNT_VF_SQN		0x104030
#define QM_DFX_CQE_CNT_VF_CQN		0x104040
#define QM_DFX_QN_SHIFT			16
#define CURRENT_FUN_MASK		GENMASK(5, 0)
#define CURRENT_Q_MASK			GENMASK(31, 16)

#define POLL_PERIOD			10
#define POLL_TIMEOUT			1000
#define WAIT_PERIOD_US_MAX		200
#define WAIT_PERIOD_US_MIN		100
#define MAX_WAIT_COUNTS			1000
#define QM_CACHE_WB_START		0x204
#define QM_CACHE_WB_DONE		0x208

#define PCI_BAR_2			2
#define PCI_BAR_4			4
#define QM_SQE_DATA_ALIGN_MASK		GENMASK(6, 0)
#define QMC_ALIGN(sz)			ALIGN(sz, 32)

#define QM_DBG_READ_LEN			256
#define QM_DBG_WRITE_LEN		1024
#define QM_DBG_TMP_BUF_LEN		22
#define QM_PCI_COMMAND_INVALID		~0
#define QM_RESET_STOP_TX_OFFSET		1
#define QM_RESET_STOP_RX_OFFSET		2

#define WAIT_PERIOD			20
#define REMOVE_WAIT_DELAY		10
#define QM_SQE_ADDR_MASK		GENMASK(7, 0)
#define QM_EQ_DEPTH			(1024 * 2)

#define QM_DRIVER_REMOVING		0
#define QM_RST_SCHED			1
#define QM_RESETTING			2
#define QM_QOS_PARAM_NUM		2
#define QM_QOS_VAL_NUM			1
#define QM_QOS_BDF_PARAM_NUM		4
#define QM_QOS_MAX_VAL			1000
#define QM_QOS_RATE			100
#define QM_QOS_EXPAND_RATE		1000
#define QM_SHAPER_CIR_B_MASK		GENMASK(7, 0)
#define QM_SHAPER_CIR_U_MASK		GENMASK(10, 8)
#define QM_SHAPER_CIR_S_MASK		GENMASK(14, 11)
#define QM_SHAPER_FACTOR_CIR_U_SHIFT	8
#define QM_SHAPER_FACTOR_CIR_S_SHIFT	11
#define QM_SHAPER_FACTOR_CBS_B_SHIFT	15
#define QM_SHAPER_FACTOR_CBS_S_SHIFT	19
#define QM_SHAPER_CBS_B			1
#define QM_SHAPER_CBS_S			16
#define QM_SHAPER_VFT_OFFSET		6
#define WAIT_FOR_QOS_VF			100
#define QM_QOS_MIN_ERROR_RATE		5
#define QM_QOS_TYPICAL_NUM		8
#define QM_SHAPER_MIN_CBS_S		8
#define QM_QOS_TICK			0x300U
#define QM_QOS_DIVISOR_CLK		0x1f40U
#define QM_QOS_MAX_CIR_B		200
#define QM_QOS_MIN_CIR_B		100
#define QM_QOS_MAX_CIR_U		6
#define QM_QOS_MAX_CIR_S		11
#define QM_QOS_VAL_MAX_LEN		32

#define QM_AUTOSUSPEND_DELAY		3000

#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
	(((hop_num) << QM_CQ_HOP_NUM_SHIFT)	| \
	((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT)	| \
	((buf_sz) << QM_CQ_BUF_SIZE_SHIFT)	| \
	((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))

#define QM_MK_CQC_DW3_V2(cqe_sz) \
	((QM_Q_DEPTH - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))

#define QM_MK_SQC_W13(priority, orders, alg_type) \
	(((priority) << QM_SQ_PRIORITY_SHIFT)	| \
	((orders) << QM_SQ_ORDERS_SHIFT)	| \
	(((alg_type) & QM_SQ_TYPE_MASK) << QM_SQ_TYPE_SHIFT))

#define QM_MK_SQC_DW3_V1(hop_num, pg_sz, buf_sz, sqe_sz) \
	(((hop_num) << QM_SQ_HOP_NUM_SHIFT)	| \
	((pg_sz) << QM_SQ_PAGE_SIZE_SHIFT)	| \
	((buf_sz) << QM_SQ_BUF_SIZE_SHIFT)	| \
	((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))

#define QM_MK_SQC_DW3_V2(sqe_sz) \
	((QM_Q_DEPTH - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))

#define INIT_QC_COMMON(qc, base, pasid) do {			\
	(qc)->head = 0;						\
	(qc)->tail = 0;						\
	(qc)->base_l = cpu_to_le32(lower_32_bits(base));	\
	(qc)->base_h = cpu_to_le32(upper_32_bits(base));	\
	(qc)->dw3 = 0;						\
	(qc)->w8 = 0;						\
	(qc)->rsvd0 = 0;					\
	(qc)->pasid = cpu_to_le16(pasid);			\
	(qc)->w11 = 0;						\
	(qc)->rsvd1 = 0;					\
} while (0)
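
/*
 * INIT_QC_COMMON() clears the fields shared by SQC and CQC, programs the
 * 64-bit queue base address split across base_l/base_h, and records the
 * PASID used for SVA. Callers fill in the context-specific words (dw3,
 * w13, ...) afterwards, before the context is handed to hardware through
 * a mailbox command.
 */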

enum vft_type {
	SQC_VFT = 0,
	CQC_VFT,
	SHAPER_VFT,
};

enum acc_err_result {
	ACC_ERR_NONE,
	ACC_ERR_NEED_RESET,
	ACC_ERR_RECOVERED,
};

enum qm_alg_type {
	ALG_TYPE_0,
	ALG_TYPE_1,
};

enum qm_mb_cmd {
	QM_PF_FLR_PREPARE = 0x01,
	QM_PF_SRST_PREPARE,
	QM_PF_RESET_DONE,
	QM_VF_PREPARE_DONE,
	QM_VF_PREPARE_FAIL,
	QM_VF_START_DONE,
	QM_VF_START_FAIL,
	QM_PF_SET_QOS,
	QM_VF_GET_QOS,
};
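
/*
 * These commands travel over the PF<->VF "interfunction communication"
 * registers defined above: the PF announces reset/FLR events
 * (QM_PF_FLR_PREPARE, QM_PF_SRST_PREPARE, QM_PF_RESET_DONE) and each VF
 * replies with a *_DONE or *_FAIL status. QM_PF_SET_QOS and QM_VF_GET_QOS
 * appear to carry shaper (QoS) configuration between the two sides.
 */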

struct qm_cqe {
	__le32 rsvd0;
	__le16 cmd_id;
	__le16 rsvd1;
	__le16 sq_head;
	__le16 sq_num;
	__le16 rsvd2;
	__le16 w7;
};

struct qm_eqe {
	__le32 dw0;
};

struct qm_aeqe {
	__le32 dw0;
};

struct qm_sqc {
	__le16 head;
	__le16 tail;
	__le32 base_l;
	__le32 base_h;
	__le32 dw3;
	__le16 w8;
	__le16 rsvd0;
	__le16 pasid;
	__le16 w11;
	__le16 cq_num;
	__le16 w13;
	__le32 rsvd1;
};

struct qm_cqc {
	__le16 head;
	__le16 tail;
	__le32 base_l;
	__le32 base_h;
	__le32 dw3;
	__le16 w8;
	__le16 rsvd0;
	__le16 pasid;
	__le16 w11;
	__le32 dw6;
	__le32 rsvd1;
};

struct qm_eqc {
	__le16 head;
	__le16 tail;
	__le32 base_l;
	__le32 base_h;
	__le32 dw3;
	__le32 rsvd[2];
	__le32 dw6;
};

struct qm_aeqc {
	__le16 head;
	__le16 tail;
	__le32 base_l;
	__le32 base_h;
	__le32 dw3;
	__le32 rsvd[2];
	__le32 dw6;
};

struct qm_mailbox {
	__le16 w0;
	__le16 queue_num;
	__le32 base_l;
	__le32 base_h;
	__le32 rsvd;
};

struct qm_doorbell {
	__le16 queue_num;
	__le16 cmd;
	__le16 index;
	__le16 priority;
};

struct hisi_qm_resource {
	struct hisi_qm *qm;
	int distance;
	struct list_head list;
};

struct hisi_qm_hw_ops {
	int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
	void (*qm_db)(struct hisi_qm *qm, u16 qn,
		      u8 cmd, u16 index, u8 priority);
	u32 (*get_irq_num)(struct hisi_qm *qm);
	int (*debug_init)(struct hisi_qm *qm);
	void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe);
	void (*hw_error_uninit)(struct hisi_qm *qm);
	enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
	int (*stop_qp)(struct hisi_qp *qp);
	int (*set_msi)(struct hisi_qm *qm, bool set);
	int (*ping_all_vfs)(struct hisi_qm *qm, u64 cmd);
	int (*ping_pf)(struct hisi_qm *qm, u64 cmd);
};

struct qm_dfx_item {
	const char *name;
	u32 offset;
};

static struct qm_dfx_item qm_dfx_files[] = {
	{"err_irq", offsetof(struct qm_dfx, err_irq_cnt)},
	{"aeq_irq", offsetof(struct qm_dfx, aeq_irq_cnt)},
	{"abnormal_irq", offsetof(struct qm_dfx, abnormal_irq_cnt)},
	{"create_qp_err", offsetof(struct qm_dfx, create_qp_err_cnt)},
	{"mb_err", offsetof(struct qm_dfx, mb_err_cnt)},
};

static const char * const qm_debug_file_name[] = {
	[CURRENT_QM]   = "current_qm",
	[CURRENT_Q]    = "current_q",
	[CLEAR_ENABLE] = "clear_enable",
};

struct hisi_qm_hw_error {
	u32 int_msk;
	const char *msg;
};

static const struct hisi_qm_hw_error qm_hw_error[] = {
	{ .int_msk = BIT(0), .msg = "qm_axi_rresp" },
	{ .int_msk = BIT(1), .msg = "qm_axi_bresp" },
	{ .int_msk = BIT(2), .msg = "qm_ecc_mbit" },
	{ .int_msk = BIT(3), .msg = "qm_ecc_1bit" },
	{ .int_msk = BIT(4), .msg = "qm_acc_get_task_timeout" },
	{ .int_msk = BIT(5), .msg = "qm_acc_do_task_timeout" },
	{ .int_msk = BIT(6), .msg = "qm_acc_wb_not_ready_timeout" },
	{ .int_msk = BIT(7), .msg = "qm_sq_cq_vf_invalid" },
	{ .int_msk = BIT(8), .msg = "qm_cq_vf_invalid" },
	{ .int_msk = BIT(9), .msg = "qm_sq_vf_invalid" },
	{ .int_msk = BIT(10), .msg = "qm_db_timeout" },
	{ .int_msk = BIT(11), .msg = "qm_of_fifo_of" },
	{ .int_msk = BIT(12), .msg = "qm_db_random_invalid" },
	{ .int_msk = BIT(13), .msg = "qm_mailbox_timeout" },
	{ .int_msk = BIT(14), .msg = "qm_flr_timeout" },
	{ /* sentinel */ }
};

static const char * const qm_db_timeout[] = {
	"sq", "cq", "eq", "aeq",
};

static const char * const qm_fifo_overflow[] = {
	"cq", "eq", "aeq",
};

static const char * const qm_s[] = {
	"init", "start", "close", "stop",
};

static const char * const qp_s[] = {
	"none", "init", "start", "stop", "close",
};

struct qm_typical_qos_table {
	u32 start;
	u32 end;
	u32 val;
};

/* the qos step is 100 */
static struct qm_typical_qos_table shaper_cir_s[] = {
	{100, 100, 4},
	{200, 200, 3},
	{300, 500, 2},
	{600, 1000, 1},
	{1100, 100000, 0},
};

static struct qm_typical_qos_table shaper_cbs_s[] = {
	{100, 200, 9},
	{300, 500, 11},
	{600, 1000, 12},
	{1100, 10000, 16},
	{10100, 25000, 17},
	{25100, 50000, 18},
	{50100, 100000, 19}
};
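
/*
 * Allowed state transitions, as encoded by the two checkers below:
 *
 *	QM: INIT -> START <-> STOP -> CLOSE (INIT may also go to CLOSE)
 *	QP: INIT -> START <-> STOP -> CLOSE (a QP may also be stopped or
 *	    closed directly from INIT)
 *
 * These helpers only validate a transition; the caller performs the
 * actual state change.
 */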
static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new)
{
	enum qm_state curr = atomic_read(&qm->status.flags);
	bool avail = false;

	switch (curr) {
	case QM_INIT:
		if (new == QM_START || new == QM_CLOSE)
			avail = true;
		break;
	case QM_START:
		if (new == QM_STOP)
			avail = true;
		break;
	case QM_STOP:
		if (new == QM_CLOSE || new == QM_START)
			avail = true;
		break;
	default:
		break;
	}

	dev_dbg(&qm->pdev->dev, "change qm state from %s to %s\n",
		qm_s[curr], qm_s[new]);

	if (!avail)
		dev_warn(&qm->pdev->dev, "Can not change qm state from %s to %s\n",
			 qm_s[curr], qm_s[new]);

	return avail;
}

static bool qm_qp_avail_state(struct hisi_qm *qm, struct hisi_qp *qp,
			      enum qp_state new)
{
	enum qm_state qm_curr = atomic_read(&qm->status.flags);
	enum qp_state qp_curr = 0;
	bool avail = false;

	if (qp)
		qp_curr = atomic_read(&qp->qp_status.flags);

	switch (new) {
	case QP_INIT:
		if (qm_curr == QM_START || qm_curr == QM_INIT)
			avail = true;
		break;
	case QP_START:
		if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
		    (qm_curr == QM_START && qp_curr == QP_STOP))
			avail = true;
		break;
	case QP_STOP:
		if ((qm_curr == QM_START && qp_curr == QP_START) ||
		    (qp_curr == QP_INIT))
			avail = true;
		break;
	case QP_CLOSE:
		if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
		    (qm_curr == QM_START && qp_curr == QP_STOP) ||
		    (qm_curr == QM_STOP && qp_curr == QP_STOP) ||
		    (qm_curr == QM_STOP && qp_curr == QP_INIT))
			avail = true;
		break;
	default:
		break;
	}

	dev_dbg(&qm->pdev->dev, "change qp state from %s to %s in QM %s\n",
		qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);

	if (!avail)
		dev_warn(&qm->pdev->dev,
			 "Can not change qp state from %s to %s in QM %s\n",
			 qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);

	return avail;
}

static u32 qm_get_hw_error_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
}

static u32 qm_get_dev_err_status(struct hisi_qm *qm)
{
	return qm->err_ini->get_dev_hw_err_status(qm);
}

/* Check if the error causes the master ooo block */
static int qm_check_dev_error(struct hisi_qm *qm)
{
	u32 val, dev_val;

	if (qm->fun_type == QM_HW_VF)
		return 0;

	val = qm_get_hw_error_status(qm);
	dev_val = qm_get_dev_err_status(qm);

	if (qm->ver < QM_HW_V3)
		return (val & QM_ECC_MBIT) ||
		       (dev_val & qm->err_info.ecc_2bits_mask);

	return (val & readl(qm->io_base + QM_OOO_SHUTDOWN_SEL)) ||
	       (dev_val & (~qm->err_info.dev_ce_mask));
}

static int qm_wait_reset_finish(struct hisi_qm *qm)
{
	int delay = 0;

	/* All reset requests need to be queued for processing */
	while (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
		msleep(++delay);
		if (delay > QM_RESET_WAIT_TIMEOUT)
			return -EBUSY;
	}

	return 0;
}

static int qm_reset_prepare_ready(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));

	/*
	 * Before V3, PF and VF on the host do not support resetting
	 * at the same time, so serialize on the PF's reset bit.
	 */
	if (qm->ver < QM_HW_V3)
		return qm_wait_reset_finish(pf_qm);

	return qm_wait_reset_finish(qm);
}

static void qm_reset_bit_clear(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));

	if (qm->ver < QM_HW_V3)
		clear_bit(QM_RESETTING, &pf_qm->misc_ctl);

	clear_bit(QM_RESETTING, &qm->misc_ctl);
}
static void qm_mb_pre_init(struct qm_mailbox *mailbox, u8 cmd,
			   u64 base, u16 queue, bool op)
{
	mailbox->w0 = cpu_to_le16((cmd) |
		((op) ? 0x1 << QM_MB_OP_SHIFT : 0) |
		(0x1 << QM_MB_BUSY_SHIFT));
	mailbox->queue_num = cpu_to_le16(queue);
	mailbox->base_l = cpu_to_le32(lower_32_bits(base));
	mailbox->base_h = cpu_to_le32(upper_32_bits(base));
	mailbox->rsvd = 0;
}

/* return 0 when the mailbox is ready, or a poll-timeout error code */
int hisi_qm_wait_mb_ready(struct hisi_qm *qm)
{
	u32 val;

	return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE,
					  val, !((val >> QM_MB_BUSY_SHIFT) &
					  0x1), POLL_PERIOD, POLL_TIMEOUT);
}
EXPORT_SYMBOL_GPL(hisi_qm_wait_mb_ready);

/* 128 bit should be written to hardware at one time to trigger a mailbox */
static void qm_mb_write(struct hisi_qm *qm, const void *src)
{
	void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE;
	unsigned long tmp0 = 0, tmp1 = 0;

	if (!IS_ENABLED(CONFIG_ARM64)) {
		memcpy_toio(fun_base, src, 16);
		wmb();
		return;
	}

	asm volatile("ldp %0, %1, %3\n"
		     "stp %0, %1, %2\n"
		     "dsb sy\n"
		     : "=&r" (tmp0),
		       "=&r" (tmp1),
		       "+Q" (*((char __iomem *)fun_base))
		     : "Q" (*((char *)src))
		     : "memory");
}
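
/*
 * On ARM64 the ldp/stp pair above issues the 16-byte mailbox command as a
 * single load-pair/store-pair, so the hardware never observes a
 * half-written command, and the trailing "dsb sy" orders the store against
 * later MMIO. The memcpy_toio() fallback is not atomic; presumably the
 * busy-flag polling around the write makes that tolerable on other
 * architectures.
 */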

static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
{
	if (unlikely(hisi_qm_wait_mb_ready(qm))) {
		dev_err(&qm->pdev->dev, "QM mailbox is busy, cannot start!\n");
		goto mb_busy;
	}

	qm_mb_write(qm, mailbox);

	if (unlikely(hisi_qm_wait_mb_ready(qm))) {
		dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
		goto mb_busy;
	}

	return 0;

mb_busy:
	atomic64_inc(&qm->debug.dfx.mb_err_cnt);
	return -EBUSY;
}

int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
	       bool op)
{
	struct qm_mailbox mailbox;
	int ret;

	dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n",
		queue, cmd, (unsigned long long)dma_addr);

	qm_mb_pre_init(&mailbox, cmd, dma_addr, queue, op);

	mutex_lock(&qm->mailbox_lock);
	ret = qm_mb_nolock(qm, &mailbox);
	mutex_unlock(&qm->mailbox_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_mb);
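
/*
 * A minimal usage sketch (mirroring qm_dump_sqc_raw() further below):
 * reading the SQC of queue 0 back into a DMA buffer, with "op" set to
 * request a read:
 *
 *	dma_addr_t sqc_dma;
 *	struct qm_sqc *sqc = qm_ctx_alloc(qm, sizeof(*sqc), &sqc_dma);
 *
 *	if (!IS_ERR(sqc))
 *		ret = hisi_qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, 0, 1);
 */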

static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
	u64 doorbell;

	doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V1) |
		   ((u64)index << QM_DB_INDEX_SHIFT_V1) |
		   ((u64)priority << QM_DB_PRIORITY_SHIFT_V1);

	writeq(doorbell, qm->io_base + QM_DOORBELL_BASE_V1);
}

static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
	void __iomem *io_base = qm->io_base;
	u16 randata = 0;
	u64 doorbell;

	if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
		io_base = qm->db_io_base + (u64)qn * qm->db_interval +
			  QM_DOORBELL_SQ_CQ_BASE_V2;
	else
		io_base += QM_DOORBELL_EQ_AEQ_BASE_V2;

	doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
		   ((u64)randata << QM_DB_RAND_SHIFT_V2) |
		   ((u64)index << QM_DB_INDEX_SHIFT_V2) |
		   ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);

	writeq(doorbell, io_base);
}
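
/*
 * V1 doorbell layout, per the QM_DB_*_SHIFT_V1 values above:
 *
 *	bits  0..15	queue number
 *	bits 16..31	doorbell command (sq/cq/eq/aeq)
 *	bits 32..47	ring index (new tail/head pointer)
 *	bits 48..63	priority
 *
 * V2 keeps the same idea but adds a random field and moves SQ/CQ
 * doorbells into a per-queue window of qm->db_interval bytes.
 */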

static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
	dev_dbg(&qm->pdev->dev, "QM doorbell request: qn=%u, cmd=%u, index=%u\n",
		qn, cmd, index);

	qm->ops->qm_db(qm, qn, cmd, index, priority);
}

static void qm_disable_clock_gate(struct hisi_qm *qm)
{
	u32 val;

	/* if qm enables clock gating in Kunpeng930, qos will be inaccurate. */
	if (qm->ver < QM_HW_V3)
		return;

	val = readl(qm->io_base + QM_PM_CTRL);
	val |= QM_IDLE_DISABLE;
	writel(val, qm->io_base + QM_PM_CTRL);
}

static int qm_dev_mem_reset(struct hisi_qm *qm)
{
	u32 val;

	writel(0x1, qm->io_base + QM_MEM_START_INIT);
	return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val,
					  val & BIT(0), POLL_PERIOD,
					  POLL_TIMEOUT);
}

static u32 qm_get_irq_num_v1(struct hisi_qm *qm)
{
	return QM_IRQ_NUM_V1;
}

static u32 qm_get_irq_num_v2(struct hisi_qm *qm)
{
	if (qm->fun_type == QM_HW_PF)
		return QM_IRQ_NUM_PF_V2;
	else
		return QM_IRQ_NUM_VF_V2;
}

static u32 qm_get_irq_num_v3(struct hisi_qm *qm)
{
	if (qm->fun_type == QM_HW_PF)
		return QM_IRQ_NUM_PF_V2;

	return QM_IRQ_NUM_VF_V3;
}

static int qm_pm_get_sync(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
		return 0;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0) {
		dev_err(dev, "failed to get_sync(%d).\n", ret);
		return ret;
	}

	return 0;
}

static void qm_pm_put_sync(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;

	if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
		return;

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}

static struct hisi_qp *qm_to_hisi_qp(struct hisi_qm *qm, struct qm_eqe *eqe)
{
	u16 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;

	return &qm->qp_array[cqn];
}

static void qm_cq_head_update(struct hisi_qp *qp)
{
	if (qp->qp_status.cq_head == QM_Q_DEPTH - 1) {
		qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase;
		qp->qp_status.cq_head = 0;
	} else {
		qp->qp_status.cq_head++;
	}
}
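
/*
 * Completion queues use a phase bit rather than a hardware head pointer:
 * the device writes every CQE with the current phase, and the driver owns
 * a CQE while QM_CQE_PHASE(cqe) matches qp_status.cqc_phase. Wrapping the
 * head back to slot 0 therefore flips the phase the driver expects next.
 */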

static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm)
{
	if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP))
		return;

	if (qp->event_cb) {
		qp->event_cb(qp);
		return;
	}

	if (qp->req_cb) {
		struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;

		while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
			dma_rmb();
			qp->req_cb(qp, qp->sqe + qm->sqe_size *
				   le16_to_cpu(cqe->sq_head));
			qm_cq_head_update(qp);
			cqe = qp->cqe + qp->qp_status.cq_head;
			qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
			      qp->qp_status.cq_head, 0);
			atomic_dec(&qp->qp_status.used);
		}

		/* set c_flag */
		qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
		      qp->qp_status.cq_head, 1);
	}
}

static void qm_work_process(struct work_struct *work)
{
	struct hisi_qm *qm = container_of(work, struct hisi_qm, work);
	struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
	struct hisi_qp *qp;
	int eqe_num = 0;

	while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
		eqe_num++;
		qp = qm_to_hisi_qp(qm, eqe);
		qm_poll_qp(qp, qm);

		if (qm->status.eq_head == QM_EQ_DEPTH - 1) {
			qm->status.eqc_phase = !qm->status.eqc_phase;
			eqe = qm->eqe;
			qm->status.eq_head = 0;
		} else {
			eqe++;
			qm->status.eq_head++;
		}

		if (eqe_num == QM_EQ_DEPTH / 2 - 1) {
			eqe_num = 0;
			qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
		}
	}

	qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
}
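
/*
 * The EQ doorbell is rung once every QM_EQ_DEPTH / 2 - 1 events instead of
 * per EQE, presumably so hardware can keep producing into the freed half
 * of the ring while a long batch is drained; the doorbell after the loop
 * acknowledges whatever remains.
 */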

static irqreturn_t do_qm_irq(int irq, void *data)
{
	struct hisi_qm *qm = (struct hisi_qm *)data;

	/* the workqueue created by device driver of QM */
	if (qm->wq)
		queue_work(qm->wq, &qm->work);
	else
		schedule_work(&qm->work);

	return IRQ_HANDLED;
}

static irqreturn_t qm_irq(int irq, void *data)
{
	struct hisi_qm *qm = data;

	if (readl(qm->io_base + QM_VF_EQ_INT_SOURCE))
		return do_qm_irq(irq, data);

	atomic64_inc(&qm->debug.dfx.err_irq_cnt);
	dev_err(&qm->pdev->dev, "invalid int source\n");
	qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);

	return IRQ_NONE;
}

static irqreturn_t qm_mb_cmd_irq(int irq, void *data)
{
	struct hisi_qm *qm = data;
	u32 val;

	val = readl(qm->io_base + QM_IFC_INT_STATUS);
	val &= QM_IFC_INT_STATUS_MASK;
	if (!val)
		return IRQ_NONE;

	schedule_work(&qm->cmd_process);

	return IRQ_HANDLED;
}

static void qm_set_qp_disable(struct hisi_qp *qp, int offset)
{
	u32 *addr;

	if (qp->is_in_kernel)
		return;

	addr = (u32 *)(qp->qdma.va + qp->qdma.size) - offset;
	*addr = 1;

	/* make sure setup is completed */
	mb();
}

static void qm_disable_qp(struct hisi_qm *qm, u32 qp_id)
{
	struct hisi_qp *qp = &qm->qp_array[qp_id];

	qm_set_qp_disable(qp, QM_RESET_STOP_TX_OFFSET);
	hisi_qm_stop_qp(qp);
	qm_set_qp_disable(qp, QM_RESET_STOP_RX_OFFSET);
}

static void qm_reset_function(struct hisi_qm *qm)
{
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
	struct device *dev = &qm->pdev->dev;
	int ret;

	if (qm_check_dev_error(pf_qm))
		return;

	ret = qm_reset_prepare_ready(qm);
	if (ret) {
		dev_err(dev, "reset function not ready\n");
		return;
	}

	ret = hisi_qm_stop(qm, QM_FLR);
	if (ret) {
		dev_err(dev, "failed to stop qm when reset function\n");
		goto clear_bit;
	}

	ret = hisi_qm_start(qm);
	if (ret)
		dev_err(dev, "failed to start qm when reset function\n");

clear_bit:
	qm_reset_bit_clear(qm);
}

static irqreturn_t qm_aeq_thread(int irq, void *data)
{
	struct hisi_qm *qm = data;
	struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
	u32 type, qp_id;

	while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
		type = le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT;
		qp_id = le32_to_cpu(aeqe->dw0) & QM_AEQE_CQN_MASK;

		switch (type) {
		case QM_EQ_OVERFLOW:
			dev_err(&qm->pdev->dev, "eq overflow, reset function\n");
			qm_reset_function(qm);
			return IRQ_HANDLED;
		case QM_CQ_OVERFLOW:
			dev_err(&qm->pdev->dev, "cq overflow, stop qp(%u)\n",
				qp_id);
			fallthrough;
		case QM_CQE_ERROR:
			qm_disable_qp(qm, qp_id);
			break;
		default:
			dev_err(&qm->pdev->dev, "unknown error type %u\n",
				type);
			break;
		}

		if (qm->status.aeq_head == QM_Q_DEPTH - 1) {
			qm->status.aeqc_phase = !qm->status.aeqc_phase;
			aeqe = qm->aeqe;
			qm->status.aeq_head = 0;
		} else {
			aeqe++;
			qm->status.aeq_head++;
		}
	}

	qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);

	return IRQ_HANDLED;
}

static irqreturn_t qm_aeq_irq(int irq, void *data)
{
	struct hisi_qm *qm = data;

	atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
	if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE))
		return IRQ_NONE;

	return IRQ_WAKE_THREAD;
}

static void qm_irq_unregister(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;

	free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);

	if (qm->ver > QM_HW_V1) {
		free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);

		if (qm->fun_type == QM_HW_PF)
			free_irq(pci_irq_vector(pdev,
				 QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
	}

	if (qm->ver > QM_HW_V2)
		free_irq(pci_irq_vector(pdev, QM_CMD_EVENT_IRQ_VECTOR), qm);
}

static void qm_init_qp_status(struct hisi_qp *qp)
{
	struct hisi_qp_status *qp_status = &qp->qp_status;

	qp_status->sq_tail = 0;
	qp_status->cq_head = 0;
	qp_status->cqc_phase = true;
	atomic_set(&qp_status->used, 0);
}

static void qm_init_prefetch(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	u32 page_type = 0x0;

	if (qm->ver < QM_HW_V3)
		return;

	switch (PAGE_SIZE) {
	case SZ_4K:
		page_type = 0x0;
		break;
	case SZ_16K:
		page_type = 0x1;
		break;
	case SZ_64K:
		page_type = 0x2;
		break;
	default:
		dev_err(dev, "system page size is not supported: %lu, default set to 4KB",
			PAGE_SIZE);
	}

	writel(page_type, qm->io_base + QM_PAGE_SIZE);
}
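
/*
 * Shaper rate formula, as implemented by acc_shaper_para_calc() below
 * (QM_QOS_TICK = 0x300, QM_QOS_DIVISOR_CLK = 0x1f40):
 *
 *	IR = (cir_b * QM_QOS_DIVISOR_CLK * 2^cir_u) / (QM_QOS_TICK * 2^cir_s)
 *
 * so cir_u scales the rate up and cir_s scales it down.
 */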
static u32 acc_shaper_para_calc(u64 cir_b, u64 cir_u, u64 cir_s)
{
	return ((cir_b * QM_QOS_DIVISOR_CLK) * (1 << cir_u)) /
	       (QM_QOS_TICK * (1 << cir_s));
}

static u32 acc_shaper_calc_cbs_s(u32 ir)
{
	int table_size = ARRAY_SIZE(shaper_cbs_s);
	int i;

	for (i = 0; i < table_size; i++) {
		if (ir >= shaper_cbs_s[i].start && ir <= shaper_cbs_s[i].end)
			return shaper_cbs_s[i].val;
	}

	return QM_SHAPER_MIN_CBS_S;
}

static u32 acc_shaper_calc_cir_s(u32 ir)
{
	int table_size = ARRAY_SIZE(shaper_cir_s);
	int i;

	for (i = 0; i < table_size; i++) {
		if (ir >= shaper_cir_s[i].start && ir <= shaper_cir_s[i].end)
			return shaper_cir_s[i].val;
	}

	return 0;
}

static int qm_get_shaper_para(u32 ir, struct qm_shaper_factor *factor)
{
	u32 cir_b, cir_u, cir_s, ir_calc;
	u32 error_rate;

	factor->cbs_s = acc_shaper_calc_cbs_s(ir);
	cir_s = acc_shaper_calc_cir_s(ir);

	for (cir_b = QM_QOS_MIN_CIR_B; cir_b <= QM_QOS_MAX_CIR_B; cir_b++) {
		for (cir_u = 0; cir_u <= QM_QOS_MAX_CIR_U; cir_u++) {
			ir_calc = acc_shaper_para_calc(cir_b, cir_u, cir_s);

			error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir;
			if (error_rate <= QM_QOS_MIN_ERROR_RATE) {
				factor->cir_b = cir_b;
				factor->cir_u = cir_u;
				factor->cir_s = cir_s;
				return 0;
			}
		}
	}

	return -EINVAL;
}
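
/*
 * qm_get_shaper_para() fixes cir_s and cbs_s from the typical-value tables
 * and then brute-forces (cir_b, cir_u) over a small grid, accepting the
 * first combination whose computed rate lands within QM_QOS_MIN_ERROR_RATE
 * (0.5%, since the error is scaled by QM_QOS_EXPAND_RATE = 1000) of the
 * requested rate.
 */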

static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
			    u32 number, struct qm_shaper_factor *factor)
{
	u64 tmp = 0;

	if (number > 0) {
		switch (type) {
		case SQC_VFT:
			if (qm->ver == QM_HW_V1) {
				tmp = QM_SQC_VFT_BUF_SIZE	|
				      QM_SQC_VFT_SQC_SIZE	|
				      QM_SQC_VFT_INDEX_NUMBER	|
				      QM_SQC_VFT_VALID		|
				      (u64)base << QM_SQC_VFT_START_SQN_SHIFT;
			} else {
				tmp = (u64)base << QM_SQC_VFT_START_SQN_SHIFT |
				      QM_SQC_VFT_VALID |
				      (u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT;
			}
			break;
		case CQC_VFT:
			if (qm->ver == QM_HW_V1) {
				tmp = QM_CQC_VFT_BUF_SIZE	|
				      QM_CQC_VFT_SQC_SIZE	|
				      QM_CQC_VFT_INDEX_NUMBER	|
				      QM_CQC_VFT_VALID;
			} else {
				tmp = QM_CQC_VFT_VALID;
			}
			break;
		case SHAPER_VFT:
			if (qm->ver >= QM_HW_V3) {
				tmp = factor->cir_b |
				      (factor->cir_u << QM_SHAPER_FACTOR_CIR_U_SHIFT) |
				      (factor->cir_s << QM_SHAPER_FACTOR_CIR_S_SHIFT) |
				      (QM_SHAPER_CBS_B << QM_SHAPER_FACTOR_CBS_B_SHIFT) |
				      (factor->cbs_s << QM_SHAPER_FACTOR_CBS_S_SHIFT);
			}
			break;
		}
	}

	writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L);
	writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H);
}

static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
			     u32 fun_num, u32 base, u32 number)
{
	struct qm_shaper_factor *factor = &qm->factor[fun_num];
	unsigned int val;
	int ret;

	ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
					 val & BIT(0), POLL_PERIOD,
					 POLL_TIMEOUT);
	if (ret)
		return ret;

	writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR);
	writel(type, qm->io_base + QM_VFT_CFG_TYPE);
	if (type == SHAPER_VFT)
		fun_num |= base << QM_SHAPER_VFT_OFFSET;

	writel(fun_num, qm->io_base + QM_VFT_CFG);

	qm_vft_data_cfg(qm, type, base, number, factor);

	writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
	writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);

	return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
					  val & BIT(0), POLL_PERIOD,
					  POLL_TIMEOUT);
}

static int qm_shaper_init_vft(struct hisi_qm *qm, u32 fun_num)
{
	u32 qos = qm->factor[fun_num].func_qos;
	int ret, i;

	ret = qm_get_shaper_para(qos * QM_QOS_RATE, &qm->factor[fun_num]);
	if (ret) {
		dev_err(&qm->pdev->dev, "failed to calculate shaper parameter!\n");
		return ret;
	}
	writel(qm->type_rate, qm->io_base + QM_SHAPER_CFG);
	for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) {
		/* The base number of queue reuse for different alg type */
		ret = qm_set_vft_common(qm, SHAPER_VFT, fun_num, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/* The config should be conducted after qm_dev_mem_reset() */
static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
			      u32 number)
{
	int ret, i;

	for (i = SQC_VFT; i <= CQC_VFT; i++) {
		ret = qm_set_vft_common(qm, i, fun_num, base, number);
		if (ret)
			return ret;
	}

	/* init default shaper qos val */
	if (qm->ver >= QM_HW_V3) {
		ret = qm_shaper_init_vft(qm, fun_num);
		if (ret)
			goto back_sqc_cqc;
	}

	return 0;
back_sqc_cqc:
	for (i = SQC_VFT; i <= CQC_VFT; i++) {
		ret = qm_set_vft_common(qm, i, fun_num, 0, 0);
		if (ret)
			return ret;
	}
	return ret;
}

static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
{
	u64 sqc_vft;
	int ret;

	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
	if (ret)
		return ret;

	sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
		  ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
	*base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
	*number = (QM_SQC_VFT_NUM_MASK_v2 &
		   (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;

	return 0;
}

static int qm_get_vf_qp_num(struct hisi_qm *qm, u32 fun_num)
{
	u32 remain_q_num, vfq_num;
	u32 num_vfs = qm->vfs_num;

	vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs;
	if (vfq_num >= qm->max_qp_num)
		return qm->max_qp_num;

	remain_q_num = (qm->ctrl_qp_num - qm->qp_num) % num_vfs;
	if (vfq_num + remain_q_num <= qm->max_qp_num)
		return fun_num == num_vfs ? vfq_num + remain_q_num : vfq_num;

	/*
	 * if vfq_num + remain_q_num > max_qp_num, spread the remainder out:
	 * the last remain_q_num VFs each take one extra queue.
	 */
	return fun_num + remain_q_num > num_vfs ? vfq_num + 1 : vfq_num;
}
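
/*
 * Worked example with hypothetical numbers: 32 spare queues, 5 VFs and
 * max_qp_num = 8 give vfq_num = 6 and remain_q_num = 2; since 6 + 2 <= 8,
 * VFs 1-4 get 6 queues each and the last VF (fun_num == num_vfs) gets 8.
 */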

static struct hisi_qm *file_to_qm(struct debugfs_file *file)
{
	struct qm_debug *debug = file->debug;

	return container_of(debug, struct hisi_qm, debug);
}

static u32 current_q_read(struct hisi_qm *qm)
{
	return readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) >> QM_DFX_QN_SHIFT;
}

static int current_q_write(struct hisi_qm *qm, u32 val)
{
	u32 tmp;

	if (val >= qm->debug.curr_qm_qp_num)
		return -EINVAL;

	tmp = val << QM_DFX_QN_SHIFT |
	      (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_FUN_MASK);
	writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);

	tmp = val << QM_DFX_QN_SHIFT |
	      (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_FUN_MASK);
	writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);

	return 0;
}

static u32 clear_enable_read(struct hisi_qm *qm)
{
	return readl(qm->io_base + QM_DFX_CNT_CLR_CE);
}

/* rd_clr_ctrl 1 enables read-clear, 0 disables it */
static int clear_enable_write(struct hisi_qm *qm, u32 rd_clr_ctrl)
{
	if (rd_clr_ctrl > 1)
		return -EINVAL;

	writel(rd_clr_ctrl, qm->io_base + QM_DFX_CNT_CLR_CE);

	return 0;
}

static u32 current_qm_read(struct hisi_qm *qm)
{
	return readl(qm->io_base + QM_DFX_MB_CNT_VF);
}

static int current_qm_write(struct hisi_qm *qm, u32 val)
{
	u32 tmp;

	if (val > qm->vfs_num)
		return -EINVAL;

	/* According to the selected function (PF or VF), store curr_qm_qp_num */
	if (!val)
		qm->debug.curr_qm_qp_num = qm->qp_num;
	else
		qm->debug.curr_qm_qp_num = qm_get_vf_qp_num(qm, val);

	writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
	writel(val, qm->io_base + QM_DFX_DB_CNT_VF);

	tmp = val |
	      (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
	writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);

	tmp = val |
	      (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
	writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);

	return 0;
}

static ssize_t qm_debug_read(struct file *filp, char __user *buf,
			     size_t count, loff_t *pos)
{
	struct debugfs_file *file = filp->private_data;
	enum qm_debug_file index = file->index;
	struct hisi_qm *qm = file_to_qm(file);
	char tbuf[QM_DBG_TMP_BUF_LEN];
	u32 val;
	int ret;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	mutex_lock(&file->lock);
	switch (index) {
	case CURRENT_QM:
		val = current_qm_read(qm);
		break;
	case CURRENT_Q:
		val = current_q_read(qm);
		break;
	case CLEAR_ENABLE:
		val = clear_enable_read(qm);
		break;
	default:
		goto err_input;
	}
	mutex_unlock(&file->lock);

	hisi_qm_put_dfx_access(qm);
	ret = scnprintf(tbuf, QM_DBG_TMP_BUF_LEN, "%u\n", val);
	return simple_read_from_buffer(buf, count, pos, tbuf, ret);

err_input:
	mutex_unlock(&file->lock);
	hisi_qm_put_dfx_access(qm);
	return -EINVAL;
}

static ssize_t qm_debug_write(struct file *filp, const char __user *buf,
			      size_t count, loff_t *pos)
{
	struct debugfs_file *file = filp->private_data;
	enum qm_debug_file index = file->index;
	struct hisi_qm *qm = file_to_qm(file);
	unsigned long val;
	char tbuf[QM_DBG_TMP_BUF_LEN];
	int len, ret;

	if (*pos != 0)
		return 0;

	if (count >= QM_DBG_TMP_BUF_LEN)
		return -ENOSPC;

	len = simple_write_to_buffer(tbuf, QM_DBG_TMP_BUF_LEN - 1, pos, buf,
				     count);
	if (len < 0)
		return len;

	tbuf[len] = '\0';
	if (kstrtoul(tbuf, 0, &val))
		return -EFAULT;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	mutex_lock(&file->lock);
	switch (index) {
	case CURRENT_QM:
		ret = current_qm_write(qm, val);
		break;
	case CURRENT_Q:
		ret = current_q_write(qm, val);
		break;
	case CLEAR_ENABLE:
		ret = clear_enable_write(qm, val);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&file->lock);

	hisi_qm_put_dfx_access(qm);

	if (ret)
		return ret;

	return count;
}

static const struct file_operations qm_debug_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = qm_debug_read,
	.write = qm_debug_write,
};

#define CNT_CYC_REGS_NUM		10
static const struct debugfs_reg32 qm_dfx_regs[] = {
	/* XXX_CNT are reading clear register */
	{"QM_ECC_1BIT_CNT               ",  0x104000ull},
	{"QM_ECC_MBIT_CNT               ",  0x104008ull},
	{"QM_DFX_MB_CNT                 ",  0x104018ull},
	{"QM_DFX_DB_CNT                 ",  0x104028ull},
	{"QM_DFX_SQE_CNT                ",  0x104038ull},
	{"QM_DFX_CQE_CNT                ",  0x104048ull},
	{"QM_DFX_SEND_SQE_TO_ACC_CNT    ",  0x104050ull},
	{"QM_DFX_WB_SQE_FROM_ACC_CNT    ",  0x104058ull},
	{"QM_DFX_ACC_FINISH_CNT         ",  0x104060ull},
	{"QM_DFX_CQE_ERR_CNT            ",  0x1040b4ull},
	{"QM_DFX_FUNS_ACTIVE_ST         ",  0x200ull},
	{"QM_ECC_1BIT_INF               ",  0x104004ull},
	{"QM_ECC_MBIT_INF               ",  0x10400cull},
	{"QM_DFX_ACC_RDY_VLD0           ",  0x1040a0ull},
	{"QM_DFX_ACC_RDY_VLD1           ",  0x1040a4ull},
	{"QM_DFX_AXI_RDY_VLD            ",  0x1040a8ull},
	{"QM_DFX_FF_ST0                 ",  0x1040c8ull},
	{"QM_DFX_FF_ST1                 ",  0x1040ccull},
	{"QM_DFX_FF_ST2                 ",  0x1040d0ull},
	{"QM_DFX_FF_ST3                 ",  0x1040d4ull},
	{"QM_DFX_FF_ST4                 ",  0x1040d8ull},
	{"QM_DFX_FF_ST5                 ",  0x1040dcull},
	{"QM_DFX_FF_ST6                 ",  0x1040e0ull},
	{"QM_IN_IDLE_ST                 ",  0x1040e4ull},
};

static const struct debugfs_reg32 qm_vf_dfx_regs[] = {
	{"QM_DFX_FUNS_ACTIVE_ST         ",  0x200ull},
};

/**
 * hisi_qm_regs_dump() - Dump registers' value.
 * @s: debugfs file handle.
 * @regset: accelerator registers information.
 *
 * Dump accelerator registers.
 */
void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset)
{
	struct pci_dev *pdev = to_pci_dev(regset->dev);
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	const struct debugfs_reg32 *regs = regset->regs;
	int regs_len = regset->nregs;
	int i, ret;
	u32 val;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return;

	for (i = 0; i < regs_len; i++) {
		val = readl(regset->base + regs[i].offset);
		seq_printf(s, "%s= 0x%08x\n", regs[i].name, val);
	}

	hisi_qm_put_dfx_access(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_regs_dump);

static int qm_regs_show(struct seq_file *s, void *unused)
{
	struct hisi_qm *qm = s->private;
	struct debugfs_regset32 regset;

	if (qm->fun_type == QM_HW_PF) {
		regset.regs = qm_dfx_regs;
		regset.nregs = ARRAY_SIZE(qm_dfx_regs);
	} else {
		regset.regs = qm_vf_dfx_regs;
		regset.nregs = ARRAY_SIZE(qm_vf_dfx_regs);
	}

	regset.base = qm->io_base;
	regset.dev = &qm->pdev->dev;

	hisi_qm_regs_dump(s, &regset);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(qm_regs);

static ssize_t qm_cmd_read(struct file *filp, char __user *buffer,
			   size_t count, loff_t *pos)
{
	char buf[QM_DBG_READ_LEN];
	int len;

	len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n",
			"Please echo help to cmd to get help information");

	return simple_read_from_buffer(buffer, count, pos, buf, len);
}

static void *qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size,
			  dma_addr_t *dma_addr)
{
	struct device *dev = &qm->pdev->dev;
	void *ctx_addr;

	ctx_addr = kzalloc(ctx_size, GFP_KERNEL);
	if (!ctx_addr)
		return ERR_PTR(-ENOMEM);

	*dma_addr = dma_map_single(dev, ctx_addr, ctx_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma_addr)) {
		dev_err(dev, "DMA mapping error!\n");
		kfree(ctx_addr);
		return ERR_PTR(-ENOMEM);
	}

	return ctx_addr;
}

static void qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
			const void *ctx_addr, dma_addr_t *dma_addr)
{
	struct device *dev = &qm->pdev->dev;

	dma_unmap_single(dev, *dma_addr, ctx_size, DMA_FROM_DEVICE);
	kfree(ctx_addr);
}

static int dump_show(struct hisi_qm *qm, void *info,
		     unsigned int info_size, char *info_name)
{
	struct device *dev = &qm->pdev->dev;
	u8 *info_buf, *info_curr = info;
	u32 i;
#define BYTE_PER_DW	4

	info_buf = kzalloc(info_size, GFP_KERNEL);
	if (!info_buf)
		return -ENOMEM;

	for (i = 0; i < info_size; i++, info_curr++) {
		if (i % BYTE_PER_DW == 0)
			info_buf[i + 3UL] = *info_curr;
		else if (i % BYTE_PER_DW == 1)
			info_buf[i + 1UL] = *info_curr;
		else if (i % BYTE_PER_DW == 2)
			info_buf[i - 1] = *info_curr;
		else if (i % BYTE_PER_DW == 3)
			info_buf[i - 3] = *info_curr;
	}

	dev_info(dev, "%s DUMP\n", info_name);
	for (i = 0; i < info_size; i += BYTE_PER_DW) {
		pr_info("DW%u: %02X%02X %02X%02X\n", i / BYTE_PER_DW,
			info_buf[i], info_buf[i + 1UL],
			info_buf[i + 2UL], info_buf[i + 3UL]);
	}

	kfree(info_buf);

	return 0;
}
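
/*
 * dump_show() reverses the bytes within each 4-byte group before printing:
 * contexts are little-endian in memory, and the swap lets each "DW%u" line
 * read most-significant byte first. E.g. memory bytes 78 56 34 12 print as
 * "DW0: 1234 5678".
 */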

static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
{
	return hisi_qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1);
}

static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
{
	return hisi_qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1);
}

static int qm_sqc_dump(struct hisi_qm *qm, const char *s)
{
	struct device *dev = &qm->pdev->dev;
	struct qm_sqc *sqc, *sqc_curr;
	dma_addr_t sqc_dma;
	u32 qp_id;
	int ret;

	if (!s)
		return -EINVAL;

	ret = kstrtou32(s, 0, &qp_id);
	if (ret || qp_id >= qm->qp_num) {
		dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1);
		return -EINVAL;
	}

	sqc = qm_ctx_alloc(qm, sizeof(*sqc), &sqc_dma);
	if (IS_ERR(sqc))
		return PTR_ERR(sqc);

	ret = qm_dump_sqc_raw(qm, sqc_dma, qp_id);
	if (ret) {
		down_read(&qm->qps_lock);
		if (qm->sqc) {
			sqc_curr = qm->sqc + qp_id;

			ret = dump_show(qm, sqc_curr, sizeof(*sqc),
					"SOFT SQC");
			if (ret)
				dev_info(dev, "Show soft sqc failed!\n");
		}
		up_read(&qm->qps_lock);

		goto err_free_ctx;
	}

	ret = dump_show(qm, sqc, sizeof(*sqc), "SQC");
	if (ret)
		dev_info(dev, "Show hw sqc failed!\n");

err_free_ctx:
	qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma);
	return ret;
}

static int qm_cqc_dump(struct hisi_qm *qm, const char *s)
{
	struct device *dev = &qm->pdev->dev;
	struct qm_cqc *cqc, *cqc_curr;
	dma_addr_t cqc_dma;
	u32 qp_id;
	int ret;

	if (!s)
		return -EINVAL;

	ret = kstrtou32(s, 0, &qp_id);
	if (ret || qp_id >= qm->qp_num) {
		dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1);
		return -EINVAL;
	}

	cqc = qm_ctx_alloc(qm, sizeof(*cqc), &cqc_dma);
	if (IS_ERR(cqc))
		return PTR_ERR(cqc);

	ret = qm_dump_cqc_raw(qm, cqc_dma, qp_id);
	if (ret) {
		down_read(&qm->qps_lock);
		if (qm->cqc) {
			cqc_curr = qm->cqc + qp_id;

			ret = dump_show(qm, cqc_curr, sizeof(*cqc),
					"SOFT CQC");
			if (ret)
				dev_info(dev, "Show soft cqc failed!\n");
		}
		up_read(&qm->qps_lock);

		goto err_free_ctx;
	}

	ret = dump_show(qm, cqc, sizeof(*cqc), "CQC");
	if (ret)
		dev_info(dev, "Show hw cqc failed!\n");

err_free_ctx:
	qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma);
	return ret;
}

static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size,
			    int cmd, char *name)
{
	struct device *dev = &qm->pdev->dev;
	dma_addr_t xeqc_dma;
	void *xeqc;
	int ret;

	if (strsep(&s, " ")) {
		dev_err(dev, "Please do not input extra characters!\n");
		return -EINVAL;
	}

	xeqc = qm_ctx_alloc(qm, size, &xeqc_dma);
	if (IS_ERR(xeqc))
		return PTR_ERR(xeqc);

	ret = hisi_qm_mb(qm, cmd, xeqc_dma, 0, 1);
	if (ret)
		goto err_free_ctx;

	ret = dump_show(qm, xeqc, size, name);
	if (ret)
		dev_info(dev, "Show hw %s failed!\n", name);

err_free_ctx:
	qm_ctx_free(qm, size, xeqc, &xeqc_dma);
	return ret;
}

static int q_dump_param_parse(struct hisi_qm *qm, char *s,
			      u32 *e_id, u32 *q_id)
{
	struct device *dev = &qm->pdev->dev;
	unsigned int qp_num = qm->qp_num;
	char *presult;
	int ret;

	presult = strsep(&s, " ");
	if (!presult) {
		dev_err(dev, "Please input qp number!\n");
		return -EINVAL;
	}

	ret = kstrtou32(presult, 0, q_id);
	if (ret || *q_id >= qp_num) {
		dev_err(dev, "Please input qp num (0-%u)", qp_num - 1);
		return -EINVAL;
	}

	presult = strsep(&s, " ");
	if (!presult) {
		dev_err(dev, "Please input sqe number!\n");
		return -EINVAL;
	}

	ret = kstrtou32(presult, 0, e_id);
	if (ret || *e_id >= QM_Q_DEPTH) {
		dev_err(dev, "Please input sqe num (0-%d)", QM_Q_DEPTH - 1);
		return -EINVAL;
	}

	if (strsep(&s, " ")) {
		dev_err(dev, "Please do not input extra characters!\n");
		return -EINVAL;
	}

	return 0;
}

static int qm_sq_dump(struct hisi_qm *qm, char *s)
{
	struct device *dev = &qm->pdev->dev;
	void *sqe, *sqe_curr;
	struct hisi_qp *qp;
	u32 qp_id, sqe_id;
	int ret;

	ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id);
	if (ret)
		return ret;

	sqe = kzalloc(qm->sqe_size * QM_Q_DEPTH, GFP_KERNEL);
	if (!sqe)
		return -ENOMEM;

	qp = &qm->qp_array[qp_id];
	memcpy(sqe, qp->sqe, qm->sqe_size * QM_Q_DEPTH);
	sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size);
	memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK,
	       qm->debug.sqe_mask_len);

	ret = dump_show(qm, sqe_curr, qm->sqe_size, "SQE");
	if (ret)
		dev_info(dev, "Show sqe failed!\n");

	kfree(sqe);

	return ret;
}

static int qm_cq_dump(struct hisi_qm *qm, char *s)
{
	struct device *dev = &qm->pdev->dev;
	struct qm_cqe *cqe_curr;
	struct hisi_qp *qp;
	u32 qp_id, cqe_id;
	int ret;

	ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id);
	if (ret)
		return ret;

	qp = &qm->qp_array[qp_id];
	cqe_curr = qp->cqe + cqe_id;
	ret = dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE");
	if (ret)
		dev_info(dev, "Show cqe failed!\n");

	return ret;
}

static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
			  size_t size, char *name)
{
	struct device *dev = &qm->pdev->dev;
	void *xeqe;
	u32 xeqe_id;
	int ret;

	if (!s)
		return -EINVAL;

	ret = kstrtou32(s, 0, &xeqe_id);
	if (ret)
		return -EINVAL;

	if (!strcmp(name, "EQE") && xeqe_id >= QM_EQ_DEPTH) {
		dev_err(dev, "Please input eqe num (0-%d)", QM_EQ_DEPTH - 1);
		return -EINVAL;
	} else if (!strcmp(name, "AEQE") && xeqe_id >= QM_Q_DEPTH) {
		dev_err(dev, "Please input aeqe num (0-%d)", QM_Q_DEPTH - 1);
		return -EINVAL;
	}

	down_read(&qm->qps_lock);

	if (qm->eqe && !strcmp(name, "EQE")) {
		xeqe = qm->eqe + xeqe_id;
	} else if (qm->aeqe && !strcmp(name, "AEQE")) {
		xeqe = qm->aeqe + xeqe_id;
	} else {
		ret = -EINVAL;
		goto err_unlock;
	}

	ret = dump_show(qm, xeqe, size, name);
	if (ret)
		dev_info(dev, "Show %s failed!\n", name);

err_unlock:
	up_read(&qm->qps_lock);
	return ret;
}

static int qm_dbg_help(struct hisi_qm *qm, char *s)
{
	struct device *dev = &qm->pdev->dev;

	if (strsep(&s, " ")) {
		dev_err(dev, "Please do not input extra characters!\n");
		return -EINVAL;
	}

	dev_info(dev, "available commands:\n");
	dev_info(dev, "sqc <num>\n");
	dev_info(dev, "cqc <num>\n");
	dev_info(dev, "eqc\n");
	dev_info(dev, "aeqc\n");
	dev_info(dev, "sq <num> <e>\n");
	dev_info(dev, "cq <num> <e>\n");
	dev_info(dev, "eq <e>\n");
	dev_info(dev, "aeq <e>\n");

	return 0;
}

static int qm_cmd_write_dump(struct hisi_qm *qm, const char *cmd_buf)
{
	struct device *dev = &qm->pdev->dev;
	char *presult, *s, *s_tmp;
	int ret;

	s = kstrdup(cmd_buf, GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	s_tmp = s;
	presult = strsep(&s, " ");
	if (!presult) {
		ret = -EINVAL;
		goto err_buffer_free;
	}

	if (!strcmp(presult, "sqc"))
		ret = qm_sqc_dump(qm, s);
	else if (!strcmp(presult, "cqc"))
		ret = qm_cqc_dump(qm, s);
	else if (!strcmp(presult, "eqc"))
		ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_eqc),
				       QM_MB_CMD_EQC, "EQC");
	else if (!strcmp(presult, "aeqc"))
		ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_aeqc),
				       QM_MB_CMD_AEQC, "AEQC");
	else if (!strcmp(presult, "sq"))
		ret = qm_sq_dump(qm, s);
	else if (!strcmp(presult, "cq"))
		ret = qm_cq_dump(qm, s);
	else if (!strcmp(presult, "eq"))
		ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_eqe), "EQE");
	else if (!strcmp(presult, "aeq"))
		ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_aeqe), "AEQE");
	else if (!strcmp(presult, "help"))
		ret = qm_dbg_help(qm, s);
	else
		ret = -EINVAL;

	if (ret)
		dev_info(dev, "Please echo help\n");

err_buffer_free:
	kfree(s_tmp);

	return ret;
}

static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
			    size_t count, loff_t *pos)
{
	struct hisi_qm *qm = filp->private_data;
	char *cmd_buf, *cmd_buf_tmp;
	int ret;

	if (*pos)
		return 0;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	/* Judge if the instance is being reset. */
	if (unlikely(atomic_read(&qm->status.flags) == QM_STOP)) {
		ret = 0;
		goto put_dfx_access;
	}

	if (count > QM_DBG_WRITE_LEN) {
		ret = -ENOSPC;
		goto put_dfx_access;
	}

	cmd_buf = memdup_user_nul(buffer, count);
	if (IS_ERR(cmd_buf)) {
		ret = PTR_ERR(cmd_buf);
		goto put_dfx_access;
	}

	cmd_buf_tmp = strchr(cmd_buf, '\n');
	if (cmd_buf_tmp) {
		*cmd_buf_tmp = '\0';
		count = cmd_buf_tmp - cmd_buf + 1;
	}

	ret = qm_cmd_write_dump(qm, cmd_buf);
	if (ret) {
		kfree(cmd_buf);
		goto put_dfx_access;
	}

	kfree(cmd_buf);

	ret = count;

put_dfx_access:
	hisi_qm_put_dfx_access(qm);
	return ret;
}
2091
2092static const struct file_operations qm_cmd_fops = {
2093 .owner = THIS_MODULE,
2094 .open = simple_open,
2095 .read = qm_cmd_read,
2096 .write = qm_cmd_write,
2097};
2098
2099static void qm_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
2100 enum qm_debug_file index)
2101{
2102 struct debugfs_file *file = qm->debug.files + index;
2103
2104 debugfs_create_file(qm_debug_file_name[index], 0600, dir, file,
2105 &qm_debug_fops);
2106
2107 file->index = index;
2108 mutex_init(&file->lock);
2109 file->debug = &qm->debug;
2110}
2111
2112static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
2113{
2114 writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
2115}
2116
2117static void qm_hw_error_cfg(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
2118{
2119 qm->error_mask = ce | nfe | fe;
2120
2121 writel(QM_ABNORMAL_INT_SOURCE_CLR,
2122 qm->io_base + QM_ABNORMAL_INT_SOURCE);
2123
2124
2125 writel(ce, qm->io_base + QM_RAS_CE_ENABLE);
2126 writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD);
2127 writel(nfe, qm->io_base + QM_RAS_NFE_ENABLE);
2128 writel(fe, qm->io_base + QM_RAS_FE_ENABLE);
2129}
2130
2131static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
2132{
2133 u32 irq_enable = ce | nfe | fe;
2134 u32 irq_unmask = ~irq_enable;
2135
2136 qm_hw_error_cfg(qm, ce, nfe, fe);
2137
2138 irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
2139 writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
2140}
2141
2142static void qm_hw_error_uninit_v2(struct hisi_qm *qm)
2143{
2144 writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
2145}
2146
2147static void qm_hw_error_init_v3(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
2148{
2149 u32 irq_enable = ce | nfe | fe;
2150 u32 irq_unmask = ~irq_enable;
2151
2152 qm_hw_error_cfg(qm, ce, nfe, fe);
2153
2154
2155 writel(nfe & (~QM_DB_RANDOM_INVALID), qm->io_base + QM_OOO_SHUTDOWN_SEL);
2156
2157 irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
2158 writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
2159}
2160
2161static void qm_hw_error_uninit_v3(struct hisi_qm *qm)
2162{
2163 writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
2164
2165
2166 writel(0x0, qm->io_base + QM_OOO_SHUTDOWN_SEL);
2167}
2168
2169static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
2170{
2171 const struct hisi_qm_hw_error *err;
2172 struct device *dev = &qm->pdev->dev;
2173 u32 reg_val, type, vf_num;
2174 int i;
2175
2176 for (i = 0; i < ARRAY_SIZE(qm_hw_error); i++) {
2177 err = &qm_hw_error[i];
2178 if (!(err->int_msk & error_status))
2179 continue;
2180
2181 dev_err(dev, "%s [error status=0x%x] found\n",
2182 err->msg, err->int_msk);
2183
2184 if (err->int_msk & QM_DB_TIMEOUT) {
2185 reg_val = readl(qm->io_base + QM_ABNORMAL_INF01);
2186 type = (reg_val & QM_DB_TIMEOUT_TYPE) >>
2187 QM_DB_TIMEOUT_TYPE_SHIFT;
2188 vf_num = reg_val & QM_DB_TIMEOUT_VF;
2189 dev_err(dev, "qm %s doorbell timeout in function %u\n",
2190 qm_db_timeout[type], vf_num);
2191 } else if (err->int_msk & QM_OF_FIFO_OF) {
2192 reg_val = readl(qm->io_base + QM_ABNORMAL_INF00);
2193 type = (reg_val & QM_FIFO_OVERFLOW_TYPE) >>
2194 QM_FIFO_OVERFLOW_TYPE_SHIFT;
2195 vf_num = reg_val & QM_FIFO_OVERFLOW_VF;
2196
2197 if (type < ARRAY_SIZE(qm_fifo_overflow))
2198 dev_err(dev, "qm %s fifo overflow in function %u\n",
2199 qm_fifo_overflow[type], vf_num);
2200 else
2201 dev_err(dev, "unknown error type\n");
2202 }
2203 }
2204}
2205
2206static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
2207{
2208 u32 error_status, tmp, val;
2209
	/* read err sts */
2211 tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
2212 error_status = qm->error_mask & tmp;
2213
2214 if (error_status) {
2215 if (error_status & QM_ECC_MBIT)
2216 qm->err_status.is_qm_ecc_mbit = true;
2217
2218 qm_log_hw_error(qm, error_status);
2219 val = error_status | QM_DB_RANDOM_INVALID | QM_BASE_CE;
		/* ce error does not need to be reset */
2221 if (val == (QM_DB_RANDOM_INVALID | QM_BASE_CE)) {
2222 writel(error_status, qm->io_base +
2223 QM_ABNORMAL_INT_SOURCE);
2224 writel(qm->err_info.nfe,
2225 qm->io_base + QM_RAS_NFE_ENABLE);
2226 return ACC_ERR_RECOVERED;
2227 }
2228
2229 return ACC_ERR_NEED_RESET;
2230 }
2231
2232 return ACC_ERR_RECOVERED;
2233}
2234
2235static int qm_get_mb_cmd(struct hisi_qm *qm, u64 *msg, u16 fun_num)
2236{
2237 struct qm_mailbox mailbox;
2238 int ret;
2239
2240 qm_mb_pre_init(&mailbox, QM_MB_CMD_DST, 0, fun_num, 0);
2241 mutex_lock(&qm->mailbox_lock);
2242 ret = qm_mb_nolock(qm, &mailbox);
2243 if (ret)
2244 goto err_unlock;
2245
2246 *msg = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
2247 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
2248
2249err_unlock:
2250 mutex_unlock(&qm->mailbox_lock);
2251 return ret;
2252}
2253
2254static void qm_clear_cmd_interrupt(struct hisi_qm *qm, u64 vf_mask)
2255{
2256 u32 val;
2257
2258 if (qm->fun_type == QM_HW_PF)
2259 writeq(vf_mask, qm->io_base + QM_IFC_INT_SOURCE_P);
2260
2261 val = readl(qm->io_base + QM_IFC_INT_SOURCE_V);
2262 val |= QM_IFC_INT_SOURCE_MASK;
2263 writel(val, qm->io_base + QM_IFC_INT_SOURCE_V);
2264}
2265
2266static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id)
2267{
2268 struct device *dev = &qm->pdev->dev;
2269 u32 cmd;
2270 u64 msg;
2271 int ret;
2272
2273 ret = qm_get_mb_cmd(qm, &msg, vf_id);
2274 if (ret) {
2275 dev_err(dev, "failed to get msg from VF(%u)!\n", vf_id);
2276 return;
2277 }
2278
2279 cmd = msg & QM_MB_CMD_DATA_MASK;
2280 switch (cmd) {
2281 case QM_VF_PREPARE_FAIL:
2282 dev_err(dev, "failed to stop VF(%u)!\n", vf_id);
2283 break;
2284 case QM_VF_START_FAIL:
2285 dev_err(dev, "failed to start VF(%u)!\n", vf_id);
2286 break;
2287 case QM_VF_PREPARE_DONE:
2288 case QM_VF_START_DONE:
2289 break;
2290 default:
2291 dev_err(dev, "unsupported cmd %u sent by VF(%u)!\n", cmd, vf_id);
2292 break;
2293 }
2294}
2295
2296static int qm_wait_vf_prepare_finish(struct hisi_qm *qm)
2297{
2298 struct device *dev = &qm->pdev->dev;
2299 u32 vfs_num = qm->vfs_num;
2300 int cnt = 0;
2301 int ret = 0;
2302 u64 val;
2303 u32 i;
2304
2305 if (!qm->vfs_num || qm->ver < QM_HW_V3)
2306 return 0;
2307
2308 while (true) {
2309 val = readq(qm->io_base + QM_IFC_INT_SOURCE_P);
		/* All VFs send command to PF, break */
2311 if ((val & GENMASK(vfs_num, 1)) == GENMASK(vfs_num, 1))
2312 break;
2313
2314 if (++cnt > QM_MAX_PF_WAIT_COUNT) {
2315 ret = -EBUSY;
2316 break;
2317 }
2318
2319 msleep(QM_WAIT_DST_ACK);
2320 }
2321
	/* PF check VFs msg */
2323 for (i = 1; i <= vfs_num; i++) {
2324 if (val & BIT(i))
2325 qm_handle_vf_msg(qm, i);
2326 else
			dev_err(dev, "VF(%u) did not ping PF!\n", i);
2328 }
2329
	/* PF clear interrupt to ack VFs */
2331 qm_clear_cmd_interrupt(qm, val);
2332
2333 return ret;
2334}
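
/*
 * Illustrative note (not upstream text): QM_IFC_INT_SOURCE_P holds a per-VF
 * bitmap in which bit i is set once VF i has pinged the PF. With vfs_num = 3,
 * GENMASK(vfs_num, 1) is 0b1110, so the wait loop above exits only when bits
 * 1..3 are all set, i.e. every VF has reported:
 *
 *	u64 all_vfs = GENMASK(3, 1);	// 0xe
 *	if ((val & all_vfs) == all_vfs)
 *		// all three VFs acknowledged
 */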
2335
2336static void qm_trigger_vf_interrupt(struct hisi_qm *qm, u32 fun_num)
2337{
2338 u32 val;
2339
2340 val = readl(qm->io_base + QM_IFC_INT_CFG);
2341 val &= ~QM_IFC_SEND_ALL_VFS;
2342 val |= fun_num;
2343 writel(val, qm->io_base + QM_IFC_INT_CFG);
2344
2345 val = readl(qm->io_base + QM_IFC_INT_SET_P);
2346 val |= QM_IFC_INT_SET_MASK;
2347 writel(val, qm->io_base + QM_IFC_INT_SET_P);
2348}
2349
2350static void qm_trigger_pf_interrupt(struct hisi_qm *qm)
2351{
2352 u32 val;
2353
2354 val = readl(qm->io_base + QM_IFC_INT_SET_V);
2355 val |= QM_IFC_INT_SET_MASK;
2356 writel(val, qm->io_base + QM_IFC_INT_SET_V);
2357}
2358
2359static int qm_ping_single_vf(struct hisi_qm *qm, u64 cmd, u32 fun_num)
2360{
2361 struct device *dev = &qm->pdev->dev;
2362 struct qm_mailbox mailbox;
2363 int cnt = 0;
2364 u64 val;
2365 int ret;
2366
2367 qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, fun_num, 0);
2368 mutex_lock(&qm->mailbox_lock);
2369 ret = qm_mb_nolock(qm, &mailbox);
2370 if (ret) {
2371 dev_err(dev, "failed to send command to vf(%u)!\n", fun_num);
2372 goto err_unlock;
2373 }
2374
2375 qm_trigger_vf_interrupt(qm, fun_num);
2376 while (true) {
2377 msleep(QM_WAIT_DST_ACK);
2378 val = readq(qm->io_base + QM_IFC_READY_STATUS);
		/* if VF responds, PF notifies VF successfully. */
2380 if (!(val & BIT(fun_num)))
2381 goto err_unlock;
2382
2383 if (++cnt > QM_MAX_PF_WAIT_COUNT) {
2384 dev_err(dev, "failed to get response from VF(%u)!\n", fun_num);
2385 ret = -ETIMEDOUT;
2386 break;
2387 }
2388 }
2389
2390err_unlock:
2391 mutex_unlock(&qm->mailbox_lock);
2392 return ret;
2393}
2394
2395static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
2396{
2397 struct device *dev = &qm->pdev->dev;
2398 u32 vfs_num = qm->vfs_num;
2399 struct qm_mailbox mailbox;
2400 u64 val = 0;
2401 int cnt = 0;
2402 int ret;
2403 u32 i;
2404
2405 qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, QM_MB_PING_ALL_VFS, 0);
2406 mutex_lock(&qm->mailbox_lock);
2407
2408 ret = qm_mb_nolock(qm, &mailbox);
2409 if (ret) {
2410 dev_err(dev, "failed to send command to VFs!\n");
2411 mutex_unlock(&qm->mailbox_lock);
2412 return ret;
2413 }
2414
2415 qm_trigger_vf_interrupt(qm, QM_IFC_SEND_ALL_VFS);
2416 while (true) {
2417 msleep(QM_WAIT_DST_ACK);
2418 val = readq(qm->io_base + QM_IFC_READY_STATUS);
		/* If all VFs acked, PF notifies VFs successfully. */
2420 if (!(val & GENMASK(vfs_num, 1))) {
2421 mutex_unlock(&qm->mailbox_lock);
2422 return 0;
2423 }
2424
2425 if (++cnt > QM_MAX_PF_WAIT_COUNT)
2426 break;
2427 }
2428
2429 mutex_unlock(&qm->mailbox_lock);
2430
	/* Check which VFs failed to respond in time. */
2432 for (i = 1; i <= vfs_num; i++) {
2433 if (val & BIT(i))
2434 dev_err(dev, "failed to get response from VF(%u)!\n", i);
2435 }
2436
2437 return -ETIMEDOUT;
2438}
2439
2440static int qm_ping_pf(struct hisi_qm *qm, u64 cmd)
2441{
2442 struct qm_mailbox mailbox;
2443 int cnt = 0;
2444 u32 val;
2445 int ret;
2446
2447 qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, 0, 0);
2448 mutex_lock(&qm->mailbox_lock);
2449 ret = qm_mb_nolock(qm, &mailbox);
2450 if (ret) {
2451 dev_err(&qm->pdev->dev, "failed to send command to PF!\n");
2452 goto unlock;
2453 }
2454
2455 qm_trigger_pf_interrupt(qm);
	/* Waiting for PF response */
2457 while (true) {
2458 msleep(QM_WAIT_DST_ACK);
2459 val = readl(qm->io_base + QM_IFC_INT_SET_V);
2460 if (!(val & QM_IFC_INT_STATUS_MASK))
2461 break;
2462
2463 if (++cnt > QM_MAX_VF_WAIT_COUNT) {
2464 ret = -ETIMEDOUT;
2465 break;
2466 }
2467 }
2468
2469unlock:
2470 mutex_unlock(&qm->mailbox_lock);
2471 return ret;
2472}
2473
2474static int qm_stop_qp(struct hisi_qp *qp)
2475{
2476 return hisi_qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0);
2477}
2478
2479static int qm_set_msi(struct hisi_qm *qm, bool set)
2480{
2481 struct pci_dev *pdev = qm->pdev;
2482
2483 if (set) {
2484 pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
2485 0);
2486 } else {
2487 pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
2488 ACC_PEH_MSI_DISABLE);
2489 if (qm->err_status.is_qm_ecc_mbit ||
2490 qm->err_status.is_dev_ecc_mbit)
2491 return 0;
2492
2493 mdelay(1);
2494 if (readl(qm->io_base + QM_PEH_DFX_INFO0))
2495 return -EFAULT;
2496 }
2497
2498 return 0;
2499}
2500
2501static void qm_wait_msi_finish(struct hisi_qm *qm)
2502{
2503 struct pci_dev *pdev = qm->pdev;
2504 u32 cmd = ~0;
2505 int cnt = 0;
2506 u32 val;
2507 int ret;
2508
2509 while (true) {
2510 pci_read_config_dword(pdev, pdev->msi_cap +
2511 PCI_MSI_PENDING_64, &cmd);
2512 if (!cmd)
2513 break;
2514
2515 if (++cnt > MAX_WAIT_COUNTS) {
2516 pci_warn(pdev, "failed to empty MSI PENDING!\n");
2517 break;
2518 }
2519
2520 udelay(1);
2521 }
2522
2523 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO0,
2524 val, !(val & QM_PEH_DFX_MASK),
2525 POLL_PERIOD, POLL_TIMEOUT);
2526 if (ret)
2527 pci_warn(pdev, "failed to empty PEH MSI!\n");
2528
2529 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO1,
2530 val, !(val & QM_PEH_MSI_FINISH_MASK),
2531 POLL_PERIOD, POLL_TIMEOUT);
2532 if (ret)
2533 pci_warn(pdev, "failed to finish MSI operation!\n");
2534}
2535
2536static int qm_set_msi_v3(struct hisi_qm *qm, bool set)
2537{
2538 struct pci_dev *pdev = qm->pdev;
2539 int ret = -ETIMEDOUT;
2540 u32 cmd, i;
2541
2542 pci_read_config_dword(pdev, pdev->msi_cap, &cmd);
2543 if (set)
2544 cmd |= QM_MSI_CAP_ENABLE;
2545 else
2546 cmd &= ~QM_MSI_CAP_ENABLE;
2547
2548 pci_write_config_dword(pdev, pdev->msi_cap, cmd);
2549 if (set) {
2550 for (i = 0; i < MAX_WAIT_COUNTS; i++) {
2551 pci_read_config_dword(pdev, pdev->msi_cap, &cmd);
2552 if (cmd & QM_MSI_CAP_ENABLE)
2553 return 0;
2554
2555 udelay(1);
2556 }
2557 } else {
2558 udelay(WAIT_PERIOD_US_MIN);
2559 qm_wait_msi_finish(qm);
2560 ret = 0;
2561 }
2562
2563 return ret;
2564}
2565
2566static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
2567 .qm_db = qm_db_v1,
2568 .get_irq_num = qm_get_irq_num_v1,
2569 .hw_error_init = qm_hw_error_init_v1,
2570 .set_msi = qm_set_msi,
2571};
2572
2573static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
2574 .get_vft = qm_get_vft_v2,
2575 .qm_db = qm_db_v2,
2576 .get_irq_num = qm_get_irq_num_v2,
2577 .hw_error_init = qm_hw_error_init_v2,
2578 .hw_error_uninit = qm_hw_error_uninit_v2,
2579 .hw_error_handle = qm_hw_error_handle_v2,
2580 .set_msi = qm_set_msi,
2581};
2582
2583static const struct hisi_qm_hw_ops qm_hw_ops_v3 = {
2584 .get_vft = qm_get_vft_v2,
2585 .qm_db = qm_db_v2,
2586 .get_irq_num = qm_get_irq_num_v3,
2587 .hw_error_init = qm_hw_error_init_v3,
2588 .hw_error_uninit = qm_hw_error_uninit_v3,
2589 .hw_error_handle = qm_hw_error_handle_v2,
2590 .stop_qp = qm_stop_qp,
2591 .set_msi = qm_set_msi_v3,
2592 .ping_all_vfs = qm_ping_all_vfs,
2593 .ping_pf = qm_ping_pf,
2594};
2595
2596static void *qm_get_avail_sqe(struct hisi_qp *qp)
2597{
2598 struct hisi_qp_status *qp_status = &qp->qp_status;
2599 u16 sq_tail = qp_status->sq_tail;
2600
2601 if (unlikely(atomic_read(&qp->qp_status.used) == QM_Q_DEPTH - 1))
2602 return NULL;
2603
2604 return qp->sqe + sq_tail * qp->qm->sqe_size;
2605}
2606
2607static void hisi_qm_unset_hw_reset(struct hisi_qp *qp)
2608{
2609 u64 *addr;
2610
	/* Use last 64 bits of DUS to reset status. */
2612 addr = (u64 *)(qp->qdma.va + qp->qdma.size) - QM_RESET_STOP_TX_OFFSET;
2613 *addr = 0;
2614}
2615
2616static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
2617{
2618 struct device *dev = &qm->pdev->dev;
2619 struct hisi_qp *qp;
2620 int qp_id;
2621
2622 if (!qm_qp_avail_state(qm, NULL, QP_INIT))
2623 return ERR_PTR(-EPERM);
2624
2625 if (qm->qp_in_used == qm->qp_num) {
2626 dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
2627 qm->qp_num);
2628 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
2629 return ERR_PTR(-EBUSY);
2630 }
2631
2632 qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC);
2633 if (qp_id < 0) {
2634 dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
2635 qm->qp_num);
2636 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
2637 return ERR_PTR(-EBUSY);
2638 }
2639
2640 qp = &qm->qp_array[qp_id];
2641 hisi_qm_unset_hw_reset(qp);
2642 memset(qp->cqe, 0, sizeof(struct qm_cqe) * QM_Q_DEPTH);
2643
2644 qp->event_cb = NULL;
2645 qp->req_cb = NULL;
2646 qp->qp_id = qp_id;
2647 qp->alg_type = alg_type;
2648 qp->is_in_kernel = true;
2649 qm->qp_in_used++;
2650 atomic_set(&qp->qp_status.flags, QP_INIT);
2651
2652 return qp;
2653}
2654
/**
 * hisi_qm_create_qp() - Create a queue pair from qm.
 * @qm: The qm we create a qp from.
 * @alg_type: Accelerator specific algorithm type in sqc.
 *
 * Return the created qp, -EBUSY if all qps in the qm are allocated, or
 * -ENOMEM if allocating qp memory fails.
 */
2663struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
2664{
2665 struct hisi_qp *qp;
2666 int ret;
2667
2668 ret = qm_pm_get_sync(qm);
2669 if (ret)
2670 return ERR_PTR(ret);
2671
2672 down_write(&qm->qps_lock);
2673 qp = qm_create_qp_nolock(qm, alg_type);
2674 up_write(&qm->qps_lock);
2675
2676 if (IS_ERR(qp))
2677 qm_pm_put_sync(qm);
2678
2679 return qp;
2680}
2681EXPORT_SYMBOL_GPL(hisi_qm_create_qp);
2682
/**
 * hisi_qm_release_qp() - Release a qp back to its qm.
 * @qp: The qp we want to release.
 *
 * This function releases the resource of a qp.
 */
2689void hisi_qm_release_qp(struct hisi_qp *qp)
2690{
2691 struct hisi_qm *qm = qp->qm;
2692
2693 down_write(&qm->qps_lock);
2694
2695 if (!qm_qp_avail_state(qm, qp, QP_CLOSE)) {
2696 up_write(&qm->qps_lock);
2697 return;
2698 }
2699
2700 qm->qp_in_used--;
2701 idr_remove(&qm->qp_idr, qp->qp_id);
2702
2703 up_write(&qm->qps_lock);
2704
2705 qm_pm_put_sync(qm);
2706}
2707EXPORT_SYMBOL_GPL(hisi_qm_release_qp);
2708
2709static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
2710{
2711 struct hisi_qm *qm = qp->qm;
2712 struct device *dev = &qm->pdev->dev;
2713 enum qm_hw_ver ver = qm->ver;
2714 struct qm_sqc *sqc;
2715 dma_addr_t sqc_dma;
2716 int ret;
2717
2718 sqc = kzalloc(sizeof(struct qm_sqc), GFP_KERNEL);
2719 if (!sqc)
2720 return -ENOMEM;
2721
2722 INIT_QC_COMMON(sqc, qp->sqe_dma, pasid);
2723 if (ver == QM_HW_V1) {
2724 sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
2725 sqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
2726 } else {
2727 sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size));
2728 sqc->w8 = 0;
2729 }
2730 sqc->cq_num = cpu_to_le16(qp_id);
2731 sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type));
2732
2733 if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
2734 sqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE <<
2735 QM_QC_PASID_ENABLE_SHIFT);
2736
2737 sqc_dma = dma_map_single(dev, sqc, sizeof(struct qm_sqc),
2738 DMA_TO_DEVICE);
2739 if (dma_mapping_error(dev, sqc_dma)) {
2740 kfree(sqc);
2741 return -ENOMEM;
2742 }
2743
2744 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0);
2745 dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE);
2746 kfree(sqc);
2747
2748 return ret;
2749}
2750
2751static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
2752{
2753 struct hisi_qm *qm = qp->qm;
2754 struct device *dev = &qm->pdev->dev;
2755 enum qm_hw_ver ver = qm->ver;
2756 struct qm_cqc *cqc;
2757 dma_addr_t cqc_dma;
2758 int ret;
2759
2760 cqc = kzalloc(sizeof(struct qm_cqc), GFP_KERNEL);
2761 if (!cqc)
2762 return -ENOMEM;
2763
2764 INIT_QC_COMMON(cqc, qp->cqe_dma, pasid);
2765 if (ver == QM_HW_V1) {
2766 cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0,
2767 QM_QC_CQE_SIZE));
2768 cqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
2769 } else {
2770 cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE));
2771 cqc->w8 = 0;
2772 }
2773 cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);
2774
2775 if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
2776 cqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE);
2777
2778 cqc_dma = dma_map_single(dev, cqc, sizeof(struct qm_cqc),
2779 DMA_TO_DEVICE);
2780 if (dma_mapping_error(dev, cqc_dma)) {
2781 kfree(cqc);
2782 return -ENOMEM;
2783 }
2784
2785 ret = hisi_qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0);
2786 dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE);
2787 kfree(cqc);
2788
2789 return ret;
2790}
2791
2792static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
2793{
2794 int ret;
2795
2796 qm_init_qp_status(qp);
2797
2798 ret = qm_sq_ctx_cfg(qp, qp_id, pasid);
2799 if (ret)
2800 return ret;
2801
2802 return qm_cq_ctx_cfg(qp, qp_id, pasid);
2803}
2804
2805static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg)
2806{
2807 struct hisi_qm *qm = qp->qm;
2808 struct device *dev = &qm->pdev->dev;
2809 int qp_id = qp->qp_id;
2810 u32 pasid = arg;
2811 int ret;
2812
2813 if (!qm_qp_avail_state(qm, qp, QP_START))
2814 return -EPERM;
2815
2816 ret = qm_qp_ctx_cfg(qp, qp_id, pasid);
2817 if (ret)
2818 return ret;
2819
2820 atomic_set(&qp->qp_status.flags, QP_START);
2821 dev_dbg(dev, "queue %d started\n", qp_id);
2822
2823 return 0;
2824}
2825
/**
 * hisi_qm_start_qp() - Start a qp into running.
 * @qp: The qp we want to start to run.
 * @arg: Accelerator specific argument, the pasid for user space queues.
 *
 * After this function, the qp can receive requests from users. Return 0 if
 * successful, a negative errno if failed.
 */
2834int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
2835{
2836 struct hisi_qm *qm = qp->qm;
2837 int ret;
2838
2839 down_write(&qm->qps_lock);
2840 ret = qm_start_qp_nolock(qp, arg);
2841 up_write(&qm->qps_lock);
2842
2843 return ret;
2844}
2845EXPORT_SYMBOL_GPL(hisi_qm_start_qp);
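
/*
 * Illustrative sketch of the qp lifecycle from an accelerator driver's point
 * of view (assumed caller code, not part of this file):
 *
 *	struct hisi_qp *qp;
 *	int ret;
 *
 *	qp = hisi_qm_create_qp(qm, alg_type);
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 *
 *	ret = hisi_qm_start_qp(qp, 0);	// arg is the pasid, 0 for in-kernel
 *	if (ret) {
 *		hisi_qm_release_qp(qp);
 *		return ret;
 *	}
 *
 *	// ... submit work with hisi_qp_send() ...
 *
 *	hisi_qm_stop_qp(qp);
 *	hisi_qm_release_qp(qp);
 */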
2846
/**
 * qp_stop_fail_cb() - call request cb.
 * @qp: stopped failed qp.
 *
 * The request callback should be called whether the task completed or not.
 */
2853static void qp_stop_fail_cb(struct hisi_qp *qp)
2854{
2855 int qp_used = atomic_read(&qp->qp_status.used);
2856 u16 cur_tail = qp->qp_status.sq_tail;
2857 u16 cur_head = (cur_tail + QM_Q_DEPTH - qp_used) % QM_Q_DEPTH;
2858 struct hisi_qm *qm = qp->qm;
2859 u16 pos;
2860 int i;
2861
2862 for (i = 0; i < qp_used; i++) {
2863 pos = (i + cur_head) % QM_Q_DEPTH;
2864 qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos));
2865 atomic_dec(&qp->qp_status.used);
2866 }
2867}
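
/*
 * Worked example (illustrative only): with QM_Q_DEPTH taken as 1024,
 * sq_tail = 2 and qp_used = 3, cur_head = (2 + 1024 - 3) % 1024 = 1023, so
 * the callback above replays the outstanding sqes at ring positions 1023, 0
 * and 1, in submission order.
 */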
2868
/**
 * qm_drain_qp() - Drain a qp.
 * @qp: The qp we want to drain.
 *
 * Determine whether the queue is cleared by judging the tail pointers of
 * sq and cq.
 */
2876static int qm_drain_qp(struct hisi_qp *qp)
2877{
2878 size_t size = sizeof(struct qm_sqc) + sizeof(struct qm_cqc);
2879 struct hisi_qm *qm = qp->qm;
2880 struct device *dev = &qm->pdev->dev;
2881 struct qm_sqc *sqc;
2882 struct qm_cqc *cqc;
2883 dma_addr_t dma_addr;
2884 int ret = 0, i = 0;
2885 void *addr;
2886
	/* No need to judge if master OOO is blocked. */
2888 if (qm_check_dev_error(qm))
2889 return 0;
2890
	/* Kunpeng930 supports drain qp by device */
2892 if (qm->ops->stop_qp) {
2893 ret = qm->ops->stop_qp(qp);
2894 if (ret)
2895 dev_err(dev, "Failed to stop qp(%u)!\n", qp->qp_id);
2896 return ret;
2897 }
2898
2899 addr = qm_ctx_alloc(qm, size, &dma_addr);
2900 if (IS_ERR(addr)) {
2901 dev_err(dev, "Failed to alloc ctx for sqc and cqc!\n");
2902 return -ENOMEM;
2903 }
2904
2905 while (++i) {
2906 ret = qm_dump_sqc_raw(qm, dma_addr, qp->qp_id);
2907 if (ret) {
2908 dev_err_ratelimited(dev, "Failed to dump sqc!\n");
2909 break;
2910 }
2911 sqc = addr;
2912
2913 ret = qm_dump_cqc_raw(qm, (dma_addr + sizeof(struct qm_sqc)),
2914 qp->qp_id);
2915 if (ret) {
2916 dev_err_ratelimited(dev, "Failed to dump cqc!\n");
2917 break;
2918 }
2919 cqc = addr + sizeof(struct qm_sqc);
2920
2921 if ((sqc->tail == cqc->tail) &&
2922 (QM_SQ_TAIL_IDX(sqc) == QM_CQ_TAIL_IDX(cqc)))
2923 break;
2924
2925 if (i == MAX_WAIT_COUNTS) {
			dev_err(dev, "Failed to empty queue %u!\n", qp->qp_id);
2927 ret = -EBUSY;
2928 break;
2929 }
2930
2931 usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX);
2932 }
2933
2934 qm_ctx_free(qm, size, addr, &dma_addr);
2935
2936 return ret;
2937}
2938
2939static int qm_stop_qp_nolock(struct hisi_qp *qp)
2940{
2941 struct device *dev = &qp->qm->pdev->dev;
2942 int ret;
2943
	/*
	 * It is allowed to stop and release a qp during reset. If the qp is
	 * stopped by the reset but still needs to be released afterwards,
	 * clear the is_resetting flag so that this qp will not be restarted
	 * after the reset completes.
	 */
2950 if (atomic_read(&qp->qp_status.flags) == QP_STOP) {
2951 qp->is_resetting = false;
2952 return 0;
2953 }
2954
2955 if (!qm_qp_avail_state(qp->qm, qp, QP_STOP))
2956 return -EPERM;
2957
2958 atomic_set(&qp->qp_status.flags, QP_STOP);
2959
2960 ret = qm_drain_qp(qp);
2961 if (ret)
2962 dev_err(dev, "Failed to drain out data for stopping!\n");
2963
2964 if (qp->qm->wq)
2965 flush_workqueue(qp->qm->wq);
2966 else
2967 flush_work(&qp->qm->work);
2968
2969 if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used)))
2970 qp_stop_fail_cb(qp);
2971
2972 dev_dbg(dev, "stop queue %u!", qp->qp_id);
2973
2974 return 0;
2975}
2976
/**
 * hisi_qm_stop_qp() - Stop a qp in qm.
 * @qp: The qp we want to stop.
 *
 * This function is the reverse of hisi_qm_start_qp. Return 0 if successful.
 */
2983int hisi_qm_stop_qp(struct hisi_qp *qp)
2984{
2985 int ret;
2986
2987 down_write(&qp->qm->qps_lock);
2988 ret = qm_stop_qp_nolock(qp);
2989 up_write(&qp->qm->qps_lock);
2990
2991 return ret;
2992}
2993EXPORT_SYMBOL_GPL(hisi_qm_stop_qp);
2994
/**
 * hisi_qp_send() - Queue up a task in the hardware queue.
 * @qp: The qp in which to put the message.
 * @msg: The message.
 *
 * This function will return -EBUSY if the qp is currently full, and -EAGAIN
 * if the related qm is resetting.
 *
 * Note: This function may run concurrently with qm_irq_thread and ACC reset.
 *       It has no race with qm_irq_thread. However, an ACC reset may happen
 *       during hisi_qp_send; we take no lock here for performance. This may
 *       cause the current qm_db to fail, or the sent sqe to never be
 *       received. The QM sync/async receive functions should handle the
 *       error sqe, and the ACC reset done function should clear used sqes
 *       to 0.
 */
3010int hisi_qp_send(struct hisi_qp *qp, const void *msg)
3011{
3012 struct hisi_qp_status *qp_status = &qp->qp_status;
3013 u16 sq_tail = qp_status->sq_tail;
3014 u16 sq_tail_next = (sq_tail + 1) % QM_Q_DEPTH;
3015 void *sqe = qm_get_avail_sqe(qp);
3016
3017 if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP ||
3018 atomic_read(&qp->qm->status.flags) == QM_STOP ||
3019 qp->is_resetting)) {
3020 dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n");
3021 return -EAGAIN;
3022 }
3023
3024 if (!sqe)
3025 return -EBUSY;
3026
3027 memcpy(sqe, msg, qp->qm->sqe_size);
3028
3029 qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0);
3030 atomic_inc(&qp->qp_status.used);
3031 qp_status->sq_tail = sq_tail_next;
3032
3033 return 0;
3034}
3035EXPORT_SYMBOL_GPL(hisi_qp_send);
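
/*
 * Illustrative submission loop (assumed caller code, not part of this file):
 * -EBUSY means the ring is full and the request may be retried once
 * completions free a slot; -EAGAIN means the qp or its qm is stopping.
 *
 *	do {
 *		ret = hisi_qp_send(qp, sqe);
 *	} while (ret == -EBUSY && retries--);
 */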
3036
3037static void hisi_qm_cache_wb(struct hisi_qm *qm)
3038{
3039 unsigned int val;
3040
3041 if (qm->ver == QM_HW_V1)
3042 return;
3043
3044 writel(0x1, qm->io_base + QM_CACHE_WB_START);
3045 if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
3046 val, val & BIT(0), POLL_PERIOD,
3047 POLL_TIMEOUT))
3048 dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n");
3049}
3050
3051static void qm_qp_event_notifier(struct hisi_qp *qp)
3052{
3053 wake_up_interruptible(&qp->uacce_q->wait);
3054}
3055
3056static int hisi_qm_get_available_instances(struct uacce_device *uacce)
3057{
3058 return hisi_qm_get_free_qp_num(uacce->priv);
3059}
3060
3061static void hisi_qm_set_hw_reset(struct hisi_qm *qm, int offset)
3062{
3063 int i;
3064
3065 for (i = 0; i < qm->qp_num; i++)
3066 qm_set_qp_disable(&qm->qp_array[i], offset);
3067}
3068
3069static int hisi_qm_uacce_get_queue(struct uacce_device *uacce,
3070 unsigned long arg,
3071 struct uacce_queue *q)
3072{
3073 struct hisi_qm *qm = uacce->priv;
3074 struct hisi_qp *qp;
3075 u8 alg_type = 0;
3076
3077 qp = hisi_qm_create_qp(qm, alg_type);
3078 if (IS_ERR(qp))
3079 return PTR_ERR(qp);
3080
3081 q->priv = qp;
3082 q->uacce = uacce;
3083 qp->uacce_q = q;
3084 qp->event_cb = qm_qp_event_notifier;
3085 qp->pasid = arg;
3086 qp->is_in_kernel = false;
3087
3088 return 0;
3089}
3090
3091static void hisi_qm_uacce_put_queue(struct uacce_queue *q)
3092{
3093 struct hisi_qp *qp = q->priv;
3094
3095 hisi_qm_cache_wb(qp->qm);
3096 hisi_qm_release_qp(qp);
3097}
3098
/* map sq/cq/doorbell to user space */
3100static int hisi_qm_uacce_mmap(struct uacce_queue *q,
3101 struct vm_area_struct *vma,
3102 struct uacce_qfile_region *qfr)
3103{
3104 struct hisi_qp *qp = q->priv;
3105 struct hisi_qm *qm = qp->qm;
3106 resource_size_t phys_base = qm->db_phys_base +
3107 qp->qp_id * qm->db_interval;
3108 size_t sz = vma->vm_end - vma->vm_start;
3109 struct pci_dev *pdev = qm->pdev;
3110 struct device *dev = &pdev->dev;
3111 unsigned long vm_pgoff;
3112 int ret;
3113
3114 switch (qfr->type) {
3115 case UACCE_QFRT_MMIO:
3116 if (qm->ver == QM_HW_V1) {
3117 if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR)
3118 return -EINVAL;
3119 } else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation) {
3120 if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR +
3121 QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE))
3122 return -EINVAL;
3123 } else {
3124 if (sz > qm->db_interval)
3125 return -EINVAL;
3126 }
3127
3128 vma->vm_flags |= VM_IO;
3129
3130 return remap_pfn_range(vma, vma->vm_start,
3131 phys_base >> PAGE_SHIFT,
3132 sz, pgprot_noncached(vma->vm_page_prot));
3133 case UACCE_QFRT_DUS:
3134 if (sz != qp->qdma.size)
3135 return -EINVAL;
3136
		/*
		 * dma_mmap_coherent() requires vm_pgoff as 0,
		 * restore vm_pgoff to its initial value for mmap()
		 */
3141 vm_pgoff = vma->vm_pgoff;
3142 vma->vm_pgoff = 0;
3143 ret = dma_mmap_coherent(dev, vma, qp->qdma.va,
3144 qp->qdma.dma, sz);
3145 vma->vm_pgoff = vm_pgoff;
3146 return ret;
3147
3148 default:
3149 return -EINVAL;
3150 }
3151}
3152
3153static int hisi_qm_uacce_start_queue(struct uacce_queue *q)
3154{
3155 struct hisi_qp *qp = q->priv;
3156
3157 return hisi_qm_start_qp(qp, qp->pasid);
3158}
3159
3160static void hisi_qm_uacce_stop_queue(struct uacce_queue *q)
3161{
3162 hisi_qm_stop_qp(q->priv);
3163}
3164
3165static int hisi_qm_is_q_updated(struct uacce_queue *q)
3166{
3167 struct hisi_qp *qp = q->priv;
3168 struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
3169 int updated = 0;
3170
3171 while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
		/* make sure to read data from memory */
3173 dma_rmb();
3174 qm_cq_head_update(qp);
3175 cqe = qp->cqe + qp->qp_status.cq_head;
3176 updated = 1;
3177 }
3178
3179 return updated;
3180}
3181
3182static void qm_set_sqctype(struct uacce_queue *q, u16 type)
3183{
3184 struct hisi_qm *qm = q->uacce->priv;
3185 struct hisi_qp *qp = q->priv;
3186
3187 down_write(&qm->qps_lock);
3188 qp->alg_type = type;
3189 up_write(&qm->qps_lock);
3190}
3191
3192static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
3193 unsigned long arg)
3194{
3195 struct hisi_qp *qp = q->priv;
3196 struct hisi_qp_ctx qp_ctx;
3197
3198 if (cmd == UACCE_CMD_QM_SET_QP_CTX) {
3199 if (copy_from_user(&qp_ctx, (void __user *)arg,
3200 sizeof(struct hisi_qp_ctx)))
3201 return -EFAULT;
3202
3203 if (qp_ctx.qc_type != 0 && qp_ctx.qc_type != 1)
3204 return -EINVAL;
3205
3206 qm_set_sqctype(q, qp_ctx.qc_type);
3207 qp_ctx.id = qp->qp_id;
3208
3209 if (copy_to_user((void __user *)arg, &qp_ctx,
3210 sizeof(struct hisi_qp_ctx)))
3211 return -EFAULT;
3212 } else {
3213 return -EINVAL;
3214 }
3215
3216 return 0;
3217}
3218
3219static const struct uacce_ops uacce_qm_ops = {
3220 .get_available_instances = hisi_qm_get_available_instances,
3221 .get_queue = hisi_qm_uacce_get_queue,
3222 .put_queue = hisi_qm_uacce_put_queue,
3223 .start_queue = hisi_qm_uacce_start_queue,
3224 .stop_queue = hisi_qm_uacce_stop_queue,
3225 .mmap = hisi_qm_uacce_mmap,
3226 .ioctl = hisi_qm_uacce_ioctl,
3227 .is_q_updated = hisi_qm_is_q_updated,
3228};
3229
3230static int qm_alloc_uacce(struct hisi_qm *qm)
3231{
3232 struct pci_dev *pdev = qm->pdev;
3233 struct uacce_device *uacce;
3234 unsigned long mmio_page_nr;
3235 unsigned long dus_page_nr;
3236 struct uacce_interface interface = {
3237 .flags = UACCE_DEV_SVA,
3238 .ops = &uacce_qm_ops,
3239 };
3240 int ret;
3241
3242 ret = strscpy(interface.name, dev_driver_string(&pdev->dev),
3243 sizeof(interface.name));
3244 if (ret < 0)
3245 return -ENAMETOOLONG;
3246
3247 uacce = uacce_alloc(&pdev->dev, &interface);
3248 if (IS_ERR(uacce))
3249 return PTR_ERR(uacce);
3250
3251 if (uacce->flags & UACCE_DEV_SVA) {
3252 qm->use_sva = true;
3253 } else {
		/* only consider sva case */
3255 uacce_remove(uacce);
3256 qm->uacce = NULL;
3257 return -EINVAL;
3258 }
3259
3260 uacce->is_vf = pdev->is_virtfn;
3261 uacce->priv = qm;
3262 uacce->algs = qm->algs;
3263
3264 if (qm->ver == QM_HW_V1)
3265 uacce->api_ver = HISI_QM_API_VER_BASE;
3266 else if (qm->ver == QM_HW_V2)
3267 uacce->api_ver = HISI_QM_API_VER2_BASE;
3268 else
3269 uacce->api_ver = HISI_QM_API_VER3_BASE;
3270
3271 if (qm->ver == QM_HW_V1)
3272 mmio_page_nr = QM_DOORBELL_PAGE_NR;
3273 else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation)
3274 mmio_page_nr = QM_DOORBELL_PAGE_NR +
3275 QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE;
3276 else
3277 mmio_page_nr = qm->db_interval / PAGE_SIZE;
3278
	/* Add one more page for device or qp statuses */
3280 dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH +
3281 sizeof(struct qm_cqe) * QM_Q_DEPTH + PAGE_SIZE) >>
3282 PAGE_SHIFT;
3283
3284 uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr;
3285 uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr;
3286
3287 qm->uacce = uacce;
3288
3289 return 0;
3290}
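
/*
 * Worked example for the DUS sizing above (illustrative only): assuming
 * sqe_size = 128, QM_Q_DEPTH = 1024, sizeof(struct qm_cqe) = 16 and 4K pages,
 * the rings need 128 * 1024 + 16 * 1024 = 147456 bytes = 36 pages, and the
 * extra PAGE_SIZE term reserves one more page for the qp status words:
 *
 *	dus_page_nr = (4095 + 131072 + 16384 + 4096) >> 12 = 37
 */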
3291
/**
 * qm_frozen() - Try to freeze QM to cut continuous queue requests. If
 * there is a user on the QM, return failure without doing anything.
 * @qm: The qm to be frozen.
 *
 * This function freezes QM, then we can do SRIOV disabling.
 */
3299static int qm_frozen(struct hisi_qm *qm)
3300{
3301 if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl))
3302 return 0;
3303
3304 down_write(&qm->qps_lock);
3305
3306 if (!qm->qp_in_used) {
3307 qm->qp_in_used = qm->qp_num;
3308 up_write(&qm->qps_lock);
3309 set_bit(QM_DRIVER_REMOVING, &qm->misc_ctl);
3310 return 0;
3311 }
3312
3313 up_write(&qm->qps_lock);
3314
3315 return -EBUSY;
3316}
3317
3318static int qm_try_frozen_vfs(struct pci_dev *pdev,
3319 struct hisi_qm_list *qm_list)
3320{
3321 struct hisi_qm *qm, *vf_qm;
3322 struct pci_dev *dev;
3323 int ret = 0;
3324
3325 if (!qm_list || !pdev)
3326 return -EINVAL;
3327
	/* Try to freeze all the VFs when disabling SRIOV */
3329 mutex_lock(&qm_list->lock);
3330 list_for_each_entry(qm, &qm_list->list, list) {
3331 dev = qm->pdev;
3332 if (dev == pdev)
3333 continue;
3334 if (pci_physfn(dev) == pdev) {
3335 vf_qm = pci_get_drvdata(dev);
3336 ret = qm_frozen(vf_qm);
3337 if (ret)
3338 goto frozen_fail;
3339 }
3340 }
3341
3342frozen_fail:
3343 mutex_unlock(&qm_list->lock);
3344
3345 return ret;
3346}
3347
/**
 * hisi_qm_wait_task_finish() - Wait until the task is finished
 * when removing the driver.
 * @qm: The qm needed to wait for the task to finish.
 * @qm_list: The list of all available devices.
 */
3354void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
3355{
3356 while (qm_frozen(qm) ||
3357 ((qm->fun_type == QM_HW_PF) &&
3358 qm_try_frozen_vfs(qm->pdev, qm_list))) {
3359 msleep(WAIT_PERIOD);
3360 }
3361
3362 while (test_bit(QM_RST_SCHED, &qm->misc_ctl) ||
3363 test_bit(QM_RESETTING, &qm->misc_ctl))
3364 msleep(WAIT_PERIOD);
3365
3366 udelay(REMOVE_WAIT_DELAY);
3367}
3368EXPORT_SYMBOL_GPL(hisi_qm_wait_task_finish);
3369
/**
 * hisi_qm_get_free_qp_num() - Get the number of free qps in a qm.
 * @qm: The qm to query.
 *
 * This function returns the number of unallocated qps in the qm.
 */
3376int hisi_qm_get_free_qp_num(struct hisi_qm *qm)
3377{
3378 int ret;
3379
3380 down_read(&qm->qps_lock);
3381 ret = qm->qp_num - qm->qp_in_used;
3382 up_read(&qm->qps_lock);
3383
3384 return ret;
3385}
3386EXPORT_SYMBOL_GPL(hisi_qm_get_free_qp_num);
3387
3388static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num)
3389{
3390 struct device *dev = &qm->pdev->dev;
3391 struct qm_dma *qdma;
3392 int i;
3393
3394 for (i = num - 1; i >= 0; i--) {
3395 qdma = &qm->qp_array[i].qdma;
3396 dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma);
3397 }
3398
3399 kfree(qm->qp_array);
3400}
3401
3402static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id)
3403{
3404 struct device *dev = &qm->pdev->dev;
3405 size_t off = qm->sqe_size * QM_Q_DEPTH;
3406 struct hisi_qp *qp;
3407
3408 qp = &qm->qp_array[id];
3409 qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma,
3410 GFP_KERNEL);
3411 if (!qp->qdma.va)
3412 return -ENOMEM;
3413
3414 qp->sqe = qp->qdma.va;
3415 qp->sqe_dma = qp->qdma.dma;
3416 qp->cqe = qp->qdma.va + off;
3417 qp->cqe_dma = qp->qdma.dma + off;
3418 qp->qdma.size = dma_size;
3419 qp->qm = qm;
3420 qp->qp_id = id;
3421
3422 return 0;
3423}
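
/*
 * Layout of the per-qp DMA region set up above (illustrative sketch):
 *
 *	offset 0:			sq entries (qp->sqe),
 *					sqe_size * QM_Q_DEPTH bytes
 *	offset sqe_size * QM_Q_DEPTH:	cq entries (qp->cqe),
 *					sizeof(struct qm_cqe) * QM_Q_DEPTH bytes
 *	end of region:			status words addressed by
 *					hisi_qm_unset_hw_reset() and
 *					qm_set_qp_disable() via
 *					QM_RESET_STOP_TX/RX_OFFSET
 */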
3424
3425static void hisi_qm_pre_init(struct hisi_qm *qm)
3426{
3427 struct pci_dev *pdev = qm->pdev;
3428
3429 if (qm->ver == QM_HW_V1)
3430 qm->ops = &qm_hw_ops_v1;
3431 else if (qm->ver == QM_HW_V2)
3432 qm->ops = &qm_hw_ops_v2;
3433 else
3434 qm->ops = &qm_hw_ops_v3;
3435
3436 pci_set_drvdata(pdev, qm);
3437 mutex_init(&qm->mailbox_lock);
3438 init_rwsem(&qm->qps_lock);
3439 qm->qp_in_used = 0;
3440 qm->misc_ctl = false;
3441 if (qm->fun_type == QM_HW_PF && qm->ver > QM_HW_V2) {
3442 if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev)))
3443 dev_info(&pdev->dev, "_PS0 and _PR0 are not defined");
3444 }
3445}
3446
3447static void qm_cmd_uninit(struct hisi_qm *qm)
3448{
3449 u32 val;
3450
3451 if (qm->ver < QM_HW_V3)
3452 return;
3453
3454 val = readl(qm->io_base + QM_IFC_INT_MASK);
3455 val |= QM_IFC_INT_DISABLE;
3456 writel(val, qm->io_base + QM_IFC_INT_MASK);
3457}
3458
3459static void qm_cmd_init(struct hisi_qm *qm)
3460{
3461 u32 val;
3462
3463 if (qm->ver < QM_HW_V3)
3464 return;
3465
	/* Clear communication interrupt source */
3467 qm_clear_cmd_interrupt(qm, QM_IFC_INT_SOURCE_CLR);
3468
	/* Enable pf to vf communication reg. */
3470 val = readl(qm->io_base + QM_IFC_INT_MASK);
3471 val &= ~QM_IFC_INT_DISABLE;
3472 writel(val, qm->io_base + QM_IFC_INT_MASK);
3473}
3474
3475static void qm_put_pci_res(struct hisi_qm *qm)
3476{
3477 struct pci_dev *pdev = qm->pdev;
3478
3479 if (qm->use_db_isolation)
3480 iounmap(qm->db_io_base);
3481
3482 iounmap(qm->io_base);
3483 pci_release_mem_regions(pdev);
3484}
3485
3486static void hisi_qm_pci_uninit(struct hisi_qm *qm)
3487{
3488 struct pci_dev *pdev = qm->pdev;
3489
3490 pci_free_irq_vectors(pdev);
3491 qm_put_pci_res(qm);
3492 pci_disable_device(pdev);
3493}
3494
3495static void hisi_qm_set_state(struct hisi_qm *qm, u8 state)
3496{
3497 if (qm->ver > QM_HW_V2 && qm->fun_type == QM_HW_VF)
3498 writel(state, qm->io_base + QM_VF_STATE);
3499}
3500
/**
 * hisi_qm_uninit() - Uninitialize qm.
 * @qm: The qm to be uninitialized.
 *
 * This function uninits qm related device resources.
 */
3507void hisi_qm_uninit(struct hisi_qm *qm)
3508{
3509 struct pci_dev *pdev = qm->pdev;
3510 struct device *dev = &pdev->dev;
3511
3512 qm_cmd_uninit(qm);
3513 kfree(qm->factor);
3514 down_write(&qm->qps_lock);
3515
3516 if (!qm_avail_state(qm, QM_CLOSE)) {
3517 up_write(&qm->qps_lock);
3518 return;
3519 }
3520
3521 hisi_qp_memory_uninit(qm, qm->qp_num);
3522 idr_destroy(&qm->qp_idr);
3523
3524 if (qm->qdma.va) {
3525 hisi_qm_cache_wb(qm);
3526 dma_free_coherent(dev, qm->qdma.size,
3527 qm->qdma.va, qm->qdma.dma);
3528 }
3529 hisi_qm_set_state(qm, QM_NOT_READY);
3530 up_write(&qm->qps_lock);
3531
3532 qm_irq_unregister(qm);
3533 hisi_qm_pci_uninit(qm);
3534 if (qm->use_sva) {
3535 uacce_remove(qm->uacce);
3536 qm->uacce = NULL;
3537 }
3538}
3539EXPORT_SYMBOL_GPL(hisi_qm_uninit);
3540
/**
 * hisi_qm_get_vft() - Get vft from a qm.
 * @qm: The qm whose vft we want to read.
 * @base: Returns the base queue number in the vft.
 * @number: Returns the number of queues in the vft.
 *
 * We can allocate multiple queues to a qm by configuring the virtual
 * function table (vft); hisi_qm_get_vft reads that configuration back.
 * Normally this function should not be called by accelerator device
 * drivers; it is mainly used by the VF driver, where the vft is set up
 * by the PF.
 */
3553int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number)
3554{
3555 if (!base || !number)
3556 return -EINVAL;
3557
3558 if (!qm->ops->get_vft) {
		dev_err(&qm->pdev->dev, "vft read is not supported!\n");
3560 return -EINVAL;
3561 }
3562
3563 return qm->ops->get_vft(qm, base, number);
3564}
3565EXPORT_SYMBOL_GPL(hisi_qm_get_vft);
3566
/**
 * hisi_qm_set_vft() - Set vft to a qm.
 * @qm: The qm we want to set its vft.
 * @fun_num: The function number.
 * @base: The base number of queue in vft.
 * @number: The number of queues in vft.
 *
 * This function is always called in PF driver, it is used to assign queues
 * to PF or VF.
 *
 * Assign queues A~B to PF: hisi_qm_set_vft(qm, 0, A, B - A + 1)
 * Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1)
 * (VF function number 0x2)
 */
3581static int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
3582 u32 number)
3583{
3584 u32 max_q_num = qm->ctrl_qp_num;
3585
3586 if (base >= max_q_num || number > max_q_num ||
3587 (base + number) > max_q_num)
3588 return -EINVAL;
3589
3590 return qm_set_sqc_cqc_vft(qm, fun_num, base, number);
3591}
3592
3593static void qm_init_eq_aeq_status(struct hisi_qm *qm)
3594{
3595 struct hisi_qm_status *status = &qm->status;
3596
3597 status->eq_head = 0;
3598 status->aeq_head = 0;
3599 status->eqc_phase = true;
3600 status->aeqc_phase = true;
3601}
3602
3603static void qm_enable_eq_aeq_interrupts(struct hisi_qm *qm)
3604{
	/* Clear eq/aeq interrupt source */
3606 qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
3607 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
3608
3609 writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK);
3610 writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK);
3611}
3612
3613static void qm_disable_eq_aeq_interrupts(struct hisi_qm *qm)
3614{
3615 writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK);
3616 writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK);
3617}
3618
3619static int qm_eq_ctx_cfg(struct hisi_qm *qm)
3620{
3621 struct device *dev = &qm->pdev->dev;
3622 struct qm_eqc *eqc;
3623 dma_addr_t eqc_dma;
3624 int ret;
3625
3626 eqc = kzalloc(sizeof(struct qm_eqc), GFP_KERNEL);
3627 if (!eqc)
3628 return -ENOMEM;
3629
3630 eqc->base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma));
3631 eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma));
3632 if (qm->ver == QM_HW_V1)
3633 eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE);
3634 eqc->dw6 = cpu_to_le32((QM_EQ_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
3635
3636 eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc),
3637 DMA_TO_DEVICE);
3638 if (dma_mapping_error(dev, eqc_dma)) {
3639 kfree(eqc);
3640 return -ENOMEM;
3641 }
3642
3643 ret = hisi_qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0);
3644 dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE);
3645 kfree(eqc);
3646
3647 return ret;
3648}
3649
3650static int qm_aeq_ctx_cfg(struct hisi_qm *qm)
3651{
3652 struct device *dev = &qm->pdev->dev;
3653 struct qm_aeqc *aeqc;
3654 dma_addr_t aeqc_dma;
3655 int ret;
3656
3657 aeqc = kzalloc(sizeof(struct qm_aeqc), GFP_KERNEL);
3658 if (!aeqc)
3659 return -ENOMEM;
3660
3661 aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
3662 aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
3663 aeqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
3664
3665 aeqc_dma = dma_map_single(dev, aeqc, sizeof(struct qm_aeqc),
3666 DMA_TO_DEVICE);
3667 if (dma_mapping_error(dev, aeqc_dma)) {
3668 kfree(aeqc);
3669 return -ENOMEM;
3670 }
3671
3672 ret = hisi_qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0);
3673 dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE);
3674 kfree(aeqc);
3675
3676 return ret;
3677}
3678
3679static int qm_eq_aeq_ctx_cfg(struct hisi_qm *qm)
3680{
3681 struct device *dev = &qm->pdev->dev;
3682 int ret;
3683
3684 qm_init_eq_aeq_status(qm);
3685
3686 ret = qm_eq_ctx_cfg(qm);
3687 if (ret) {
3688 dev_err(dev, "Set eqc failed!\n");
3689 return ret;
3690 }
3691
3692 return qm_aeq_ctx_cfg(qm);
3693}
3694
3695static int __hisi_qm_start(struct hisi_qm *qm)
3696{
3697 int ret;
3698
3699 WARN_ON(!qm->qdma.va);
3700
3701 if (qm->fun_type == QM_HW_PF) {
3702 ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num);
3703 if (ret)
3704 return ret;
3705 }
3706
3707 ret = qm_eq_aeq_ctx_cfg(qm);
3708 if (ret)
3709 return ret;
3710
3711 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
3712 if (ret)
3713 return ret;
3714
3715 ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
3716 if (ret)
3717 return ret;
3718
3719 qm_init_prefetch(qm);
3720 qm_enable_eq_aeq_interrupts(qm);
3721
3722 return 0;
3723}
3724
/**
 * hisi_qm_start() - start qm
 * @qm: The qm to be started.
 *
 * This function starts a qm, then we can allocate qp from this qm.
 */
3731int hisi_qm_start(struct hisi_qm *qm)
3732{
3733 struct device *dev = &qm->pdev->dev;
3734 int ret = 0;
3735
3736 down_write(&qm->qps_lock);
3737
3738 if (!qm_avail_state(qm, QM_START)) {
3739 up_write(&qm->qps_lock);
3740 return -EPERM;
3741 }
3742
3743 dev_dbg(dev, "qm start with %u queue pairs\n", qm->qp_num);
3744
3745 if (!qm->qp_num) {
3746 dev_err(dev, "qp_num should not be 0\n");
3747 ret = -EINVAL;
3748 goto err_unlock;
3749 }
3750
3751 ret = __hisi_qm_start(qm);
3752 if (!ret)
3753 atomic_set(&qm->status.flags, QM_START);
3754
3755 hisi_qm_set_state(qm, QM_READY);
3756err_unlock:
3757 up_write(&qm->qps_lock);
3758 return ret;
3759}
3760EXPORT_SYMBOL_GPL(hisi_qm_start);
3761
3762static int qm_restart(struct hisi_qm *qm)
3763{
3764 struct device *dev = &qm->pdev->dev;
3765 struct hisi_qp *qp;
3766 int ret, i;
3767
3768 ret = hisi_qm_start(qm);
3769 if (ret < 0)
3770 return ret;
3771
3772 down_write(&qm->qps_lock);
3773 for (i = 0; i < qm->qp_num; i++) {
3774 qp = &qm->qp_array[i];
		if (atomic_read(&qp->qp_status.flags) == QP_STOP &&
		    qp->is_resetting) {
3777 ret = qm_start_qp_nolock(qp, 0);
3778 if (ret < 0) {
3779 dev_err(dev, "Failed to start qp%d!\n", i);
3780
3781 up_write(&qm->qps_lock);
3782 return ret;
3783 }
3784 qp->is_resetting = false;
3785 }
3786 }
3787 up_write(&qm->qps_lock);
3788
3789 return 0;
3790}
3791
/* Stop started qps in reset flow */
3793static int qm_stop_started_qp(struct hisi_qm *qm)
3794{
3795 struct device *dev = &qm->pdev->dev;
3796 struct hisi_qp *qp;
3797 int i, ret;
3798
3799 for (i = 0; i < qm->qp_num; i++) {
3800 qp = &qm->qp_array[i];
3801 if (qp && atomic_read(&qp->qp_status.flags) == QP_START) {
3802 qp->is_resetting = true;
3803 ret = qm_stop_qp_nolock(qp);
3804 if (ret < 0) {
3805 dev_err(dev, "Failed to stop qp%d!\n", i);
3806 return ret;
3807 }
3808 }
3809 }
3810
3811 return 0;
3812}
3813
/**
 * qm_clear_queues() - Clear all queues memory in a qm.
 * @qm: The qm in which the queues will be cleared.
 *
 * This function clears all queues memory in a qm. Reset of accelerator can
 * use this to clear queues.
 */
3822static void qm_clear_queues(struct hisi_qm *qm)
3823{
3824 struct hisi_qp *qp;
3825 int i;
3826
3827 for (i = 0; i < qm->qp_num; i++) {
3828 qp = &qm->qp_array[i];
3829 if (qp->is_in_kernel && qp->is_resetting)
3830 memset(qp->qdma.va, 0, qp->qdma.size);
3831 }
3832
3833 memset(qm->qdma.va, 0, qm->qdma.size);
3834}
3835
/**
 * hisi_qm_stop() - Stop a qm.
 * @qm: The qm which will be stopped.
 * @r: The reason to stop qm.
 *
 * This function stops qm and its qps, then qm can not accept request.
 * Related resources are not released at this state, we can use hisi_qm_start
 * to let qm start again.
 */
3845int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
3846{
3847 struct device *dev = &qm->pdev->dev;
3848 int ret = 0;
3849
3850 down_write(&qm->qps_lock);
3851
3852 qm->status.stop_reason = r;
3853 if (!qm_avail_state(qm, QM_STOP)) {
3854 ret = -EPERM;
3855 goto err_unlock;
3856 }
3857
3858 if (qm->status.stop_reason == QM_SOFT_RESET ||
3859 qm->status.stop_reason == QM_FLR) {
3860 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
3861 ret = qm_stop_started_qp(qm);
3862 if (ret < 0) {
3863 dev_err(dev, "Failed to stop started qp!\n");
3864 goto err_unlock;
3865 }
3866 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
3867 }
3868
3869 qm_disable_eq_aeq_interrupts(qm);
3870 if (qm->fun_type == QM_HW_PF) {
3871 ret = hisi_qm_set_vft(qm, 0, 0, 0);
3872 if (ret < 0) {
3873 dev_err(dev, "Failed to set vft!\n");
3874 ret = -EBUSY;
3875 goto err_unlock;
3876 }
3877 }
3878
3879 qm_clear_queues(qm);
3880 atomic_set(&qm->status.flags, QM_STOP);
3881
3882err_unlock:
3883 up_write(&qm->qps_lock);
3884 return ret;
3885}
3886EXPORT_SYMBOL_GPL(hisi_qm_stop);
3887
3888static ssize_t qm_status_read(struct file *filp, char __user *buffer,
3889 size_t count, loff_t *pos)
3890{
3891 struct hisi_qm *qm = filp->private_data;
3892 char buf[QM_DBG_READ_LEN];
3893 int val, len;
3894
3895 val = atomic_read(&qm->status.flags);
3896 len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]);
3897
3898 return simple_read_from_buffer(buffer, count, pos, buf, len);
3899}
3900
3901static const struct file_operations qm_status_fops = {
3902 .owner = THIS_MODULE,
3903 .open = simple_open,
3904 .read = qm_status_read,
3905};
3906
3907static int qm_debugfs_atomic64_set(void *data, u64 val)
3908{
3909 if (val)
3910 return -EINVAL;
3911
3912 atomic64_set((atomic64_t *)data, 0);
3913
3914 return 0;
3915}
3916
3917static int qm_debugfs_atomic64_get(void *data, u64 *val)
3918{
3919 *val = atomic64_read((atomic64_t *)data);
3920
3921 return 0;
3922}
3923
3924DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get,
3925 qm_debugfs_atomic64_set, "%llu\n");
3926
3927static void qm_hw_error_init(struct hisi_qm *qm)
3928{
3929 struct hisi_qm_err_info *err_info = &qm->err_info;
3930
3931 if (!qm->ops->hw_error_init) {
3932 dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n");
3933 return;
3934 }
3935
3936 qm->ops->hw_error_init(qm, err_info->ce, err_info->nfe, err_info->fe);
3937}
3938
3939static void qm_hw_error_uninit(struct hisi_qm *qm)
3940{
3941 if (!qm->ops->hw_error_uninit) {
3942 dev_err(&qm->pdev->dev, "Unexpected QM hw error uninit!\n");
3943 return;
3944 }
3945
3946 qm->ops->hw_error_uninit(qm);
3947}
3948
3949static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm)
3950{
3951 if (!qm->ops->hw_error_handle) {
3952 dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n");
3953 return ACC_ERR_NONE;
3954 }
3955
3956 return qm->ops->hw_error_handle(qm);
3957}
3958
/**
 * hisi_qm_dev_err_init() - Initialize device error configuration.
 * @qm: The qm for which we want to do error initialization.
 *
 * Initialize QM and device error related configuration.
 */
3965void hisi_qm_dev_err_init(struct hisi_qm *qm)
3966{
3967 if (qm->fun_type == QM_HW_VF)
3968 return;
3969
3970 qm_hw_error_init(qm);
3971
3972 if (!qm->err_ini->hw_err_enable) {
3973 dev_err(&qm->pdev->dev, "Device doesn't support hw error init!\n");
3974 return;
3975 }
3976 qm->err_ini->hw_err_enable(qm);
3977}
3978EXPORT_SYMBOL_GPL(hisi_qm_dev_err_init);
3979
/**
 * hisi_qm_dev_err_uninit() - Uninitialize device error configuration.
 * @qm: The qm for which we want to do error uninitialization.
 *
 * Uninitialize QM and device error related configuration.
 */
3986void hisi_qm_dev_err_uninit(struct hisi_qm *qm)
3987{
3988 if (qm->fun_type == QM_HW_VF)
3989 return;
3990
3991 qm_hw_error_uninit(qm);
3992
3993 if (!qm->err_ini->hw_err_disable) {
3994 dev_err(&qm->pdev->dev, "Unexpected device hw error uninit!\n");
3995 return;
3996 }
3997 qm->err_ini->hw_err_disable(qm);
3998}
3999EXPORT_SYMBOL_GPL(hisi_qm_dev_err_uninit);
4000
/**
 * hisi_qm_free_qps() - free multiple queue pairs.
 * @qps: The queue pairs to be freed.
 * @qp_num: The number of queue pairs.
 */
4006void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num)
4007{
4008 int i;
4009
4010 if (!qps || qp_num <= 0)
4011 return;
4012
4013 for (i = qp_num - 1; i >= 0; i--)
4014 hisi_qm_release_qp(qps[i]);
4015}
4016EXPORT_SYMBOL_GPL(hisi_qm_free_qps);
4017
4018static void free_list(struct list_head *head)
4019{
4020 struct hisi_qm_resource *res, *tmp;
4021
4022 list_for_each_entry_safe(res, tmp, head, list) {
4023 list_del(&res->list);
4024 kfree(res);
4025 }
4026}
4027
4028static int hisi_qm_sort_devices(int node, struct list_head *head,
4029 struct hisi_qm_list *qm_list)
4030{
4031 struct hisi_qm_resource *res, *tmp;
4032 struct hisi_qm *qm;
4033 struct list_head *n;
4034 struct device *dev;
4035 int dev_node = 0;
4036
4037 list_for_each_entry(qm, &qm_list->list, list) {
4038 dev = &qm->pdev->dev;
4039
4040 if (IS_ENABLED(CONFIG_NUMA)) {
4041 dev_node = dev_to_node(dev);
4042 if (dev_node < 0)
4043 dev_node = 0;
4044 }
4045
4046 res = kzalloc(sizeof(*res), GFP_KERNEL);
4047 if (!res)
4048 return -ENOMEM;
4049
4050 res->qm = qm;
4051 res->distance = node_distance(dev_node, node);
4052 n = head;
4053 list_for_each_entry(tmp, head, list) {
4054 if (res->distance < tmp->distance) {
4055 n = &tmp->list;
4056 break;
4057 }
4058 }
4059 list_add_tail(&res->list, n);
4060 }
4061
4062 return 0;
4063}
4064
/**
 * hisi_qm_alloc_qps_node() - Create multiple queue pairs.
 * @qm_list: The list of all available devices.
 * @qp_num: The number of queue pairs to create.
 * @alg_type: The algorithm type.
 * @node: The numa node.
 * @qps: Returns the created queue pairs.
 *
 * This function sorts all available devices by NUMA distance, then tries
 * to create all queue pairs on one device; if no single device meets the
 * requirement, it returns an error.
 */
4077int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
4078 u8 alg_type, int node, struct hisi_qp **qps)
4079{
4080 struct hisi_qm_resource *tmp;
4081 int ret = -ENODEV;
4082 LIST_HEAD(head);
4083 int i;
4084
4085 if (!qps || !qm_list || qp_num <= 0)
4086 return -EINVAL;
4087
4088 mutex_lock(&qm_list->lock);
4089 if (hisi_qm_sort_devices(node, &head, qm_list)) {
4090 mutex_unlock(&qm_list->lock);
4091 goto err;
4092 }
4093
4094 list_for_each_entry(tmp, &head, list) {
4095 for (i = 0; i < qp_num; i++) {
4096 qps[i] = hisi_qm_create_qp(tmp->qm, alg_type);
4097 if (IS_ERR(qps[i])) {
4098 hisi_qm_free_qps(qps, i);
4099 break;
4100 }
4101 }
4102
4103 if (i == qp_num) {
4104 ret = 0;
4105 break;
4106 }
4107 }
4108
4109 mutex_unlock(&qm_list->lock);
4110 if (ret)
4111 pr_info("Failed to create qps, node[%d], alg[%u], qp[%d]!\n",
4112 node, alg_type, qp_num);
4113
4114err:
4115 free_list(&head);
4116 return ret;
4117}
4118EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node);
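
/*
 * Illustrative sketch (assumed caller code, not part of this file): an
 * accelerator driver requesting two qps close to NUMA node 0, where
 * "my_qm_list" is a hypothetical hisi_qm_list owned by that driver:
 *
 *	struct hisi_qp *qps[2];
 *	int ret;
 *
 *	ret = hisi_qm_alloc_qps_node(&my_qm_list, 2, 0, 0, qps);
 *	if (ret)
 *		return ret;
 *	// ... use qps[0] and qps[1] ...
 *	hisi_qm_free_qps(qps, 2);
 */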
4119
4120static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs)
4121{
4122 u32 remain_q_num, vfs_q_num, act_q_num, q_num, i, j;
4123 u32 max_qp_num = qm->max_qp_num;
4124 u32 q_base = qm->qp_num;
4125 int ret;
4126
4127 if (!num_vfs)
4128 return -EINVAL;
4129
4130 vfs_q_num = qm->ctrl_qp_num - qm->qp_num;
4131
	/* If vfs_q_num is less than num_vfs, return error. */
4133 if (vfs_q_num < num_vfs)
4134 return -EINVAL;
4135
4136 q_num = vfs_q_num / num_vfs;
4137 remain_q_num = vfs_q_num % num_vfs;
4138
4139 for (i = num_vfs; i > 0; i--) {
		/*
		 * if q_num + remain_q_num > max_qp_num in last vf, divide the
		 * remaining queues equally.
		 */
4144 if (i == num_vfs && q_num + remain_q_num <= max_qp_num) {
4145 act_q_num = q_num + remain_q_num;
4146 remain_q_num = 0;
4147 } else if (remain_q_num > 0) {
4148 act_q_num = q_num + 1;
4149 remain_q_num--;
4150 } else {
4151 act_q_num = q_num;
4152 }
4153
4154 act_q_num = min_t(int, act_q_num, max_qp_num);
4155 ret = hisi_qm_set_vft(qm, i, q_base, act_q_num);
4156 if (ret) {
4157 for (j = num_vfs; j > i; j--)
4158 hisi_qm_set_vft(qm, j, 0, 0);
4159 return ret;
4160 }
4161 q_base += act_q_num;
4162 }
4163
4164 return 0;
4165}
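
/*
 * Worked example (illustrative only): with 10 spare queues
 * (ctrl_qp_num - qp_num) shared among 3 VFs, q_num = 3 and remain_q_num = 1.
 * Walking i = 3..1, VF3 takes q_num + remain_q_num = 4 queues (subject to
 * the max_qp_num cap), VF2 and VF1 take 3 each, and q_base advances past
 * every assignment.
 */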
4166
4167static int qm_clear_vft_config(struct hisi_qm *qm)
4168{
4169 int ret;
4170 u32 i;
4171
4172 for (i = 1; i <= qm->vfs_num; i++) {
4173 ret = hisi_qm_set_vft(qm, i, 0, 0);
4174 if (ret)
4175 return ret;
4176 }
4177 qm->vfs_num = 0;
4178
4179 return 0;
4180}
4181
4182static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos)
4183{
4184 struct device *dev = &qm->pdev->dev;
4185 u32 ir = qos * QM_QOS_RATE;
4186 int ret, total_vfs, i;
4187
4188 total_vfs = pci_sriov_get_totalvfs(qm->pdev);
4189 if (fun_index > total_vfs)
4190 return -EINVAL;
4191
4192 qm->factor[fun_index].func_qos = qos;
4193
4194 ret = qm_get_shaper_para(ir, &qm->factor[fun_index]);
4195 if (ret) {
4196 dev_err(dev, "failed to calculate shaper parameter!\n");
4197 return -EINVAL;
4198 }
4199
4200 for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) {
		/* The base number of queue reuse for different alg type */
4202 ret = qm_set_vft_common(qm, SHAPER_VFT, fun_index, i, 1);
4203 if (ret) {
4204 dev_err(dev, "type: %d, failed to set shaper vft!\n", i);
4205 return -EINVAL;
4206 }
4207 }
4208
4209 return 0;
4210}
4211
4212static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index)
4213{
4214 u64 cir_u = 0, cir_b = 0, cir_s = 0;
4215 u64 shaper_vft, ir_calc, ir;
4216 unsigned int val;
4217 u32 error_rate;
4218 int ret;
4219
4220 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
4221 val & BIT(0), POLL_PERIOD,
4222 POLL_TIMEOUT);
4223 if (ret)
4224 return 0;
4225
4226 writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR);
4227 writel(SHAPER_VFT, qm->io_base + QM_VFT_CFG_TYPE);
4228 writel(fun_index, qm->io_base + QM_VFT_CFG);
4229
4230 writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
4231 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);
4232
4233 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
4234 val & BIT(0), POLL_PERIOD,
4235 POLL_TIMEOUT);
4236 if (ret)
4237 return 0;
4238
4239 shaper_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) |
4240 ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) << 32);
4241
4242 cir_b = shaper_vft & QM_SHAPER_CIR_B_MASK;
4243 cir_u = shaper_vft & QM_SHAPER_CIR_U_MASK;
4244 cir_u = cir_u >> QM_SHAPER_FACTOR_CIR_U_SHIFT;
4245
4246 cir_s = shaper_vft & QM_SHAPER_CIR_S_MASK;
4247 cir_s = cir_s >> QM_SHAPER_FACTOR_CIR_S_SHIFT;
4248
4249 ir_calc = acc_shaper_para_calc(cir_b, cir_u, cir_s);
4250
4251 ir = qm->factor[fun_index].func_qos * QM_QOS_RATE;
4252
4253 error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir;
4254 if (error_rate > QM_QOS_MIN_ERROR_RATE) {
		pci_err(qm->pdev, "error_rate: %u, failed to get function qos!\n", error_rate);
4256 return 0;
4257 }
4258
4259 return ir;
4260}
4261
4262static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num)
4263{
4264 struct device *dev = &qm->pdev->dev;
4265 u64 mb_cmd;
4266 u32 qos;
4267 int ret;
4268
4269 qos = qm_get_shaper_vft_qos(qm, fun_num);
4270 if (!qos) {
4271 dev_err(dev, "function(%u) failed to get qos by PF!\n", fun_num);
4272 return;
4273 }
4274
4275 mb_cmd = QM_PF_SET_QOS | (u64)qos << QM_MB_CMD_DATA_SHIFT;
4276 ret = qm_ping_single_vf(qm, mb_cmd, fun_num);
4277 if (ret)
4278 dev_err(dev, "failed to send cmd to VF(%u)!\n", fun_num);
4279}
4280
4281static int qm_vf_read_qos(struct hisi_qm *qm)
4282{
4283 int cnt = 0;
4284 int ret = -EINVAL;
4285
	/* reset mailbox qos val */
4287 qm->mb_qos = 0;
4288
	/* vf ping pf to get function qos */
4290 if (qm->ops->ping_pf) {
4291 ret = qm->ops->ping_pf(qm, QM_VF_GET_QOS);
4292 if (ret) {
4293 pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n");
4294 return ret;
4295 }
4296 }
4297
4298 while (true) {
4299 msleep(QM_WAIT_DST_ACK);
4300 if (qm->mb_qos)
4301 break;
4302
4303 if (++cnt > QM_MAX_VF_WAIT_COUNT) {
4304 pci_err(qm->pdev, "PF ping VF timeout!\n");
4305 return -ETIMEDOUT;
4306 }
4307 }
4308
4309 return ret;
4310}
4311
4312static ssize_t qm_algqos_read(struct file *filp, char __user *buf,
4313 size_t count, loff_t *pos)
4314{
4315 struct hisi_qm *qm = filp->private_data;
4316 char tbuf[QM_DBG_READ_LEN];
4317 u32 qos_val, ir;
4318 int ret;
4319
4320 ret = hisi_qm_get_dfx_access(qm);
4321 if (ret)
4322 return ret;
4323
	/* Mailbox and reset cannot be operated at the same time */
4325 if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
4326 pci_err(qm->pdev, "dev resetting, read alg qos failed!\n");
4327 ret = -EAGAIN;
4328 goto err_put_dfx_access;
4329 }
4330
4331 if (qm->fun_type == QM_HW_PF) {
4332 ir = qm_get_shaper_vft_qos(qm, 0);
4333 } else {
4334 ret = qm_vf_read_qos(qm);
4335 if (ret)
4336 goto err_get_status;
4337 ir = qm->mb_qos;
4338 }
4339
4340 qos_val = ir / QM_QOS_RATE;
4341 ret = scnprintf(tbuf, QM_DBG_READ_LEN, "%u\n", qos_val);
4342
4343 ret = simple_read_from_buffer(buf, count, pos, tbuf, ret);
4344
4345err_get_status:
4346 clear_bit(QM_RESETTING, &qm->misc_ctl);
4347err_put_dfx_access:
4348 hisi_qm_put_dfx_access(qm);
4349 return ret;
4350}
4351
4352static ssize_t qm_qos_value_init(const char *buf, unsigned long *val)
4353{
4354 int buflen = strlen(buf);
4355 int ret, i;
4356
4357 for (i = 0; i < buflen; i++) {
4358 if (!isdigit(buf[i]))
4359 return -EINVAL;
4360 }
4361
4362 ret = sscanf(buf, "%lu", val);
4363 if (ret != QM_QOS_VAL_NUM)
4364 return -EINVAL;
4365
4366 return 0;
4367}
4368
4369static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf,
4370 unsigned long *val,
4371 unsigned int *fun_index)
4372{
4373 char tbuf_bdf[QM_DBG_READ_LEN] = {0};
4374 char val_buf[QM_QOS_VAL_MAX_LEN] = {0};
4375 u32 tmp1, device, function;
4376 int ret, bus;
4377
4378 ret = sscanf(buf, "%s %s", tbuf_bdf, val_buf);
4379 if (ret != QM_QOS_PARAM_NUM)
4380 return -EINVAL;
4381
4382 ret = qm_qos_value_init(val_buf, val);
4383 if (ret || *val == 0 || *val > QM_QOS_MAX_VAL) {
		pci_err(qm->pdev, "invalid qos value, please set a value in 1~1000!\n");
4385 return -EINVAL;
4386 }
4387
4388 ret = sscanf(tbuf_bdf, "%u:%x:%u.%u", &tmp1, &bus, &device, &function);
4389 if (ret != QM_QOS_BDF_PARAM_NUM) {
		pci_err(qm->pdev, "invalid pci bdf value!\n");
4391 return -EINVAL;
4392 }
4393
4394 *fun_index = PCI_DEVFN(device, function);
4395
4396 return 0;
4397}
4398
4399static ssize_t qm_algqos_write(struct file *filp, const char __user *buf,
4400 size_t count, loff_t *pos)
4401{
4402 struct hisi_qm *qm = filp->private_data;
4403 char tbuf[QM_DBG_READ_LEN];
4404 unsigned int fun_index;
4405 unsigned long val;
4406 int len, ret;
4407
4408 if (qm->fun_type == QM_HW_VF)
4409 return -EINVAL;
4410
4411 if (*pos != 0)
4412 return 0;
4413
4414 if (count >= QM_DBG_READ_LEN)
4415 return -ENOSPC;
4416
4417 len = simple_write_to_buffer(tbuf, QM_DBG_READ_LEN - 1, pos, buf, count);
4418 if (len < 0)
4419 return len;
4420
4421 tbuf[len] = '\0';
4422 ret = qm_get_qos_value(qm, tbuf, &val, &fun_index);
4423 if (ret)
4424 return ret;
4425
	/* Mailbox and reset cannot be operated at the same time */
4427 if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
4428 pci_err(qm->pdev, "dev resetting, write alg qos failed!\n");
4429 return -EAGAIN;
4430 }
4431
4432 ret = qm_pm_get_sync(qm);
4433 if (ret) {
4434 ret = -EINVAL;
4435 goto err_get_status;
4436 }
4437
4438 ret = qm_func_shaper_enable(qm, fun_index, val);
4439 if (ret) {
4440 pci_err(qm->pdev, "failed to enable function shaper!\n");
4441 ret = -EINVAL;
4442 goto err_put_sync;
4443 }
4444
4445 pci_info(qm->pdev, "the qos value of function%u is set to %lu.\n",
4446 fun_index, val);
4447 ret = count;
4448
4449err_put_sync:
4450 qm_pm_put_sync(qm);
4451err_get_status:
4452 clear_bit(QM_RESETTING, &qm->misc_ctl);
4453 return ret;
4454}
4455
4456static const struct file_operations qm_algqos_fops = {
4457 .owner = THIS_MODULE,
4458 .open = simple_open,
4459 .read = qm_algqos_read,
4460 .write = qm_algqos_write,
4461};
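
/*
 * Usage sketch for the "alg_qos" debugfs file created below (assumed shell
 * session; the path, BDF and value are examples). The PF writes
 * "<bdf> <qos>" with qos in 1~1000, and any function can read the current
 * value back:
 *
 *	echo "0000:81:00.0 500" > /sys/kernel/debug/<dev>/alg_qos
 *	cat /sys/kernel/debug/<dev>/alg_qos
 */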
4462
/**
 * hisi_qm_set_algqos_init() - Initialize function qos debugfs files.
 * @qm: The qm for which we want to add debugfs files.
 *
 * Create function qos debugfs files.
 */
4469static void hisi_qm_set_algqos_init(struct hisi_qm *qm)
4470{
4471 if (qm->fun_type == QM_HW_PF)
4472 debugfs_create_file("alg_qos", 0644, qm->debug.debug_root,
4473 qm, &qm_algqos_fops);
4474 else
4475 debugfs_create_file("alg_qos", 0444, qm->debug.debug_root,
4476 qm, &qm_algqos_fops);
4477}
4478
/**
 * hisi_qm_debug_init() - Initialize qm related debugfs files.
 * @qm: The qm for which we want to add debugfs files.
 *
 * Create qm related debugfs files.
 */
4485void hisi_qm_debug_init(struct hisi_qm *qm)
4486{
4487 struct qm_dfx *dfx = &qm->debug.dfx;
4488 struct dentry *qm_d;
4489 void *data;
4490 int i;
4491
4492 qm_d = debugfs_create_dir("qm", qm->debug.debug_root);
4493 qm->debug.qm_d = qm_d;
4494
	/* only show this in PF */
4496 if (qm->fun_type == QM_HW_PF) {
4497 qm_create_debugfs_file(qm, qm->debug.debug_root, CURRENT_QM);
4498 for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++)
4499 qm_create_debugfs_file(qm, qm->debug.qm_d, i);
4500 }
4501
4502 debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
4503
4504 debugfs_create_file("cmd", 0600, qm->debug.qm_d, qm, &qm_cmd_fops);
4505
4506 debugfs_create_file("status", 0444, qm->debug.qm_d, qm,
4507 &qm_status_fops);
4508 for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) {
4509 data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset);
4510 debugfs_create_file(qm_dfx_files[i].name,
4511 0644,
4512 qm_d,
4513 data,
4514 &qm_atomic64_ops);
4515 }
4516
4517 if (qm->ver >= QM_HW_V3)
4518 hisi_qm_set_algqos_init(qm);
4519}
4520EXPORT_SYMBOL_GPL(hisi_qm_debug_init);
4521
/**
 * hisi_qm_debug_regs_clear() - clear qm debug related registers.
 * @qm: The qm for which we want to clear its debug registers.
 */
4526void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
4527{
4528 const struct debugfs_reg32 *regs;
4529 int i;
4530
	/* clear current_qm */
4532 writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
4533 writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
4534
	/* clear current_q */
4536 writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
4537 writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
4538
	/*
	 * These registers are read-and-clear, so clear them after reading
	 * them.
	 */
4543 writel(0x1, qm->io_base + QM_DFX_CNT_CLR_CE);
4544
4545 regs = qm_dfx_regs;
4546 for (i = 0; i < CNT_CYC_REGS_NUM; i++) {
4547 readl(qm->io_base + regs->offset);
4548 regs++;
4549 }
4550
	/* clear clear_enable */
4552 writel(0x0, qm->io_base + QM_DFX_CNT_CLR_CE);
4553}
4554EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);
4555
/**
 * hisi_qm_sriov_enable() - enable virtual functions
 * @pdev: the PCIe device
 * @max_vfs: the number of virtual functions to enable
 *
 * Returns the number of enabled VFs. If there are VFs enabled already, or
 * max_vfs is more than the total number of VFs the device can enable,
 * returns failure.
 */
4565int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
4566{
4567 struct hisi_qm *qm = pci_get_drvdata(pdev);
4568 int pre_existing_vfs, num_vfs, total_vfs, ret;
4569
4570 ret = qm_pm_get_sync(qm);
4571 if (ret)
4572 return ret;
4573
4574 total_vfs = pci_sriov_get_totalvfs(pdev);
4575 pre_existing_vfs = pci_num_vf(pdev);
	if (pre_existing_vfs) {
		pci_err(pdev, "%d VFs already enabled. Please disable pre-enabled VFs!\n",
			pre_existing_vfs);
		ret = -EBUSY;
		goto err_put_sync;
	}
4581
4582 num_vfs = min_t(int, max_vfs, total_vfs);
4583 ret = qm_vf_q_assign(qm, num_vfs);
4584 if (ret) {
4585 pci_err(pdev, "Can't assign queues for VF!\n");
4586 goto err_put_sync;
4587 }
4588
4589 qm->vfs_num = num_vfs;
4590
4591 ret = pci_enable_sriov(pdev, num_vfs);
4592 if (ret) {
4593 pci_err(pdev, "Can't enable VF!\n");
4594 qm_clear_vft_config(qm);
4595 goto err_put_sync;
4596 }
4597
4598 pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs);
4599
4600 return num_vfs;
4601
4602err_put_sync:
4603 qm_pm_put_sync(qm);
4604 return ret;
4605}
4606EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable);

/**
 * hisi_qm_sriov_disable - disable virtual functions
 * @pdev: the PCI device.
 * @is_frozen: true when all the VFs are frozen.
 *
 * Returns failure if there are VFs assigned to guests or a VF is in use.
 */
int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int total_vfs = pci_sriov_get_totalvfs(qm->pdev);
	int ret;

	if (pci_vfs_assigned(pdev)) {
		pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n");
		return -EPERM;
	}

	/* While a VF is in use, SR-IOV cannot be disabled. */
	if (!is_frozen && qm_try_frozen_vfs(pdev, qm->qm_list)) {
		pci_err(pdev, "Task is using its VF!\n");
		return -EBUSY;
	}

	pci_disable_sriov(pdev);

	memset(qm->factor + 1, 0, sizeof(struct qm_shaper_factor) * total_vfs);
	ret = qm_clear_vft_config(qm);
	if (ret)
		return ret;

	qm_pm_put_sync(qm);

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);

/**
 * hisi_qm_sriov_configure - configure the number of VFs
 * @pdev: The PCI device
 * @num_vfs: The number of VFs to enable
 *
 * Enable SR-IOV according to num_vfs, 0 means disable.
 */
int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	if (num_vfs == 0)
		return hisi_qm_sriov_disable(pdev, false);
	else
		return hisi_qm_sriov_enable(pdev, num_vfs);
}
EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure);
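
/*
 * Example (illustrative sketch): an accelerator driver typically wires the
 * SR-IOV and shutdown helpers straight into its struct pci_driver, so that
 * writes to the sriov_numvfs sysfs attribute reach the QM core. All my_acc_*
 * names are hypothetical.
 *
 *	static struct pci_driver my_acc_pci_driver = {
 *		.name            = "my_acc",
 *		.id_table        = my_acc_dev_ids,
 *		.probe           = my_acc_probe,
 *		.remove          = my_acc_remove,
 *		.sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ?
 *				   hisi_qm_sriov_configure : NULL,
 *		.shutdown        = hisi_qm_dev_shutdown,
 *	};
 */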

static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
{
	u32 err_sts;

	if (!qm->err_ini->get_dev_hw_err_status) {
		dev_err(&qm->pdev->dev, "Device doesn't support getting hw error status!\n");
		return ACC_ERR_NONE;
	}

	/* get device hardware error status */
	err_sts = qm->err_ini->get_dev_hw_err_status(qm);
	if (err_sts) {
		if (err_sts & qm->err_info.ecc_2bits_mask)
			qm->err_status.is_dev_ecc_mbit = true;

		if (qm->err_ini->log_dev_hw_err)
			qm->err_ini->log_dev_hw_err(qm, err_sts);

		/* a correctable error does not need a reset */
		if ((err_sts | qm->err_info.dev_ce_mask) ==
		     qm->err_info.dev_ce_mask) {
			if (qm->err_ini->clear_dev_hw_err_status)
				qm->err_ini->clear_dev_hw_err_status(qm,
								     err_sts);

			return ACC_ERR_RECOVERED;
		}

		return ACC_ERR_NEED_RESET;
	}

	return ACC_ERR_RECOVERED;
}

static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm)
{
	enum acc_err_result qm_ret, dev_ret;

	/* log qm error */
	qm_ret = qm_hw_error_handle(qm);

	/* log device error */
	dev_ret = qm_dev_err_handle(qm);

	return (qm_ret == ACC_ERR_NEED_RESET ||
		dev_ret == ACC_ERR_NEED_RESET) ?
		ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED;
}

/**
 * hisi_qm_dev_err_detected() - Get device and qm error status then log it.
 * @pdev: The PCIe device which needs to report the error.
 * @state: The connectivity between CPU and device.
 *
 * We will log all the errors that occurred on the device while it is
 * resetting.
 */
pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
					  pci_channel_state_t state)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	enum acc_err_result ret;

	if (pdev->is_virtfn)
		return PCI_ERS_RESULT_NONE;

	pci_info(pdev, "PCI error detected, state(=%u)!\n", state);
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	ret = qm_process_dev_error(qm);
	if (ret == ACC_ERR_NEED_RESET)
		return PCI_ERS_RESULT_NEED_RESET;

	return PCI_ERS_RESULT_RECOVERED;
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected);

static int qm_check_req_recv(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;
	u32 val;

	if (qm->ver >= QM_HW_V3)
		return 0;

	writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID);
	ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
					 (val == ACC_VENDOR_ID_VALUE),
					 POLL_PERIOD, POLL_TIMEOUT);
	if (ret) {
		dev_err(&pdev->dev, "Failed to read QM reg!\n");
		return ret;
	}

	writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID);
	ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
					 (val == PCI_VENDOR_ID_HUAWEI),
					 POLL_PERIOD, POLL_TIMEOUT);
	if (ret)
		dev_err(&pdev->dev, "Failed to read QM reg the second time!\n");

	return ret;
}

static int qm_set_pf_mse(struct hisi_qm *qm, bool set)
{
	struct pci_dev *pdev = qm->pdev;
	u16 cmd;
	int i;

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	if (set)
		cmd |= PCI_COMMAND_MEMORY;
	else
		cmd &= ~PCI_COMMAND_MEMORY;

	pci_write_config_word(pdev, PCI_COMMAND, cmd);
	for (i = 0; i < MAX_WAIT_COUNTS; i++) {
		pci_read_config_word(pdev, PCI_COMMAND, &cmd);
		if (set == ((cmd & PCI_COMMAND_MEMORY) >> 1))
			return 0;

		udelay(1);
	}

	return -ETIMEDOUT;
}

static int qm_set_vf_mse(struct hisi_qm *qm, bool set)
{
	struct pci_dev *pdev = qm->pdev;
	u16 sriov_ctrl;
	int pos;
	int i;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
	if (set)
		sriov_ctrl |= PCI_SRIOV_CTRL_MSE;
	else
		sriov_ctrl &= ~PCI_SRIOV_CTRL_MSE;
	pci_write_config_word(pdev, pos + PCI_SRIOV_CTRL, sriov_ctrl);

	for (i = 0; i < MAX_WAIT_COUNTS; i++) {
		pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
		if (set == (sriov_ctrl & PCI_SRIOV_CTRL_MSE) >>
		    ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT)
			return 0;

		udelay(1);
	}

	return -ETIMEDOUT;
}

static int qm_vf_reset_prepare(struct hisi_qm *qm,
			       enum qm_stop_reason stop_reason)
{
	struct hisi_qm_list *qm_list = qm->qm_list;
	struct pci_dev *pdev = qm->pdev;
	struct pci_dev *virtfn;
	struct hisi_qm *vf_qm;
	int ret = 0;

	mutex_lock(&qm_list->lock);
	list_for_each_entry(vf_qm, &qm_list->list, list) {
		virtfn = vf_qm->pdev;
		if (virtfn == pdev)
			continue;

		if (pci_physfn(virtfn) == pdev) {
			/* save VFs PCIe BAR configuration */
			pci_save_state(virtfn);

			ret = hisi_qm_stop(vf_qm, stop_reason);
			if (ret)
				goto stop_fail;
		}
	}

stop_fail:
	mutex_unlock(&qm_list->lock);
	return ret;
}

static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd,
			   enum qm_stop_reason stop_reason)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;

	if (!qm->vfs_num)
		return 0;

	/* Kunpeng930 can notify VFs to stop before the PF reset */
	if (qm->ops->ping_all_vfs) {
		ret = qm->ops->ping_all_vfs(qm, cmd);
		if (ret)
			pci_err(pdev, "failed to send cmd to all VFs before PF reset!\n");
	} else {
		ret = qm_vf_reset_prepare(qm, stop_reason);
		if (ret)
			pci_err(pdev, "failed to prepare reset, ret = %d.\n", ret);
	}

	return ret;
}

static int qm_controller_reset_prepare(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;

	ret = qm_reset_prepare_ready(qm);
	if (ret) {
		pci_err(pdev, "Controller reset not ready!\n");
		return ret;
	}

	/* stop using the mailbox command channel while the PF resets */
	qm_cmd_uninit(qm);

	/* whether the VFs stop successfully or not, soft reset continues */
	ret = qm_try_stop_vfs(qm, QM_PF_SRST_PREPARE, QM_SOFT_RESET);
	if (ret)
		pci_err(pdev, "failed to stop vfs by pf in soft reset.\n");

	ret = hisi_qm_stop(qm, QM_SOFT_RESET);
	if (ret) {
		pci_err(pdev, "Failed to stop QM!\n");
		qm_reset_bit_clear(qm);
		return ret;
	}

	ret = qm_wait_vf_prepare_finish(qm);
	if (ret)
		pci_err(pdev, "failed to stop by vfs in soft reset!\n");

	clear_bit(QM_RST_SCHED, &qm->misc_ctl);

	return 0;
}

static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
{
	u32 nfe_enb = 0;

	/* Kunpeng930 hardware closes the master OOO automatically when an NFE occurs */
	if (qm->ver >= QM_HW_V3)
		return;

	if (!qm->err_status.is_dev_ecc_mbit &&
	    qm->err_status.is_qm_ecc_mbit &&
	    qm->err_ini->close_axi_master_ooo) {
		qm->err_ini->close_axi_master_ooo(qm);
	} else if (qm->err_status.is_dev_ecc_mbit &&
		   !qm->err_status.is_qm_ecc_mbit &&
		   !qm->err_ini->close_axi_master_ooo) {
		nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
		writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE,
		       qm->io_base + QM_RAS_NFE_ENABLE);
		writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET);
	}
}

static int qm_soft_reset(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;
	u32 val;

	/* ensure all doorbells and mailboxes have been received by the QM */
	ret = qm_check_req_recv(qm);
	if (ret)
		return ret;

	if (qm->vfs_num) {
		ret = qm_set_vf_mse(qm, false);
		if (ret) {
			pci_err(pdev, "Failed to disable vf MSE bit.\n");
			return ret;
		}
	}

	ret = qm->ops->set_msi(qm, false);
	if (ret) {
		pci_err(pdev, "Failed to disable PEH MSI bit.\n");
		return ret;
	}

	qm_dev_ecc_mbit_handle(qm);

	/* OOO register set and check */
	writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
	       qm->io_base + ACC_MASTER_GLOBAL_CTRL);

	/* if the bus is locked, reset the chip */
	ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
					 val,
					 (val == ACC_MASTER_TRANS_RETURN_RW),
					 POLL_PERIOD, POLL_TIMEOUT);
	if (ret) {
		pci_emerg(pdev, "Bus lock! Please reset system.\n");
		return ret;
	}

	if (qm->err_ini->close_sva_prefetch)
		qm->err_ini->close_sva_prefetch(qm);

	ret = qm_set_pf_mse(qm, false);
	if (ret) {
		pci_err(pdev, "Failed to disable pf MSE bit.\n");
		return ret;
	}

	/* the reset related sub-control registers are not in PCI BAR */
	if (ACPI_HANDLE(&pdev->dev)) {
		unsigned long long value = 0;
		acpi_status s;

		s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
					  qm->err_info.acpi_rst,
					  NULL, &value);
		if (ACPI_FAILURE(s)) {
			pci_err(pdev, "No controller reset method!\n");
			return -EIO;
		}

		if (value) {
			pci_err(pdev, "Reset step %llu failed!\n", value);
			return -EIO;
		}
	} else {
		pci_err(pdev, "No reset method!\n");
		return -EINVAL;
	}

	return 0;
}

static int qm_vf_reset_done(struct hisi_qm *qm)
{
	struct hisi_qm_list *qm_list = qm->qm_list;
	struct pci_dev *pdev = qm->pdev;
	struct pci_dev *virtfn;
	struct hisi_qm *vf_qm;
	int ret = 0;

	mutex_lock(&qm_list->lock);
	list_for_each_entry(vf_qm, &qm_list->list, list) {
		virtfn = vf_qm->pdev;
		if (virtfn == pdev)
			continue;

		if (pci_physfn(virtfn) == pdev) {
			/* restore VFs PCIe BAR configuration */
			pci_restore_state(virtfn);

			ret = qm_restart(vf_qm);
			if (ret)
				goto restart_fail;
		}
	}

restart_fail:
	mutex_unlock(&qm_list->lock);
	return ret;
}

static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_mb_cmd cmd)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;

	if (!qm->vfs_num)
		return 0;

	ret = qm_vf_q_assign(qm, qm->vfs_num);
	if (ret) {
		pci_err(pdev, "failed to assign VFs, ret = %d.\n", ret);
		return ret;
	}

	/* Kunpeng930 can notify VFs to start work after the PF reset */
	if (qm->ops->ping_all_vfs) {
		ret = qm->ops->ping_all_vfs(qm, cmd);
		if (ret)
			pci_warn(pdev, "failed to send cmd to all VFs after PF reset!\n");
	} else {
		ret = qm_vf_reset_done(qm);
		if (ret)
			pci_warn(pdev, "failed to start vfs, ret = %d.\n", ret);
	}

	return ret;
}

static int qm_dev_hw_init(struct hisi_qm *qm)
{
	return qm->err_ini->hw_init(qm);
}

static void qm_restart_prepare(struct hisi_qm *qm)
{
	u32 value;

	if (qm->err_ini->open_sva_prefetch)
		qm->err_ini->open_sva_prefetch(qm);

	if (qm->ver >= QM_HW_V3)
		return;

	if (!qm->err_status.is_qm_ecc_mbit &&
	    !qm->err_status.is_dev_ecc_mbit)
		return;

	/* temporarily close the OOO port used for PEH to write out MSI */
	value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
	writel(value & ~qm->err_info.msi_wr_port,
	       qm->io_base + ACC_AM_CFG_PORT_WR_EN);

	/* clear dev ecc 2bit error source if present */
	value = qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask;
	if (value && qm->err_ini->clear_dev_hw_err_status)
		qm->err_ini->clear_dev_hw_err_status(qm, value);

	/* clear QM ecc mbit error source */
	writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE);

	/* clear AM Reorder Buffer ecc mbit source */
	writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS);
}

static void qm_restart_done(struct hisi_qm *qm)
{
	u32 value;

	if (qm->ver >= QM_HW_V3)
		goto clear_flags;

	if (!qm->err_status.is_qm_ecc_mbit &&
	    !qm->err_status.is_dev_ecc_mbit)
		return;

	/* open the OOO port for PEH to write out MSI */
	value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
	value |= qm->err_info.msi_wr_port;
	writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN);

clear_flags:
	qm->err_status.is_qm_ecc_mbit = false;
	qm->err_status.is_dev_ecc_mbit = false;
}

static int qm_controller_reset_done(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;

	ret = qm->ops->set_msi(qm, true);
	if (ret) {
		pci_err(pdev, "Failed to enable PEH MSI bit!\n");
		return ret;
	}

	ret = qm_set_pf_mse(qm, true);
	if (ret) {
		pci_err(pdev, "Failed to enable pf MSE bit!\n");
		return ret;
	}

	if (qm->vfs_num) {
		ret = qm_set_vf_mse(qm, true);
		if (ret) {
			pci_err(pdev, "Failed to enable vf MSE bit!\n");
			return ret;
		}
	}

	ret = qm_dev_hw_init(qm);
	if (ret) {
		pci_err(pdev, "Failed to init device\n");
		return ret;
	}

	qm_restart_prepare(qm);
	hisi_qm_dev_err_init(qm);
	if (qm->err_ini->open_axi_master_ooo)
		qm->err_ini->open_axi_master_ooo(qm);

	ret = qm_dev_mem_reset(qm);
	if (ret) {
		pci_err(pdev, "failed to reset device memory\n");
		return ret;
	}

	ret = qm_restart(qm);
	if (ret) {
		pci_err(pdev, "Failed to start QM!\n");
		return ret;
	}

	ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE);
	if (ret)
		pci_err(pdev, "failed to start vfs by pf in soft reset.\n");

	ret = qm_wait_vf_prepare_finish(qm);
	if (ret)
		pci_err(pdev, "failed to start by vfs in soft reset!\n");

	qm_cmd_init(qm);
	qm_restart_done(qm);

	qm_reset_bit_clear(qm);

	return 0;
}

static int qm_controller_reset(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;

	pci_info(pdev, "Controller resetting...\n");

	ret = qm_controller_reset_prepare(qm);
	if (ret) {
		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
		clear_bit(QM_RST_SCHED, &qm->misc_ctl);
		return ret;
	}

	ret = qm_soft_reset(qm);
	if (ret) {
		pci_err(pdev, "Controller reset failed (%d)\n", ret);
		qm_reset_bit_clear(qm);
		return ret;
	}

	ret = qm_controller_reset_done(qm);
	if (ret) {
		qm_reset_bit_clear(qm);
		return ret;
	}

	pci_info(pdev, "Controller reset complete\n");

	return 0;
}

/**
 * hisi_qm_dev_slot_reset() - slot reset
 * @pdev: the PCIe device
 *
 * This function offers the QM-related PCIe device reset interface. Drivers
 * which use the QM can use this function as slot_reset in their struct
 * pci_error_handlers.
 */
pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	if (pdev->is_virtfn)
		return PCI_ERS_RESULT_RECOVERED;

	pci_aer_clear_nonfatal_status(pdev);

	/* reset the PCIe device controller */
	ret = qm_controller_reset(qm);
	if (ret) {
		pci_err(pdev, "Controller reset failed (%d)\n", ret);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_slot_reset);

void hisi_qm_reset_prepare(struct pci_dev *pdev)
{
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	u32 delay = 0;
	int ret;

	hisi_qm_dev_err_uninit(pf_qm);

	/*
	 * Check whether there is an ECC mbit error; if one has occurred,
	 * wait for the soft reset to fix it.
	 */
	while (qm_check_dev_error(pf_qm)) {
		msleep(++delay);
		if (delay > QM_RESET_WAIT_TIMEOUT)
			return;
	}

	ret = qm_reset_prepare_ready(qm);
	if (ret) {
		pci_err(pdev, "FLR not ready!\n");
		return;
	}

	/* stop using the mailbox command channel during FLR */
	if (qm->fun_type == QM_HW_PF)
		qm_cmd_uninit(qm);

	ret = qm_try_stop_vfs(qm, QM_PF_FLR_PREPARE, QM_FLR);
	if (ret)
		pci_err(pdev, "failed to stop vfs by pf in FLR.\n");

	ret = hisi_qm_stop(qm, QM_FLR);
	if (ret) {
		pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret);
		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
		return;
	}

	ret = qm_wait_vf_prepare_finish(qm);
	if (ret)
		pci_err(pdev, "failed to stop by vfs in FLR!\n");

	pci_info(pdev, "FLR resetting...\n");
}
EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare);

static bool qm_flr_reset_complete(struct pci_dev *pdev)
{
	struct pci_dev *pf_pdev = pci_physfn(pdev);
	struct hisi_qm *qm = pci_get_drvdata(pf_pdev);
	u32 id;

	pci_read_config_dword(qm->pdev, PCI_COMMAND, &id);
	if (id == QM_PCI_COMMAND_INVALID) {
		pci_err(pdev, "Device can not be used!\n");
		return false;
	}

	return true;
}

void hisi_qm_reset_done(struct pci_dev *pdev)
{
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	if (qm->fun_type == QM_HW_PF) {
		ret = qm_dev_hw_init(qm);
		if (ret) {
			pci_err(pdev, "Failed to init PF, ret = %d.\n", ret);
			goto flr_done;
		}
	}

	hisi_qm_dev_err_init(pf_qm);

	ret = qm_restart(qm);
	if (ret) {
		pci_err(pdev, "Failed to start QM, ret = %d.\n", ret);
		goto flr_done;
	}

	ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE);
	if (ret)
		pci_err(pdev, "failed to start vfs by pf in FLR.\n");

	ret = qm_wait_vf_prepare_finish(qm);
	if (ret)
		pci_err(pdev, "failed to start by vfs in FLR!\n");

flr_done:
	if (qm->fun_type == QM_HW_PF)
		qm_cmd_init(qm);

	if (qm_flr_reset_complete(pdev))
		pci_info(pdev, "FLR reset complete\n");

	qm_reset_bit_clear(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_reset_done);
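
/*
 * Example (illustrative sketch): the exported error-recovery entry points
 * are meant to be plugged into a driver's struct pci_error_handlers; the
 * my_acc_err_handler name is hypothetical.
 *
 *	static const struct pci_error_handlers my_acc_err_handler = {
 *		.error_detected = hisi_qm_dev_err_detected,
 *		.slot_reset     = hisi_qm_dev_slot_reset,
 *		.reset_prepare  = hisi_qm_reset_prepare,
 *		.reset_done     = hisi_qm_reset_done,
 *	};
 */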

static irqreturn_t qm_abnormal_irq(int irq, void *data)
{
	struct hisi_qm *qm = data;
	enum acc_err_result ret;

	atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt);
	ret = qm_process_dev_error(qm);
	if (ret == ACC_ERR_NEED_RESET &&
	    !test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl) &&
	    !test_and_set_bit(QM_RST_SCHED, &qm->misc_ctl))
		schedule_work(&qm->rst_work);

	return IRQ_HANDLED;
}

static int qm_irq_register(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;

	ret = request_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR),
			  qm_irq, 0, qm->dev_name, qm);
	if (ret)
		return ret;

	if (qm->ver > QM_HW_V1) {
		ret = request_threaded_irq(pci_irq_vector(pdev,
					   QM_AEQ_EVENT_IRQ_VECTOR),
					   qm_aeq_irq, qm_aeq_thread,
					   0, qm->dev_name, qm);
		if (ret)
			goto err_aeq_irq;

		if (qm->fun_type == QM_HW_PF) {
			ret = request_irq(pci_irq_vector(pdev,
					  QM_ABNORMAL_EVENT_IRQ_VECTOR),
					  qm_abnormal_irq, 0, qm->dev_name, qm);
			if (ret)
				goto err_abnormal_irq;
		}
	}

	if (qm->ver > QM_HW_V2) {
		ret = request_irq(pci_irq_vector(pdev, QM_CMD_EVENT_IRQ_VECTOR),
				  qm_mb_cmd_irq, 0, qm->dev_name, qm);
		if (ret)
			goto err_mb_cmd_irq;
	}

	return 0;

err_mb_cmd_irq:
	if (qm->fun_type == QM_HW_PF)
		free_irq(pci_irq_vector(pdev, QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
err_abnormal_irq:
	free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
err_aeq_irq:
	free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
	return ret;
}

/**
 * hisi_qm_dev_shutdown() - Shutdown device.
 * @pdev: the device to shut down.
 *
 * Stop the qm when the OS shuts down or reboots.
 */
void hisi_qm_dev_shutdown(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	ret = hisi_qm_stop(qm, QM_NORMAL);
	if (ret)
		dev_err(&pdev->dev, "Failed to stop qm in shutdown!\n");
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown);

static void hisi_qm_controller_reset(struct work_struct *rst_work)
{
	struct hisi_qm *qm = container_of(rst_work, struct hisi_qm, rst_work);
	int ret;

	ret = qm_pm_get_sync(qm);
	if (ret) {
		clear_bit(QM_RST_SCHED, &qm->misc_ctl);
		return;
	}

	/* reset pf to chip's initial state */
	ret = qm_controller_reset(qm);
	if (ret)
		dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret);

	qm_pm_put_sync(qm);
}

static void qm_pf_reset_vf_prepare(struct hisi_qm *qm,
				   enum qm_stop_reason stop_reason)
{
	enum qm_mb_cmd cmd = QM_VF_PREPARE_DONE;
	struct pci_dev *pdev = qm->pdev;
	int ret;

	ret = qm_reset_prepare_ready(qm);
	if (ret) {
		dev_err(&pdev->dev, "reset prepare not ready!\n");
		atomic_set(&qm->status.flags, QM_STOP);
		cmd = QM_VF_PREPARE_FAIL;
		goto err_prepare;
	}

	ret = hisi_qm_stop(qm, stop_reason);
	if (ret) {
		dev_err(&pdev->dev, "failed to stop QM, ret = %d.\n", ret);
		atomic_set(&qm->status.flags, QM_STOP);
		cmd = QM_VF_PREPARE_FAIL;
		goto err_prepare;
	} else {
		goto out;
	}

err_prepare:
	hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
	hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
out:
	pci_save_state(pdev);
	ret = qm->ops->ping_pf(qm, cmd);
	if (ret)
		dev_warn(&pdev->dev, "PF response timed out in reset prepare!\n");
}

static void qm_pf_reset_vf_done(struct hisi_qm *qm)
{
	enum qm_mb_cmd cmd = QM_VF_START_DONE;
	struct pci_dev *pdev = qm->pdev;
	int ret;

	pci_restore_state(pdev);
	ret = hisi_qm_start(qm);
	if (ret) {
		dev_err(&pdev->dev, "failed to start QM, ret = %d.\n", ret);
		cmd = QM_VF_START_FAIL;
	}

	ret = qm->ops->ping_pf(qm, cmd);
	if (ret)
		dev_warn(&pdev->dev, "PF response timed out in reset done!\n");

	qm_reset_bit_clear(qm);
}

static int qm_wait_pf_reset_finish(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	u32 val, cmd;
	u64 msg;
	int ret;

	/* wait for the PF to signal that its reset is finished */
	ret = readl_relaxed_poll_timeout(qm->io_base + QM_IFC_INT_SOURCE_V, val,
					 val == BIT(0), QM_VF_RESET_WAIT_US,
					 QM_VF_RESET_WAIT_TIMEOUT_US);
	if (ret) {
		dev_err(dev, "couldn't get reset done status from PF, timeout!\n");
		return -ETIMEDOUT;
	}

	/*
	 * Whether the message is obtained successfully or not, the VF needs
	 * to ack the PF by clearing the interrupt.
	 */
	ret = qm_get_mb_cmd(qm, &msg, 0);
	qm_clear_cmd_interrupt(qm, 0);
	if (ret) {
		dev_err(dev, "failed to get msg from PF in reset done!\n");
		return ret;
	}

	cmd = msg & QM_MB_CMD_DATA_MASK;
	if (cmd != QM_PF_RESET_DONE) {
		dev_err(dev, "the cmd(%u) is not reset done!\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

static void qm_pf_reset_vf_process(struct hisi_qm *qm,
				   enum qm_stop_reason stop_reason)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	dev_info(dev, "device reset start...\n");

	/* the message is obtained by querying the register while resetting */
	qm_cmd_uninit(qm);
	qm_pf_reset_vf_prepare(qm, stop_reason);

	ret = qm_wait_pf_reset_finish(qm);
	if (ret)
		goto err_get_status;

	qm_pf_reset_vf_done(qm);
	qm_cmd_init(qm);

	dev_info(dev, "device reset done.\n");

	return;

err_get_status:
	qm_cmd_init(qm);
	qm_reset_bit_clear(qm);
}

static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num)
{
	struct device *dev = &qm->pdev->dev;
	u64 msg;
	u32 cmd;
	int ret;

	/*
	 * Get the msg from the source by sending a mailbox. Whether the
	 * message is obtained successfully or not, the destination needs to
	 * ack the source by clearing the interrupt.
	 */
	ret = qm_get_mb_cmd(qm, &msg, fun_num);
	qm_clear_cmd_interrupt(qm, BIT(fun_num));
	if (ret) {
		dev_err(dev, "failed to get msg from source!\n");
		return;
	}

	cmd = msg & QM_MB_CMD_DATA_MASK;
	switch (cmd) {
	case QM_PF_FLR_PREPARE:
		qm_pf_reset_vf_process(qm, QM_FLR);
		break;
	case QM_PF_SRST_PREPARE:
		qm_pf_reset_vf_process(qm, QM_SOFT_RESET);
		break;
	case QM_VF_GET_QOS:
		qm_vf_get_qos(qm, fun_num);
		break;
	case QM_PF_SET_QOS:
		qm->mb_qos = msg >> QM_MB_CMD_DATA_SHIFT;
		break;
	default:
		dev_err(dev, "unsupported cmd %u sent by function(%u)!\n", cmd, fun_num);
		break;
	}
}

static void qm_cmd_process(struct work_struct *cmd_process)
{
	struct hisi_qm *qm = container_of(cmd_process,
					  struct hisi_qm, cmd_process);
	u32 vfs_num = qm->vfs_num;
	u64 val;
	u32 i;

	if (qm->fun_type == QM_HW_PF) {
		val = readq(qm->io_base + QM_IFC_INT_SOURCE_P);
		if (!val)
			return;

		for (i = 1; i <= vfs_num; i++) {
			if (val & BIT(i))
				qm_handle_cmd_msg(qm, i);
		}

		return;
	}

	qm_handle_cmd_msg(qm, 0);
}

/**
 * hisi_qm_alg_register() - Register alg to crypto and add qm to qm_list.
 * @qm: The qm to add.
 * @qm_list: The qm list.
 *
 * This function adds qm to qm_list, and registers the algorithms with
 * crypto when the qm_list was empty.
 */
int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
{
	struct device *dev = &qm->pdev->dev;
	int flag = 0;
	int ret = 0;

	mutex_lock(&qm_list->lock);
	if (list_empty(&qm_list->list))
		flag = 1;
	list_add_tail(&qm->list, &qm_list->list);
	mutex_unlock(&qm_list->lock);

	if (qm->ver <= QM_HW_V2 && qm->use_sva) {
		dev_info(dev, "HW V2 does not support using uacce sva mode and hardware crypto algs at the same time.\n");
		return 0;
	}

	if (flag) {
		ret = qm_list->register_to_crypto(qm);
		if (ret) {
			mutex_lock(&qm_list->lock);
			list_del(&qm->list);
			mutex_unlock(&qm_list->lock);
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_alg_register);
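
/*
 * Example (illustrative sketch): a driver registers its algorithms once the
 * QM has been started, typically at the end of probe; my_qm_list, with its
 * register_to_crypto/unregister_from_crypto callbacks, is hypothetical.
 *
 *	ret = hisi_qm_alg_register(qm, &my_qm_list);
 *	if (ret) {
 *		pci_err(qm->pdev, "failed to register to crypto!\n");
 *		goto err_qm_stop;
 *	}
 */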

/**
 * hisi_qm_alg_unregister() - Unregister alg from crypto and delete qm from
 * qm list.
 * @qm: The qm to delete.
 * @qm_list: The qm list.
 *
 * This function deletes qm from qm_list, and unregisters the algorithms
 * from crypto when the qm_list becomes empty.
 */
void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
{
	mutex_lock(&qm_list->lock);
	list_del(&qm->list);
	mutex_unlock(&qm_list->lock);

	if (qm->ver <= QM_HW_V2 && qm->use_sva)
		return;

	if (list_empty(&qm_list->list))
		qm_list->unregister_from_crypto(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister);

static int qm_get_qp_num(struct hisi_qm *qm)
{
	if (qm->ver == QM_HW_V1)
		qm->ctrl_qp_num = QM_QNUM_V1;
	else if (qm->ver == QM_HW_V2)
		qm->ctrl_qp_num = QM_QNUM_V2;
	else
		qm->ctrl_qp_num = readl(qm->io_base + QM_CAPBILITY) &
				  QM_QP_NUN_MASK;

	if (qm->use_db_isolation)
		qm->max_qp_num = (readl(qm->io_base + QM_CAPBILITY) >>
				  QM_QP_MAX_NUM_SHIFT) & QM_QP_NUN_MASK;
	else
		qm->max_qp_num = qm->ctrl_qp_num;

	/* check if qp number is valid */
	if (qm->qp_num > qm->max_qp_num) {
		dev_err(&qm->pdev->dev, "qp num(%u) is more than max qp num(%u)!\n",
			qm->qp_num, qm->max_qp_num);
		return -EINVAL;
	}

	return 0;
}

static int qm_get_pci_res(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct device *dev = &pdev->dev;
	int ret;

	ret = pci_request_mem_regions(pdev, qm->dev_name);
	if (ret < 0) {
		dev_err(dev, "Failed to request mem regions!\n");
		return ret;
	}

	qm->phys_base = pci_resource_start(pdev, PCI_BAR_2);
	qm->io_base = ioremap(qm->phys_base, pci_resource_len(pdev, PCI_BAR_2));
	if (!qm->io_base) {
		ret = -EIO;
		goto err_request_mem_regions;
	}

	if (qm->ver > QM_HW_V2) {
		if (qm->fun_type == QM_HW_PF)
			qm->use_db_isolation = readl(qm->io_base +
						     QM_QUE_ISO_EN) & BIT(0);
		else
			qm->use_db_isolation = readl(qm->io_base +
						     QM_QUE_ISO_CFG_V) & BIT(0);
	}

	if (qm->use_db_isolation) {
		qm->db_interval = QM_QP_DB_INTERVAL;
		qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4);
		qm->db_io_base = ioremap(qm->db_phys_base,
					 pci_resource_len(pdev, PCI_BAR_4));
		if (!qm->db_io_base) {
			ret = -EIO;
			goto err_ioremap;
		}
	} else {
		qm->db_phys_base = qm->phys_base;
		qm->db_io_base = qm->io_base;
		qm->db_interval = 0;
	}

	if (qm->fun_type == QM_HW_PF) {
		ret = qm_get_qp_num(qm);
		if (ret)
			goto err_db_ioremap;
	}

	return 0;

err_db_ioremap:
	if (qm->use_db_isolation)
		iounmap(qm->db_io_base);
err_ioremap:
	iounmap(qm->io_base);
err_request_mem_regions:
	pci_release_mem_regions(pdev);
	return ret;
}

static int hisi_qm_pci_init(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct device *dev = &pdev->dev;
	unsigned int num_vec;
	int ret;

	ret = pci_enable_device_mem(pdev);
	if (ret < 0) {
		dev_err(dev, "Failed to enable device mem!\n");
		return ret;
	}

	ret = qm_get_pci_res(qm);
	if (ret)
		goto err_disable_pcidev;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret < 0)
		goto err_get_pci_res;
	pci_set_master(pdev);

	if (!qm->ops->get_irq_num) {
		ret = -EOPNOTSUPP;
		goto err_get_pci_res;
	}
	num_vec = qm->ops->get_irq_num(qm);
	ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI);
	if (ret < 0) {
		dev_err(dev, "Failed to enable MSI vectors!\n");
		goto err_get_pci_res;
	}

	return 0;

err_get_pci_res:
	qm_put_pci_res(qm);
err_disable_pcidev:
	pci_disable_device(pdev);
	return ret;
}

static void hisi_qm_init_work(struct hisi_qm *qm)
{
	INIT_WORK(&qm->work, qm_work_process);
	if (qm->fun_type == QM_HW_PF)
		INIT_WORK(&qm->rst_work, hisi_qm_controller_reset);

	if (qm->ver > QM_HW_V2)
		INIT_WORK(&qm->cmd_process, qm_cmd_process);
}

static int hisi_qp_alloc_memory(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	size_t qp_dma_size;
	int i, ret;

	qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL);
	if (!qm->qp_array)
		return -ENOMEM;

	/* one more page for device or qp statuses */
	qp_dma_size = qm->sqe_size * QM_Q_DEPTH +
		      sizeof(struct qm_cqe) * QM_Q_DEPTH;
	qp_dma_size = PAGE_ALIGN(qp_dma_size) + PAGE_SIZE;
	for (i = 0; i < qm->qp_num; i++) {
		ret = hisi_qp_memory_init(qm, qp_dma_size, i);
		if (ret)
			goto err_init_qp_mem;

		dev_dbg(dev, "allocate qp dma buf size=%zx\n", qp_dma_size);
	}

	return 0;
err_init_qp_mem:
	hisi_qp_memory_uninit(qm, i);

	return ret;
}

static int hisi_qm_memory_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret, total_func, i;
	size_t off = 0;

	total_func = pci_sriov_get_totalvfs(qm->pdev) + 1;
	qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL);
	if (!qm->factor)
		return -ENOMEM;
	for (i = 0; i < total_func; i++)
		qm->factor[i].func_qos = QM_QOS_MAX_VAL;

/* carve the next region for @type out of the single qdma allocation */
#define QM_INIT_BUF(qm, type, num) do { \
	(qm)->type = ((qm)->qdma.va + (off)); \
	(qm)->type##_dma = (qm)->qdma.dma + (off); \
	off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \
} while (0)

	idr_init(&qm->qp_idr);
	qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_EQ_DEPTH) +
			QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) +
			QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) +
			QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num);
	qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma,
					 GFP_ATOMIC);
	dev_dbg(dev, "allocate qm dma buf size=%zx\n", qm->qdma.size);
	if (!qm->qdma.va) {
		ret = -ENOMEM;
		goto err_alloc_qdma;
	}

	QM_INIT_BUF(qm, eqe, QM_EQ_DEPTH);
	QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH);
	QM_INIT_BUF(qm, sqc, qm->qp_num);
	QM_INIT_BUF(qm, cqc, qm->qp_num);

	ret = hisi_qp_alloc_memory(qm);
	if (ret)
		goto err_alloc_qp_array;

	return 0;

err_alloc_qp_array:
	dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma);
err_alloc_qdma:
	kfree(qm->factor);

	return ret;
}

/**
 * hisi_qm_init() - Initialize a qm instance.
 * @qm: The qm to initialize.
 *
 * After this function succeeds, hisi_qm_start() can be called to put the
 * qm to work.
 */
int hisi_qm_init(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct device *dev = &pdev->dev;
	int ret;

	hisi_qm_pre_init(qm);

	ret = hisi_qm_pci_init(qm);
	if (ret)
		return ret;

	ret = qm_irq_register(qm);
	if (ret)
		goto err_pci_init;

	if (qm->fun_type == QM_HW_VF && qm->ver != QM_HW_V1) {
		/* v2 starts to support getting the vft by mailbox */
		ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
		if (ret)
			goto err_irq_register;
	}

	if (qm->fun_type == QM_HW_PF) {
		qm_disable_clock_gate(qm);
		ret = qm_dev_mem_reset(qm);
		if (ret) {
			dev_err(dev, "failed to reset device memory\n");
			goto err_irq_register;
		}
	}

	if (qm->mode == UACCE_MODE_SVA) {
		ret = qm_alloc_uacce(qm);
		if (ret < 0)
			dev_warn(dev, "failed to alloc uacce (%d)\n", ret);
	}

	ret = hisi_qm_memory_init(qm);
	if (ret)
		goto err_alloc_uacce;

	hisi_qm_init_work(qm);
	qm_cmd_init(qm);
	atomic_set(&qm->status.flags, QM_INIT);

	return 0;

err_alloc_uacce:
	if (qm->use_sva) {
		uacce_remove(qm->uacce);
		qm->uacce = NULL;
	}
err_irq_register:
	qm_irq_unregister(qm);
err_pci_init:
	hisi_qm_pci_uninit(qm);
	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_init);

/**
 * hisi_qm_get_dfx_access() - Try to get dfx access.
 * @qm: pointer to accelerator device.
 *
 * If the device is runtime-suspended, return failure; otherwise bump the
 * runtime PM usage counter so the caller can safely access registers.
 */
int hisi_qm_get_dfx_access(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;

	if (pm_runtime_suspended(dev)) {
		dev_info(dev, "can not read/write - device is suspended.\n");
		return -EAGAIN;
	}

	return qm_pm_get_sync(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_get_dfx_access);
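
/*
 * Example (illustrative sketch): debugfs callbacks should bracket register
 * access with the dfx access helpers so that a runtime-suspended device is
 * never touched; MY_DFX_REG is a hypothetical register offset.
 *
 *	ret = hisi_qm_get_dfx_access(qm);
 *	if (ret)
 *		return ret;
 *	val = readl(qm->io_base + MY_DFX_REG);
 *	hisi_qm_put_dfx_access(qm);
 */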

/**
 * hisi_qm_put_dfx_access() - Put dfx access.
 * @qm: pointer to accelerator device.
 *
 * Put dfx access, drop the runtime PM usage counter.
 */
void hisi_qm_put_dfx_access(struct hisi_qm *qm)
{
	qm_pm_put_sync(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_put_dfx_access);

/**
 * hisi_qm_pm_init() - Initialize qm runtime PM.
 * @qm: pointer to accelerator device.
 *
 * Enable autosuspend-based runtime PM on PFs of HW V3 and later.
 */
void hisi_qm_pm_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;

	if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
		return;

	pm_runtime_set_autosuspend_delay(dev, QM_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_put_noidle(dev);
}
EXPORT_SYMBOL_GPL(hisi_qm_pm_init);

/**
 * hisi_qm_pm_uninit() - Uninitialize qm runtime PM.
 * @qm: pointer to accelerator device.
 *
 * Undo what hisi_qm_pm_init() set up.
 */
void hisi_qm_pm_uninit(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;

	if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
		return;

	pm_runtime_get_noresume(dev);
	pm_runtime_dont_use_autosuspend(dev);
}
EXPORT_SYMBOL_GPL(hisi_qm_pm_uninit);

static int qm_prepare_for_suspend(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;
	u32 val;

	ret = qm->ops->set_msi(qm, false);
	if (ret) {
		pci_err(pdev, "failed to disable MSI before suspending!\n");
		return ret;
	}

	/* shutdown OOO register */
	writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
	       qm->io_base + ACC_MASTER_GLOBAL_CTRL);

	ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
					 val,
					 (val == ACC_MASTER_TRANS_RETURN_RW),
					 POLL_PERIOD, POLL_TIMEOUT);
	if (ret) {
		pci_emerg(pdev, "Bus lock! Please reset system.\n");
		return ret;
	}

	ret = qm_set_pf_mse(qm, false);
	if (ret)
		pci_err(pdev, "failed to disable MSE before suspending!\n");

	return ret;
}

static int qm_rebuild_for_resume(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;

	ret = qm_set_pf_mse(qm, true);
	if (ret) {
		pci_err(pdev, "failed to enable MSE after resuming!\n");
		return ret;
	}

	ret = qm->ops->set_msi(qm, true);
	if (ret) {
		pci_err(pdev, "failed to enable MSI after resuming!\n");
		return ret;
	}

	ret = qm_dev_hw_init(qm);
	if (ret) {
		pci_err(pdev, "failed to init device after resuming\n");
		return ret;
	}

	qm_cmd_init(qm);
	hisi_qm_dev_err_init(qm);
	qm_disable_clock_gate(qm);
	ret = qm_dev_mem_reset(qm);
	if (ret)
		pci_err(pdev, "failed to reset device memory\n");

	return ret;
}

/**
 * hisi_qm_suspend() - Runtime suspend of given device.
 * @dev: device to suspend.
 *
 * Stop the qm and prepare the hardware for suspend.
 */
int hisi_qm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	pci_info(pdev, "entering suspended state\n");

	ret = hisi_qm_stop(qm, QM_NORMAL);
	if (ret) {
		pci_err(pdev, "failed to stop qm(%d)\n", ret);
		return ret;
	}

	ret = qm_prepare_for_suspend(qm);
	if (ret)
		pci_err(pdev, "failed to prepare for suspend(%d)\n", ret);

	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_suspend);

/**
 * hisi_qm_resume() - Runtime resume of given device.
 * @dev: device to resume.
 *
 * Rebuild the hardware state and restart the qm.
 */
int hisi_qm_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	pci_info(pdev, "resuming from suspend state\n");

	ret = qm_rebuild_for_resume(qm);
	if (ret) {
		pci_err(pdev, "failed to rebuild for resume(%d)\n", ret);
		return ret;
	}

	ret = hisi_qm_start(qm);
	if (ret)
		pci_err(pdev, "failed to start qm(%d)\n", ret);

	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_resume);
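
/*
 * Example (illustrative sketch): drivers expose the two callbacks above as
 * runtime PM operations and pair them with hisi_qm_pm_init()/
 * hisi_qm_pm_uninit() in probe/remove; my_acc_pm_ops is hypothetical.
 *
 *	static const struct dev_pm_ops my_acc_pm_ops = {
 *		SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
 *	};
 */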

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver");