// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2019 HiSilicon Limited. */
#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/topology.h>
#include "hpre.h"

#define HPRE_QUEUE_NUM_V2		1024
#define HPRE_QM_ABNML_INT_MASK		0x100004
#define HPRE_CTRL_CNT_CLR_CE_BIT	BIT(0)
#define HPRE_COMM_CNT_CLR_CE		0x0
#define HPRE_CTRL_CNT_CLR_CE		0x301000
#define HPRE_FSM_MAX_CNT		0x301008
#define HPRE_VFG_AXQOS			0x30100c
#define HPRE_VFG_AXCACHE		0x301010
#define HPRE_RDCHN_INI_CFG		0x301014
#define HPRE_AWUSR_FP_CFG		0x301018
#define HPRE_BD_ENDIAN			0x301020
#define HPRE_ECC_BYPASS			0x301024
#define HPRE_RAS_WIDTH_CFG		0x301028
#define HPRE_POISON_BYPASS		0x30102c
#define HPRE_BD_ARUSR_CFG		0x301030
#define HPRE_BD_AWUSR_CFG		0x301034
#define HPRE_TYPES_ENB			0x301038
#define HPRE_DATA_RUSER_CFG		0x30103c
#define HPRE_DATA_WUSER_CFG		0x301040
#define HPRE_INT_MASK			0x301400
#define HPRE_INT_STATUS			0x301800
#define HPRE_CORE_INT_ENABLE		0
#define HPRE_CORE_INT_DISABLE		0x003fffff
#define HPRE_RAS_ECC_1BIT_TH		0x30140c
#define HPRE_RDCHN_INI_ST		0x301a00
#define HPRE_CLSTR_BASE			0x302000
#define HPRE_CORE_EN_OFFSET		0x04
#define HPRE_CORE_INI_CFG_OFFSET	0x20
#define HPRE_CORE_INI_STATUS_OFFSET	0x80
#define HPRE_CORE_HTBT_WARN_OFFSET	0x8c
#define HPRE_CORE_IS_SCHD_OFFSET	0x90

#define HPRE_RAS_CE_ENB			0x301410
#define HPRE_HAC_RAS_CE_ENABLE		0x1
#define HPRE_RAS_NFE_ENB		0x301414
#define HPRE_HAC_RAS_NFE_ENABLE		0x3ffffe
#define HPRE_RAS_FE_ENB			0x301418
#define HPRE_HAC_RAS_FE_ENABLE		0

#define HPRE_CORE_ENB		(HPRE_CLSTR_BASE + HPRE_CORE_EN_OFFSET)
#define HPRE_CORE_INI_CFG	(HPRE_CLSTR_BASE + HPRE_CORE_INI_CFG_OFFSET)
#define HPRE_CORE_INI_STATUS	(HPRE_CLSTR_BASE + HPRE_CORE_INI_STATUS_OFFSET)
#define HPRE_HAC_ECC1_CNT		0x301a04
#define HPRE_HAC_ECC2_CNT		0x301a08
#define HPRE_HAC_INT_STATUS		0x301800
#define HPRE_HAC_SOURCE_INT		0x301600
#define HPRE_CLSTR_ADDR_INTRVL		0x1000
#define HPRE_CLUSTER_INQURY		0x100
#define HPRE_CLSTR_ADDR_INQRY_RSLT	0x104
#define HPRE_TIMEOUT_ABNML_BIT		6
#define HPRE_PASID_EN_BIT		9
#define HPRE_REG_RD_INTVRL_US		10
#define HPRE_REG_RD_TMOUT_US		1000
#define HPRE_DBGFS_VAL_MAX_LEN		20
#define HPRE_PCI_DEVICE_ID		0xa258
#define HPRE_PCI_VF_DEVICE_ID		0xa259
#define HPRE_ADDR(qm, offset)		((qm)->io_base + (offset))
#define HPRE_QM_USR_CFG_MASK		0xfffffffe
#define HPRE_QM_AXI_CFG_MASK		0xffff
#define HPRE_QM_VFG_AX_MASK		0xff
#define HPRE_BD_USR_MASK		0x3
#define HPRE_CLUSTER_CORE_MASK		0xf

#define HPRE_AM_OOO_SHUTDOWN_ENB	0x301044
#define HPRE_AM_OOO_SHUTDOWN_ENABLE	BIT(0)
#define HPRE_WR_MSI_PORT		BIT(2)

#define HPRE_CORE_ECC_2BIT_ERR		BIT(1)
#define HPRE_OOO_ECC_2BIT_ERR		BIT(5)

#define HPRE_QM_BME_FLR			BIT(7)
#define HPRE_QM_PM_FLR			BIT(11)
#define HPRE_QM_SRIOV_FLR		BIT(12)

#define HPRE_VIA_MSI_DSM		1
#define HPRE_SQE_MASK_OFFSET		8
#define HPRE_SQE_MASK_LEN		24
static struct hisi_qm_list hpre_devices;
static const char hpre_name[] = "hisi_hpre";
static struct dentry *hpre_debugfs_root;
static const struct pci_device_id hpre_dev_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_VF_DEVICE_ID) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, hpre_dev_ids);

struct hpre_hw_error {
	u32 int_msk;
	const char *msg;
};

static const char * const hpre_debug_file_name[] = {
	[HPRE_CURRENT_QM]   = "current_qm",
	[HPRE_CLEAR_ENABLE] = "rdclr_en",
	[HPRE_CLUSTER_CTRL] = "cluster_ctrl",
};

static const struct hpre_hw_error hpre_hw_errors[] = {
	{ .int_msk = BIT(0), .msg = "core_ecc_1bit_err_int_set" },
	{ .int_msk = BIT(1), .msg = "core_ecc_2bit_err_int_set" },
	{ .int_msk = BIT(2), .msg = "dat_wb_poison_int_set" },
	{ .int_msk = BIT(3), .msg = "dat_rd_poison_int_set" },
	{ .int_msk = BIT(4), .msg = "bd_rd_poison_int_set" },
	{ .int_msk = BIT(5), .msg = "ooo_ecc_2bit_err_int_set" },
	{ .int_msk = BIT(6), .msg = "cluster1_shb_timeout_int_set" },
	{ .int_msk = BIT(7), .msg = "cluster2_shb_timeout_int_set" },
	{ .int_msk = BIT(8), .msg = "cluster3_shb_timeout_int_set" },
	{ .int_msk = BIT(9), .msg = "cluster4_shb_timeout_int_set" },
	{ .int_msk = GENMASK(15, 10), .msg = "ooo_rdrsp_err_int_set" },
	{ .int_msk = GENMASK(21, 16), .msg = "ooo_wrrsp_err_int_set" },
	{ }
};

static const u64 hpre_cluster_offsets[] = {
	[HPRE_CLUSTER0] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER0 * HPRE_CLSTR_ADDR_INTRVL,
	[HPRE_CLUSTER1] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER1 * HPRE_CLSTR_ADDR_INTRVL,
	[HPRE_CLUSTER2] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER2 * HPRE_CLSTR_ADDR_INTRVL,
	[HPRE_CLUSTER3] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER3 * HPRE_CLSTR_ADDR_INTRVL,
};

static const struct debugfs_reg32 hpre_cluster_dfx_regs[] = {
	{"CORES_EN_STATUS ",	HPRE_CORE_EN_OFFSET},
	{"CORES_INI_CFG ",	HPRE_CORE_INI_CFG_OFFSET},
	{"CORES_INI_STATUS ",	HPRE_CORE_INI_STATUS_OFFSET},
	{"CORES_HTBT_WARN ",	HPRE_CORE_HTBT_WARN_OFFSET},
	{"CORES_IS_SCHD ",	HPRE_CORE_IS_SCHD_OFFSET},
};

static const struct debugfs_reg32 hpre_com_dfx_regs[] = {
	{"READ_CLR_EN ",	HPRE_CTRL_CNT_CLR_CE},
	{"AXQOS ",		HPRE_VFG_AXQOS},
	{"AWUSR_CFG ",		HPRE_AWUSR_FP_CFG},
	{"QM_ARUSR_MCFG1 ",	QM_ARUSER_M_CFG_1},
	{"QM_AWUSR_MCFG1 ",	QM_AWUSER_M_CFG_1},
	{"BD_ENDIAN ",		HPRE_BD_ENDIAN},
	{"ECC_CHECK_CTRL ",	HPRE_ECC_BYPASS},
	{"RAS_INT_WIDTH ",	HPRE_RAS_WIDTH_CFG},
	{"POISON_BYPASS ",	HPRE_POISON_BYPASS},
	{"BD_ARUSER ",		HPRE_BD_ARUSR_CFG},
	{"BD_AWUSER ",		HPRE_BD_AWUSR_CFG},
	{"DATA_ARUSER ",	HPRE_DATA_RUSER_CFG},
	{"DATA_AWUSER ",	HPRE_DATA_WUSER_CFG},
	{"INT_STATUS ",		HPRE_INT_STATUS},
};

static const char *hpre_dfx_files[HPRE_DFX_FILE_NUM] = {
	"send_cnt",
	"recv_cnt",
	"send_fail_cnt",
	"send_busy_cnt",
	"over_thrhld_cnt",
	"overtime_thrhld",
	"invalid_req_cnt"
};

static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
	return q_num_set(val, kp, HPRE_PCI_DEVICE_ID);
}

static const struct kernel_param_ops hpre_pf_q_num_ops = {
	.set = pf_q_num_set,
	.get = param_get_int,
};

static u32 pf_q_num = HPRE_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &hpre_pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF of CS(1-1024)");

static const struct kernel_param_ops vfs_num_ops = {
	.set = vfs_num_set,
	.get = param_get_int,
};

static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");

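/*
 * Allocate one queue pair from the registered HPRE devices, preferring a
 * device on the caller's NUMA node.
 */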
struct hisi_qp *hpre_create_qp(void)
{
	int node = cpu_to_node(smp_processor_id());
	struct hisi_qp *qp = NULL;
	int ret;

	ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, 0, node, &qp);
	if (!ret)
		return qp;

	return NULL;
}

static int hpre_cfg_by_dsm(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	union acpi_object *obj;
	guid_t guid;

	if (guid_parse("b06b81ab-0134-4a45-9b0c-483447b95fa7", &guid)) {
		dev_err(dev, "Hpre GUID failed\n");
		return -EINVAL;
	}

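	/*
	 * Ask platform firmware, via the device _DSM (function
	 * HPRE_VIA_MSI_DSM), to switch the device over to MSI handling.
	 */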
	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid,
				0, HPRE_VIA_MSI_DSM, NULL);
	if (!obj) {
		dev_err(dev, "ACPI handle failed!\n");
		return -EIO;
	}

	ACPI_FREE(obj);

	return 0;
}

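/*
 * QM_PEH_AXUSER_CFG selects which events may trigger a function level reset
 * in hardware. Clear the BME- and SRIOV-triggered FLR enables and keep only
 * the PM-triggered FLR, so that toggling bus-master enable does not reset
 * the function underneath the driver.
 */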
static void disable_flr_of_bme(struct hisi_qm *qm)
{
	u32 val;

	val = readl(HPRE_ADDR(qm, QM_PEH_AXUSER_CFG));
	val &= ~(HPRE_QM_BME_FLR | HPRE_QM_SRIOV_FLR);
	val |= HPRE_QM_PM_FLR;
	writel(val, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG));
	writel(PEH_AXUSER_CFG_ENABLE, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG_ENABLE));
}

static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	unsigned long offset;
	int ret, i;
	u32 val;

	writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_ARUSER_M_CFG_ENABLE));
	writel(HPRE_QM_USR_CFG_MASK, HPRE_ADDR(qm, QM_AWUSER_M_CFG_ENABLE));
	writel_relaxed(HPRE_QM_AXI_CFG_MASK, HPRE_ADDR(qm, QM_AXI_M_CFG));

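	/*
	 * Mask the QM timeout abnormal interrupt: HPRE operations may
	 * legitimately take longer than the generic QM timeout.
	 */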
	val = readl_relaxed(HPRE_ADDR(qm, HPRE_QM_ABNML_INT_MASK));
	val |= BIT(HPRE_TIMEOUT_ABNML_BIT);
	writel_relaxed(val, HPRE_ADDR(qm, HPRE_QM_ABNML_INT_MASK));

	writel(0x1, HPRE_ADDR(qm, HPRE_TYPES_ENB));
	writel(HPRE_QM_VFG_AX_MASK, HPRE_ADDR(qm, HPRE_VFG_AXCACHE));
	writel(0x0, HPRE_ADDR(qm, HPRE_BD_ENDIAN));
	writel(0x0, HPRE_ADDR(qm, HPRE_INT_MASK));
	writel(0x0, HPRE_ADDR(qm, HPRE_RAS_ECC_1BIT_TH));
	writel(0x0, HPRE_ADDR(qm, HPRE_POISON_BYPASS));
	writel(0x0, HPRE_ADDR(qm, HPRE_COMM_CNT_CLR_CE));
	writel(0x0, HPRE_ADDR(qm, HPRE_ECC_BYPASS));

	writel(HPRE_BD_USR_MASK, HPRE_ADDR(qm, HPRE_BD_ARUSR_CFG));
	writel(HPRE_BD_USR_MASK, HPRE_ADDR(qm, HPRE_BD_AWUSR_CFG));
	writel(0x1, HPRE_ADDR(qm, HPRE_RDCHN_INI_CFG));
	ret = readl_relaxed_poll_timeout(HPRE_ADDR(qm, HPRE_RDCHN_INI_ST), val,
					 val & BIT(0),
					 HPRE_REG_RD_INTVRL_US,
					 HPRE_REG_RD_TMOUT_US);
	if (ret) {
		dev_err(dev, "read rd channel timeout fail!\n");
		return -ETIMEDOUT;
	}

	for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
		offset = i * HPRE_CLSTR_ADDR_INTRVL;

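		/* enable all cores in this cluster and start initialisation */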
		writel(HPRE_CLUSTER_CORE_MASK,
		       HPRE_ADDR(qm, offset + HPRE_CORE_ENB));
		writel(0x1, HPRE_ADDR(qm, offset + HPRE_CORE_INI_CFG));
		ret = readl_relaxed_poll_timeout(HPRE_ADDR(qm, offset +
					HPRE_CORE_INI_STATUS), val,
					((val & HPRE_CLUSTER_CORE_MASK) ==
					HPRE_CLUSTER_CORE_MASK),
					HPRE_REG_RD_INTVRL_US,
					HPRE_REG_RD_TMOUT_US);
		if (ret) {
			dev_err(dev,
				"cluster %d int st status timeout!\n", i);
			return -ETIMEDOUT;
		}
	}

	ret = hpre_cfg_by_dsm(qm);
	if (ret)
		dev_err(dev, "acpi_evaluate_dsm err.\n");

	disable_flr_of_bme(qm);

	return ret;
}

static void hpre_cnt_regs_clear(struct hisi_qm *qm)
{
	unsigned long offset;
	int i;

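	/* reset the registers backing the current_qm debugfs selection */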
	writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
	writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);

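	/* clear the per-cluster inquiry registers (cluster_ctrl) */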
	for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
		offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL;
		writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY);
	}

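	/* clear the read-clear enable bit (rdclr_en) */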
	writel(0x0, qm->io_base + HPRE_CTRL_CNT_CLR_CE);

	hisi_qm_debug_regs_clear(qm);
}

static void hpre_hw_error_disable(struct hisi_qm *qm)
{
	u32 val;

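	/* mask all HPRE hw error interrupts */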
	writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_INT_MASK);

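	/* clear the AM out-of-order shutdown enable bit */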
	val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
	val &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE;
	writel(val, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}

static void hpre_hw_error_enable(struct hisi_qm *qm)
{
	u32 val;

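	/* clear any latched HPRE hw error source bits first */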
	writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_HAC_SOURCE_INT);

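	/* unmask HPRE hw error interrupts and set the RAS CE/NFE/FE enables */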
	writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK);
	writel(HPRE_HAC_RAS_CE_ENABLE, qm->io_base + HPRE_RAS_CE_ENB);
	writel(HPRE_HAC_RAS_NFE_ENABLE, qm->io_base + HPRE_RAS_NFE_ENB);
	writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);

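	/* set the AM out-of-order shutdown enable bit */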
	val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
	val |= HPRE_AM_OOO_SHUTDOWN_ENABLE;
	writel(val, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}

static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)
{
	struct hpre *hpre = container_of(file->debug, struct hpre, debug);

	return &hpre->qm;
}

static u32 hpre_current_qm_read(struct hpre_debugfs_file *file)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);

	return readl(qm->io_base + QM_DFX_MB_CNT_VF);
}

static int hpre_current_qm_write(struct hpre_debugfs_file *file, u32 val)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);
	u32 num_vfs = qm->vfs_num;
	u32 vfq_num, tmp;

	if (val > num_vfs)
		return -EINVAL;

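	/* work out how many queue pairs the selected function owns */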
	if (val == 0) {
		qm->debug.curr_qm_qp_num = qm->qp_num;
	} else {
		vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs;
		if (val == num_vfs) {
			qm->debug.curr_qm_qp_num =
			qm->ctrl_qp_num - qm->qp_num - (num_vfs - 1) * vfq_num;
		} else {
			qm->debug.curr_qm_qp_num = vfq_num;
		}
	}

	writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
	writel(val, qm->io_base + QM_DFX_DB_CNT_VF);

	tmp = val |
	      (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
	writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);

	tmp = val |
	      (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
	writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);

	return 0;
}

static u32 hpre_clear_enable_read(struct hpre_debugfs_file *file)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);

	return readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) &
	       HPRE_CTRL_CNT_CLR_CE_BIT;
}

static int hpre_clear_enable_write(struct hpre_debugfs_file *file, u32 val)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);
	u32 tmp;

	if (val != 1 && val != 0)
		return -EINVAL;

	tmp = (readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) &
	       ~HPRE_CTRL_CNT_CLR_CE_BIT) | val;
	writel(tmp, qm->io_base + HPRE_CTRL_CNT_CLR_CE);

	return 0;
}

static u32 hpre_cluster_inqry_read(struct hpre_debugfs_file *file)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);
	int cluster_index = file->index - HPRE_CLUSTER_CTRL;
	unsigned long offset = HPRE_CLSTR_BASE +
			       cluster_index * HPRE_CLSTR_ADDR_INTRVL;

	return readl(qm->io_base + offset + HPRE_CLSTR_ADDR_INQRY_RSLT);
}

static int hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);
	int cluster_index = file->index - HPRE_CLUSTER_CTRL;
	unsigned long offset = HPRE_CLSTR_BASE + cluster_index *
			       HPRE_CLSTR_ADDR_INTRVL;

	writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY);

	return 0;
}

static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
				    size_t count, loff_t *pos)
{
	struct hpre_debugfs_file *file = filp->private_data;
	char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
	u32 val;
	int ret;

	spin_lock_irq(&file->lock);
	switch (file->type) {
	case HPRE_CURRENT_QM:
		val = hpre_current_qm_read(file);
		break;
	case HPRE_CLEAR_ENABLE:
		val = hpre_clear_enable_read(file);
		break;
	case HPRE_CLUSTER_CTRL:
		val = hpre_cluster_inqry_read(file);
		break;
	default:
		spin_unlock_irq(&file->lock);
		return -EINVAL;
	}
	spin_unlock_irq(&file->lock);
	ret = snprintf(tbuf, HPRE_DBGFS_VAL_MAX_LEN, "%u\n", val);
	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}

static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
				     size_t count, loff_t *pos)
{
	struct hpre_debugfs_file *file = filp->private_data;
	char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
	unsigned long val;
	int len, ret;

	if (*pos != 0)
		return 0;

	if (count >= HPRE_DBGFS_VAL_MAX_LEN)
		return -ENOSPC;

	len = simple_write_to_buffer(tbuf, HPRE_DBGFS_VAL_MAX_LEN - 1,
				     pos, buf, count);
	if (len < 0)
		return len;

	tbuf[len] = '\0';
	if (kstrtoul(tbuf, 0, &val))
		return -EFAULT;

	spin_lock_irq(&file->lock);
	switch (file->type) {
	case HPRE_CURRENT_QM:
		ret = hpre_current_qm_write(file, val);
		if (ret)
			goto err_input;
		break;
	case HPRE_CLEAR_ENABLE:
		ret = hpre_clear_enable_write(file, val);
		if (ret)
			goto err_input;
		break;
	case HPRE_CLUSTER_CTRL:
		ret = hpre_cluster_inqry_write(file, val);
		if (ret)
			goto err_input;
		break;
	default:
		ret = -EINVAL;
		goto err_input;
	}
	spin_unlock_irq(&file->lock);

	return count;

err_input:
	spin_unlock_irq(&file->lock);
	return ret;
}

static const struct file_operations hpre_ctrl_debug_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = hpre_ctrl_debug_read,
	.write = hpre_ctrl_debug_write,
};

static int hpre_debugfs_atomic64_get(void *data, u64 *val)
{
	struct hpre_dfx *dfx_item = data;

	*val = atomic64_read(&dfx_item->value);

	return 0;
}

static int hpre_debugfs_atomic64_set(void *data, u64 val)
{
	struct hpre_dfx *dfx_item = data;
	struct hpre_dfx *hpre_dfx = NULL;

	if (dfx_item->type == HPRE_OVERTIME_THRHLD) {
		hpre_dfx = dfx_item - HPRE_OVERTIME_THRHLD;
		atomic64_set(&hpre_dfx[HPRE_OVER_THRHLD_CNT].value, 0);
	} else if (val) {
		return -EINVAL;
	}

	atomic64_set(&dfx_item->value, val);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(hpre_atomic64_ops, hpre_debugfs_atomic64_get,
			 hpre_debugfs_atomic64_set, "%llu\n");

static int hpre_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
				    enum hpre_ctrl_dbgfs_file type, int indx)
{
	struct hpre *hpre = container_of(qm, struct hpre, qm);
	struct hpre_debug *dbg = &hpre->debug;
	struct dentry *file_dir;

	if (dir)
		file_dir = dir;
	else
		file_dir = qm->debug.debug_root;

	if (type >= HPRE_DEBUG_FILE_NUM)
		return -EINVAL;

	spin_lock_init(&dbg->files[indx].lock);
	dbg->files[indx].debug = dbg;
	dbg->files[indx].type = type;
	dbg->files[indx].index = indx;
	debugfs_create_file(hpre_debug_file_name[type], 0600, file_dir,
			    dbg->files + indx, &hpre_ctrl_debug_fops);

	return 0;
}

static int hpre_pf_comm_regs_debugfs_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	struct debugfs_regset32 *regset;

	regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return -ENOMEM;

	regset->regs = hpre_com_dfx_regs;
	regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs);
	regset->base = qm->io_base;

	debugfs_create_regset32("regs", 0444, qm->debug.debug_root, regset);
	return 0;
}

static int hpre_cluster_debugfs_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	char buf[HPRE_DBGFS_VAL_MAX_LEN];
	struct debugfs_regset32 *regset;
	struct dentry *tmp_d;
	int i, ret;

	for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
		ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i);
		if (ret < 0)
			return -EINVAL;
		tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);

		regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
		if (!regset)
			return -ENOMEM;

		regset->regs = hpre_cluster_dfx_regs;
		regset->nregs = ARRAY_SIZE(hpre_cluster_dfx_regs);
		regset->base = qm->io_base + hpre_cluster_offsets[i];

		debugfs_create_regset32("regs", 0444, tmp_d, regset);
		ret = hpre_create_debugfs_file(qm, tmp_d, HPRE_CLUSTER_CTRL,
					       i + HPRE_CLUSTER_CTRL);
		if (ret)
			return ret;
	}

	return 0;
}

static int hpre_ctrl_debug_init(struct hisi_qm *qm)
{
	int ret;

	ret = hpre_create_debugfs_file(qm, NULL, HPRE_CURRENT_QM,
				       HPRE_CURRENT_QM);
	if (ret)
		return ret;

	ret = hpre_create_debugfs_file(qm, NULL, HPRE_CLEAR_ENABLE,
				       HPRE_CLEAR_ENABLE);
	if (ret)
		return ret;

	ret = hpre_pf_comm_regs_debugfs_init(qm);
	if (ret)
		return ret;

	return hpre_cluster_debugfs_init(qm);
}

static void hpre_dfx_debug_init(struct hisi_qm *qm)
{
	struct hpre *hpre = container_of(qm, struct hpre, qm);
	struct hpre_dfx *dfx = hpre->debug.dfx;
	struct dentry *parent;
	int i;

	parent = debugfs_create_dir("hpre_dfx", qm->debug.debug_root);
	for (i = 0; i < HPRE_DFX_FILE_NUM; i++) {
		dfx[i].type = i;
		debugfs_create_file(hpre_dfx_files[i], 0644, parent, &dfx[i],
				    &hpre_atomic64_ops);
	}
}

static int hpre_debugfs_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
						  hpre_debugfs_root);

	qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET;
	qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN;
	ret = hisi_qm_debug_init(qm);
	if (ret)
		goto failed_to_create;

	if (qm->pdev->device == HPRE_PCI_DEVICE_ID) {
		ret = hpre_ctrl_debug_init(qm);
		if (ret)
			goto failed_to_create;
	}

	hpre_dfx_debug_init(qm);

	return 0;

failed_to_create:
	debugfs_remove_recursive(qm->debug.debug_root);
	return ret;
}

static void hpre_debugfs_exit(struct hisi_qm *qm)
{
	debugfs_remove_recursive(qm->debug.debug_root);
}

static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
	if (pdev->revision == QM_HW_V1) {
		pci_warn(pdev, "HPRE version 1 is not supported!\n");
		return -EINVAL;
	}

	qm->pdev = pdev;
	qm->ver = pdev->revision;
	qm->sqe_size = HPRE_SQE_SIZE;
	qm->dev_name = hpre_name;

	qm->fun_type = (pdev->device == HPRE_PCI_DEVICE_ID) ?
		       QM_HW_PF : QM_HW_VF;
	if (qm->fun_type == QM_HW_PF) {
		qm->qp_base = HPRE_PF_DEF_Q_BASE;
		qm->qp_num = pf_q_num;
		qm->debug.curr_qm_qp_num = pf_q_num;
		qm->qm_list = &hpre_devices;
	}

	return hisi_qm_init(qm);
}

static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
	const struct hpre_hw_error *err = hpre_hw_errors;
	struct device *dev = &qm->pdev->dev;

	while (err->msg) {
		if (err->int_msk & err_sts)
			dev_warn(dev, "%s [error status=0x%x] found\n",
				 err->msg, err->int_msk);
		err++;
	}
}

static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + HPRE_HAC_INT_STATUS);
}

static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
	writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);
}

static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
{
	u32 value;

	value = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
	writel(value & ~HPRE_AM_OOO_SHUTDOWN_ENABLE,
	       HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
	writel(value | HPRE_AM_OOO_SHUTDOWN_ENABLE,
	       HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
}

static const struct hisi_qm_err_ini hpre_err_ini = {
	.hw_init		= hpre_set_user_domain_and_cache,
	.hw_err_enable		= hpre_hw_error_enable,
	.hw_err_disable		= hpre_hw_error_disable,
	.get_dev_hw_err_status	= hpre_get_hw_err_status,
	.clear_dev_hw_err_status = hpre_clear_hw_err_status,
	.log_dev_hw_err		= hpre_log_hw_error,
	.open_axi_master_ooo	= hpre_open_axi_master_ooo,
	.err_info		= {
		.ce			= QM_BASE_CE,
		.nfe			= QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT,
		.fe			= 0,
		.ecc_2bits_mask		= HPRE_CORE_ECC_2BIT_ERR |
					  HPRE_OOO_ECC_2BIT_ERR,
		.msi_wr_port		= HPRE_WR_MSI_PORT,
		.acpi_rst		= "HRST",
	}
};

static int hpre_pf_probe_init(struct hpre *hpre)
{
	struct hisi_qm *qm = &hpre->qm;
	int ret;

	qm->ctrl_qp_num = HPRE_QUEUE_NUM_V2;

	ret = hpre_set_user_domain_and_cache(qm);
	if (ret)
		return ret;

	qm->err_ini = &hpre_err_ini;
	hisi_qm_dev_err_init(qm);

	return 0;
}

static int hpre_probe_init(struct hpre *hpre)
{
	struct hisi_qm *qm = &hpre->qm;
	int ret;

	if (qm->fun_type == QM_HW_PF) {
		ret = hpre_pf_probe_init(hpre);
		if (ret)
			return ret;
	}

	return 0;
}

static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct hisi_qm *qm;
	struct hpre *hpre;
	int ret;

	hpre = devm_kzalloc(&pdev->dev, sizeof(*hpre), GFP_KERNEL);
	if (!hpre)
		return -ENOMEM;

	qm = &hpre->qm;
	ret = hpre_qm_init(qm, pdev);
	if (ret) {
		pci_err(pdev, "Failed to init HPRE QM (%d)!\n", ret);
		return ret;
	}

	ret = hpre_probe_init(hpre);
	if (ret) {
		pci_err(pdev, "Failed to probe (%d)!\n", ret);
		goto err_with_qm_init;
	}

	ret = hisi_qm_start(qm);
	if (ret)
		goto err_with_err_init;

	ret = hpre_debugfs_init(qm);
	if (ret)
		dev_warn(&pdev->dev, "init debugfs fail!\n");

	hisi_qm_add_to_list(qm, &hpre_devices);

	ret = hpre_algs_register();
	if (ret < 0) {
		pci_err(pdev, "fail to register algs to crypto!\n");
		goto err_with_qm_start;
	}

	if (qm->fun_type == QM_HW_PF && vfs_num) {
		ret = hisi_qm_sriov_enable(pdev, vfs_num);
		if (ret < 0)
			goto err_with_crypto_register;
	}

	return 0;

err_with_crypto_register:
	hpre_algs_unregister();

err_with_qm_start:
	hisi_qm_del_from_list(qm, &hpre_devices);
	hpre_debugfs_exit(qm);
	hisi_qm_stop(qm);

err_with_err_init:
	hisi_qm_dev_err_uninit(qm);

err_with_qm_init:
	hisi_qm_uninit(qm);

	return ret;
}

static void hpre_remove(struct pci_dev *pdev)
{
	struct hpre *hpre = pci_get_drvdata(pdev);
	struct hisi_qm *qm = &hpre->qm;
	int ret;

	hpre_algs_unregister();
	hisi_qm_del_from_list(qm, &hpre_devices);
	if (qm->fun_type == QM_HW_PF && qm->vfs_num) {
		ret = hisi_qm_sriov_disable(pdev);
		if (ret) {
			pci_err(pdev, "Disable SRIOV fail!\n");
			return;
		}
	}
	if (qm->fun_type == QM_HW_PF) {
		hpre_cnt_regs_clear(qm);
		qm->debug.curr_qm_qp_num = 0;
	}

	hpre_debugfs_exit(qm);
	hisi_qm_stop(qm);
	hisi_qm_dev_err_uninit(qm);
	hisi_qm_uninit(qm);
}

static const struct pci_error_handlers hpre_err_handler = {
	.error_detected		= hisi_qm_dev_err_detected,
	.slot_reset		= hisi_qm_dev_slot_reset,
	.reset_prepare		= hisi_qm_reset_prepare,
	.reset_done		= hisi_qm_reset_done,
};

static struct pci_driver hpre_pci_driver = {
	.name			= hpre_name,
	.id_table		= hpre_dev_ids,
	.probe			= hpre_probe,
	.remove			= hpre_remove,
	.sriov_configure	= IS_ENABLED(CONFIG_PCI_IOV) ?
				  hisi_qm_sriov_configure : NULL,
	.err_handler		= &hpre_err_handler,
};

static void hpre_register_debugfs(void)
{
	if (!debugfs_initialized())
		return;

	hpre_debugfs_root = debugfs_create_dir(hpre_name, NULL);
}

static void hpre_unregister_debugfs(void)
{
	debugfs_remove_recursive(hpre_debugfs_root);
}

static int __init hpre_init(void)
{
	int ret;

	hisi_qm_init_list(&hpre_devices);
	hpre_register_debugfs();

	ret = pci_register_driver(&hpre_pci_driver);
	if (ret) {
		hpre_unregister_debugfs();
		pr_err("hpre: can't register hisi hpre driver.\n");
	}

	return ret;
}

static void __exit hpre_exit(void)
{
	pci_unregister_driver(&hpre_pci_driver);
	hpre_unregister_debugfs();
}

module_init(hpre_init);
module_exit(hpre_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
MODULE_DESCRIPTION("Driver for HiSilicon HPRE accelerator");