#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define HCLGEVF_NAME	"hclgevf"

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
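	/* required last entry */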
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG,
					 HCLGEVF_CMDQ_TX_ADDR_H_REG,
					 HCLGEVF_CMDQ_TX_DEPTH_REG,
					 HCLGEVF_CMDQ_TX_TAIL_REG,
					 HCLGEVF_CMDQ_TX_HEAD_REG,
					 HCLGEVF_CMDQ_RX_ADDR_L_REG,
					 HCLGEVF_CMDQ_RX_ADDR_H_REG,
					 HCLGEVF_CMDQ_RX_DEPTH_REG,
					 HCLGEVF_CMDQ_RX_TAIL_REG,
					 HCLGEVF_CMDQ_RX_HEAD_REG,
					 HCLGEVF_VECTOR0_CMDQ_SRC_REG,
					 HCLGEVF_CMDQ_INTR_STS_REG,
					 HCLGEVF_CMDQ_INTR_EN_REG,
					 HCLGEVF_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
					   HCLGEVF_RST_ING,
					   HCLGEVF_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
					 HCLGEVF_RING_RX_ADDR_H_REG,
					 HCLGEVF_RING_RX_BD_NUM_REG,
					 HCLGEVF_RING_RX_BD_LENGTH_REG,
					 HCLGEVF_RING_RX_MERGE_EN_REG,
					 HCLGEVF_RING_RX_TAIL_REG,
					 HCLGEVF_RING_RX_HEAD_REG,
					 HCLGEVF_RING_RX_FBD_NUM_REG,
					 HCLGEVF_RING_RX_OFFSET_REG,
					 HCLGEVF_RING_RX_FBD_OFFSET_REG,
					 HCLGEVF_RING_RX_STASH_REG,
					 HCLGEVF_RING_RX_BD_ERR_REG,
					 HCLGEVF_RING_TX_ADDR_L_REG,
					 HCLGEVF_RING_TX_ADDR_H_REG,
					 HCLGEVF_RING_TX_BD_NUM_REG,
					 HCLGEVF_RING_TX_PRIORITY_REG,
					 HCLGEVF_RING_TX_TC_REG,
					 HCLGEVF_RING_TX_MERGE_EN_REG,
					 HCLGEVF_RING_TX_TAIL_REG,
					 HCLGEVF_RING_TX_HEAD_REG,
					 HCLGEVF_RING_TX_FBD_NUM_REG,
					 HCLGEVF_RING_TX_OFFSET_REG,
					 HCLGEVF_RING_TX_EBD_NUM_REG,
					 HCLGEVF_RING_TX_EBD_OFFSET_REG,
					 HCLGEVF_RING_TX_BD_ERR_REG,
					 HCLGEVF_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
					     HCLGEVF_TQP_INTR_GL0_REG,
					     HCLGEVF_TQP_INTR_GL1_REG,
					     HCLGEVF_TQP_INTR_GL2_REG,
					     HCLGEVF_TQP_INTR_RL_REG};

static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
	struct hnae3_handle *handle)
{
	return container_of(handle, struct hclgevf_dev, nic);
}

static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);
	}

	return 0;
}

static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * 2;
}

static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}

static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = (char *)data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}

static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
	u8 resp_msg;
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
				      true, &resp_msg, sizeof(u8));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get TC info from PF failed %d",
			status);
		return status;
	}

	hdev->hw_tc_map = resp_msg;

	return 0;
}

static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	8
	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
				      true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d",
			status);
		return status;
	}

	memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
	memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16));

	return 0;
}

static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2], resp_data[2];
	u16 qid_in_pf = 0;
	int ret;

	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QID_IN_PF, 0, msg_data,
				   2, true, resp_data, 2);
	if (!ret)
		qid_in_pf = *(u16 *)resp_data;

	return qid_in_pf;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclgevf_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
			i * HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	int i;

	kinfo = &nic->kinfo;
	kinfo->num_tc = 0;
	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			kinfo->num_tc++;

	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
	new_tqps = kinfo->rss_size * kinfo->num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	int status;
	u8 resp_msg;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
				      0, false, &resp_msg, sizeof(u8));
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *client;

	client = handle->client;

	link_state =
		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;

	if (link_state != hdev->hw.mac.link) {
		client->ops->link_status_change(handle, !!link_state);
		hdev->hw.mac.link = link_state;
	}
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;

	if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
		dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
			hdev->ae_dev->dev_type);
		return -EINVAL;
	}

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
				    const u8 hfunc, const u8 *key)
{
	struct hclgevf_rss_config_cmd *req;
	struct hclgevf_desc desc;
	int key_offset;
	int key_size;
	int ret;

	req = (struct hclgevf_rss_config_cmd *)desc.data;

	for (key_offset = 0; key_offset < 3; key_offset++) {
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
					     false);

		req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
		req->hash_config |=
			(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);

		if (key_offset == 2)
			key_size =
			HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGEVF_RSS_HASH_KEY_NUM;

		memcpy(req->hash_key,
		       key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);

		ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_KEY_SIZE;
}

static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_IND_TBL_SIZE;
}

static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclgevf_desc desc;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}

static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclgevf_desc desc;
	u16 roundup_size;
	int status;
	int i;

	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
			      (tc_valid[i] & 0x1));
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
				HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
				HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to set rss tc mode\n", status);

	return status;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i;

	if (handle->pdev->revision >= 0x21) {
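		/* get the RSS hash algorithm in use */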
		if (hfunc) {
			switch (rss_cfg->hash_algo) {
			case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
				*hfunc = ETH_RSS_HASH_TOP;
				break;
			case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
				*hfunc = ETH_RSS_HASH_XOR;
				break;
			default:
				*hfunc = ETH_RSS_HASH_UNKNOWN;
				break;
			}
		}

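		/* get the RSS key required by the user */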
		if (key)
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
	}

	if (indir)
		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return 0;
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret, i;

	if (handle->pdev->revision >= 0x21) {
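		/* set the RSS hash key if specified by the user */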
		if (key) {
			switch (hfunc) {
			case ETH_RSS_HASH_TOP:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
				break;
			case ETH_RSS_HASH_XOR:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_SIMPLE;
				break;
			case ETH_RSS_HASH_NO_CHANGE:
				break;
			default:
				return -EINVAL;
			}

			ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
						       key);
			if (ret)
				return ret;

			memcpy(rss_cfg->rss_hash_key, key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

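	/* update the shadow RSS table with user specified qids */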
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

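	/* update the hardware */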
	return hclgevf_set_rss_indir_table(hdev);
}

static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGEVF_D_PORT_BIT;
	else
		hash_sets &= ~HCLGEVF_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGEVF_S_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGEVF_D_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGEVF_V_TAG_BIT;

	return hash_sets;
}

static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	u8 tuple_sets;
	int ret;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	if (nfc->data &
	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclgevf_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
	return 0;
}

static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 tuple_sets;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	nfc->data = 0;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGEVF_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGEVF_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGEVF_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGEVF_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}

static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
				       struct hclgevf_rss_cfg *rss_cfg)
{
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}

static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector_id,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_ring_chain_node *node;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int i = 0;
	int status;
	u8 type;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	for (node = ring_chain; node; node = node->next) {
		int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
				 HCLGE_MBX_RING_NODE_VARIABLE_NUM * i;

		if (i == 0) {
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			type = en ?
				HCLGE_MBX_MAP_RING_TO_VECTOR :
				HCLGE_MBX_UNMAP_RING_TO_VECTOR;
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}

		req->msg[idx_offset] =
			hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
		req->msg[idx_offset + 1] = node->tqp_index;
		req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx,
							   HNAE3_RING_GL_IDX_M,
							   HNAE3_RING_GL_IDX_S);

		i++;
		if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
		     HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
		     HCLGE_MBX_RING_NODE_VARIABLE_NUM) ||
		    !node->next) {
			req->msg[2] = i;

			status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}
	}

	return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
		return vector_id;
	}

	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(struct hnae3_handle *handle,
					  int vector,
					  struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector = %d, ret = %d\n",
			vector_id, ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"hclgevf_put_vector get vector index fail. ret = %d\n",
			vector_id);
		return vector_id;
	}

	hclgevf_free_vector(hdev, vector_id);

	return 0;
}

static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_uc_pmc, bool en_mc_pmc)
{
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
	req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
	req->msg[1] = en_uc_pmc ? 1 : 0;
	req->msg[2] = en_mc_pmc ? 1 : 0;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", status);

	return status;
}

static int hclgevf_set_promisc_mode(struct hnae3_handle *handle,
				    bool en_uc_pmc, bool en_mc_pmc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc);
}

static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
			      int stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGEVF_TQP_ENABLE_B;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"TQP enable fail, status = %d.\n", status);

	return status;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	u8 *new_mac_addr = (u8 *)p;
	u8 msg_data[ETH_ALEN * 2];
	u16 subcode;
	int status;

	ether_addr_copy(msg_data, new_mac_addr);
	ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);

	subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
			HCLGE_MBX_MAC_VLAN_UC_MODIFY;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				      subcode, msg_data, ETH_ALEN * 2,
				      true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_MSG_LEN 5
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];

	if (vlan_id > 4095)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	msg_data[0] = is_kill;
	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
	memcpy(&msg_data[3], &proto, sizeof(proto));
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_FILTER, msg_data,
				    HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
}

static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data;

	msg_data = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data,
				    1, false, NULL, 0);
}

static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2];
	int ret;

	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

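	/* disable the vf queue before sending the queue reset msg to PF */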
	ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
	if (ret)
		return ret;

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
				    2, true, NULL, 0);
}

static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MTU, 0, (u8 *)&new_mtu,
				    sizeof(new_mtu), true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;
	int ret;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
			type, ret);

	return ret;
}

static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
}

static int hclgevf_flr_poll_timeout(struct hclgevf_dev *hdev,
				    unsigned long delay_us,
				    unsigned long wait_cnt)
{
	unsigned long cnt = 0;

	while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
	       cnt++ < wait_cnt)
		usleep_range(delay_us, delay_us * 2);

	if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
		dev_err(&hdev->pdev->dev,
			"flr wait timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US	20000
#define HCLGEVF_RESET_WAIT_CNT	2000
#define HCLGEVF_RESET_WAIT_TIMEOUT_US	\
	(HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)

	u32 val;
	int ret;

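	/* wait to check the hardware reset completion status */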
	val = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
	dev_info(&hdev->pdev->dev, "checking vf resetting status: %x\n", val);

	if (hdev->reset_type == HNAE3_FLR_RESET)
		return hclgevf_flr_poll_timeout(hdev,
						HCLGEVF_RESET_WAIT_US,
						HCLGEVF_RESET_WAIT_CNT);

	ret = readl_poll_timeout(hdev->hw.io_base + HCLGEVF_RST_ING, val,
				 !(val & HCLGEVF_RST_ING_BITS),
				 HCLGEVF_RESET_WAIT_US,
				 HCLGEVF_RESET_WAIT_TIMEOUT_US);

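	/* hardware completion status should be available by this time */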
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"couldn't get reset done status from h/w, timeout!\n");
		return ret;
	}

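	/* wait a while longer so a possible PF-asserted stack reset can
	 * settle (assumed intent of the fixed delay below)
	 */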
	msleep(5000);

	return 0;
}

static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
	int ret;

	ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

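	/* re-initialize the hclge device */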
	ret = hclgevf_reset_hdev(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"hclge device re-init failed, VF is disabled!\n");
		return ret;
	}

	ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	return 0;
}

static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
{
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_VF_FUNC_RESET:
		ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
					   0, true, NULL, sizeof(u8));
		break;
	case HNAE3_FLR_RESET:
		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
		break;
	default:
		break;
	}

	set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);

	dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n",
		 hdev->reset_type, ret);

	return ret;
}

static int hclgevf_reset(struct hclgevf_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	int ret;

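	/* update the ae_dev reset status as well, in case the enet layer
	 * wants to know whether the device is undergoing reset
	 */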
	ae_dev->reset_type = hdev->reset_type;
	hdev->reset_count++;
	rtnl_lock();

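	/* bring down the nic to stop any ongoing TX/RX */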
	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	ret = hclgevf_reset_prepare_wait(hdev);
	if (ret)
		goto err_reset;

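	/* check whether the VF could successfully fetch the hardware reset
	 * completion status from the hardware
	 */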
	ret = hclgevf_reset_wait(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to fetch H/W reset completion status\n",
			ret);
		goto err_reset;
	}

	rtnl_lock();

	ret = hclgevf_reset_stack(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
		goto err_reset_lock;
	}

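	/* bring up the nic to enable TX/RX again */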
	ret = hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	hdev->last_reset_time = jiffies;
	ae_dev->reset_type = HNAE3_NONE_RESET;

	return ret;
err_reset_lock:
	rtnl_unlock();
err_reset:
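	/* reset failed: re-initialize the command queue so the VF can still
	 * receive a later, higher-level reset asserted by the PF
	 */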
	hclgevf_cmd_init(hdev);
	dev_err(&hdev->pdev->dev, "failed to reset VF\n");

	return ret;
}

static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
						     unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

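	/* return the highest priority reset level amongst all */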
	if (test_bit(HNAE3_VF_RESET, addr)) {
		rst_level = HNAE3_VF_RESET;
		clear_bit(HNAE3_VF_RESET, addr);
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
		rst_level = HNAE3_VF_FULL_RESET;
		clear_bit(HNAE3_VF_FULL_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_PF_FUNC_RESET;
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_FUNC_RESET;
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
		rst_level = HNAE3_FLR_RESET;
		clear_bit(HNAE3_FLR_RESET, addr);
	}

	return rst_level;
}

static void hclgevf_reset_event(struct pci_dev *pdev,
				struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct hclgevf_dev *hdev = ae_dev->priv;

	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");

	if (hdev->default_reset_request)
		hdev->reset_level =
			hclgevf_get_reset_level(hdev,
						&hdev->default_reset_request);
	else
		hdev->reset_level = HNAE3_VF_FUNC_RESET;

	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
	hclgevf_reset_task_schedule(hdev);

	hdev->last_reset_time = jiffies;
}

static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					  enum hnae3_reset_type rst_type)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}

static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGEVF_FLR_WAIT_MS	100
#define HCLGEVF_FLR_WAIT_CNT	50
	struct hclgevf_dev *hdev = ae_dev->priv;
	int cnt = 0;

	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
	hclgevf_reset_event(hdev->pdev, NULL);

	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
	       cnt++ < HCLGEVF_FLR_WAIT_CNT)
		msleep(HCLGEVF_FLR_WAIT_MS);

	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
		dev_err(&hdev->pdev->dev,
			"flr wait down timeout: %d\n", cnt);
}

static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->fw_version;
}

static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
	struct hclgevf_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev,
					    HCLGEVF_MISC_VECTOR_NUM);
	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;

	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->rst_service_task);
	}
}

void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->mbx_service_task);
	}
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->service_task);
}

static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
{
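	/* if we have any pending mailbox event then schedule the mbx task */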
	if (hdev->mbx_event_pending)
		hclgevf_mbx_task_schedule(hdev);

	if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
		hclgevf_reset_task_schedule(hdev);
}

static void hclgevf_service_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + 5 * HZ);

	hclgevf_task_schedule(hdev);
}

static void hclgevf_reset_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev =
		container_of(work, struct hclgevf_dev, rst_service_task);
	int ret;

	if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);

	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
			       &hdev->reset_state)) {
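		/* PF has initiated a reset of the hardware; poll until the
		 * hardware completes the reset sequence, then re-initialize
		 * the client and the ae device
		 */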
		hdev->reset_attempts = 0;

		hdev->last_reset_time = jiffies;
		while ((hdev->reset_type =
			hclgevf_get_reset_level(hdev, &hdev->reset_pending))
		       != HNAE3_NONE_RESET) {
			ret = hclgevf_reset(hdev);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"VF stack reset failed %d.\n", ret);
		}
	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
				      &hdev->reset_state)) {
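		/* a reset has been requested (e.g. by the VF enet watchdog);
		 * retry the requested level a few times before escalating to
		 * a full VF reset
		 */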
		if (hdev->reset_attempts > 3) {
			set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);

			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		} else {
			hdev->reset_attempts++;

			set_bit(hdev->reset_level, &hdev->reset_pending);
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		}
		hclgevf_reset_task_schedule(hdev);
	}

	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static void hclgevf_mailbox_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, mbx_service_task);

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclgevf_mbx_async_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_keep_alive_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer);

	schedule_work(&hdev->keep_alive_task);
	mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
}

static void hclgevf_keep_alive_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;
	u8 respmsg;
	int ret;

	hdev = container_of(work, struct hclgevf_dev, keep_alive_task);
	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL,
				   0, false, &respmsg, sizeof(u8));
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF sends keep alive cmd failed(=%d)\n", ret);
}

static void hclgevf_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, service_task);

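	/* request the link status from the PF; the PF also notifies the VF
	 * of such changes, so this polling may become redundant
	 */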
	hclgevf_request_link_info(hdev);

	hclgevf_deferred_task_schedule(hdev);

	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
}

static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
						      u32 *clearval)
{
	u32 cmdq_src_reg, rst_ing_reg;

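	/* fetch the events from their corresponding regs */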
	cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
					HCLGEVF_VECTOR0_CMDQ_SRC_REG);

	if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_src_reg) {
		rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
		dev_info(&hdev->pdev->dev,
			 "receive reset interrupt 0x%x!\n", rst_ing_reg);
		set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGEVF_VECTOR0_EVENT_RST;
	}

	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGEVF_VECTOR0_EVENT_MBX;
	}

	dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");

	return HCLGEVF_VECTOR0_EVENT_OTHER;
}

static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ? 1 : 0, vector->addr);
}

static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
	enum hclgevf_evt_cause event_cause;
	struct hclgevf_dev *hdev = data;
	u32 clearval;

	hclgevf_enable_vector(&hdev->misc_vector, false);
	event_cause = hclgevf_check_evt_cause(hdev, &clearval);

	switch (event_cause) {
	case HCLGEVF_VECTOR0_EVENT_RST:
		hclgevf_reset_task_schedule(hdev);
		break;
	case HCLGEVF_VECTOR0_EVENT_MBX:
		hclgevf_mbx_handler(hdev);
		break;
	default:
		break;
	}

	if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
		hclgevf_clear_event_cause(hdev, clearval);
		hclgevf_enable_vector(&hdev->misc_vector, true);
	}

	return IRQ_HANDLED;
}

static int hclgevf_configure(struct hclgevf_dev *hdev)
{
	int ret;

	hdev->hw.mac.media_type = HNAE3_MEDIA_TYPE_NONE;

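	/* get queue configuration from PF */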
	ret = hclgevf_get_queue_info(hdev);
	if (ret)
		return ret;

	return hclgevf_get_tc_info(hdev);
}

static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	return 0;
}

static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *roce = &hdev->roce;
	struct hnae3_handle *nic = &hdev->nic;

	roce->rinfo.num_vectors = hdev->num_roce_msix;

	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
	    hdev->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = hdev->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
{
	struct hclgevf_cfg_gro_status_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
				     false);
	req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF GRO hardware config cmd failed, ret = %d.\n", ret);

	return ret;
}

static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i, ret;

	rss_cfg->rss_size = hdev->rss_size_max;

	if (hdev->pdev->revision >= 0x21) {
		rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
		netdev_rss_key_fill(rss_cfg->rss_hash_key,
				    HCLGEVF_RSS_KEY_SIZE);

		ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
					       rss_cfg->rss_hash_key);
		if (ret)
			return ret;

		rss_cfg->rss_tuple_sets.ipv4_tcp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv4_udp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv4_sctp_en =
					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		rss_cfg->rss_tuple_sets.ipv4_fragment_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_tcp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_udp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_sctp_en =
					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		rss_cfg->rss_tuple_sets.ipv6_fragment_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;

		ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
		if (ret)
			return ret;
	}

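	/* initialize the RSS indirection table */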
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;

	ret = hclgevf_set_rss_indir_table(hdev);
	if (ret)
		return ret;

	return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
}

static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
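	/* other VLAN configuration (e.g. VLAN TX/RX offload) could also be
	 * added here later
	 */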
	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
				       false);
}

static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (enable) {
		mod_timer(&hdev->service_timer, jiffies + HZ);
	} else {
		del_timer_sync(&hdev->service_timer);
		cancel_work_sync(&hdev->service_task);
		clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
	}
}

static int hclgevf_ae_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	hclgevf_reset_tqp_stats(handle);

	hclgevf_request_link_info(hdev);

	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	return 0;
}

static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i;

	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	for (i = 0; i < handle->kinfo.num_tqps; i++)
		hclgevf_reset_tqp(handle, i);

	hclgevf_reset_tqp_stats(handle);
	hclgevf_update_link_status(hdev, 0);
}

static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data;

	msg_data = alive ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_ALIVE,
				    0, &msg_data, 1, false, NULL, 0);
}

static int hclgevf_client_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
	return hclgevf_set_alive(handle, true);
}

static void hclgevf_client_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret;

	ret = hclgevf_set_alive(handle, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "%s failed %d\n", __func__, ret);

	del_timer_sync(&hdev->keep_alive_timer);
	cancel_work_sync(&hdev->keep_alive_task);
}

static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
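	/* setup tasks for the MBX */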
	INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);

	timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);

	INIT_WORK(&hdev->service_task, hclgevf_service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);

	INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task);

	mutex_init(&hdev->mbx_resp.mbx_mutex);

	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}

static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);

	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}

static int hclgevf_init_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
		vectors = pci_alloc_irq_vectors(pdev,
						hdev->roce_base_msix_offset + 1,
						hdev->num_msi,
						PCI_IRQ_MSIX);
	else
		vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
						PCI_IRQ_MSI | PCI_IRQ_MSIX);

	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		devm_kfree(&pdev->dev, hdev->vector_status);
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	devm_kfree(&pdev->dev, hdev->vector_status);
	devm_kfree(&pdev->dev, hdev->vector_irq);
	pci_free_irq_vectors(pdev);
}

static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
	int ret = 0;

	hclgevf_get_misc_vector(hdev);

	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
			  0, "hclgevf_cmd", hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
			hdev->misc_vector.vector_irq);
		return ret;
	}

	hclgevf_clear_event_cause(hdev, 0);

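	/* enable the misc vector (vector 0) */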
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return ret;
}

static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
	hclgevf_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclgevf_free_vector(hdev, 0);
}

static int hclgevf_init_client_instance(struct hnae3_client *client,
					struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			goto clear_nic;

		hnae3_set_client_init_flag(client, ae_dev, 1);

		if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
			struct hnae3_client *rc = hdev->roce_client;

			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				goto clear_roce;
			ret = rc->ops->init_instance(&hdev->roce);
			if (ret)
				goto clear_roce;

			hnae3_set_client_init_flag(hdev->roce_client, ae_dev,
						   1);
		}
		break;
	case HNAE3_CLIENT_UNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			goto clear_nic;

		hnae3_set_client_init_flag(client, ae_dev, 1);
		break;
	case HNAE3_CLIENT_ROCE:
		if (hnae3_dev_roce_supported(hdev)) {
			hdev->roce_client = client;
			hdev->roce.client = client;
		}

		if (hdev->roce_client && hdev->nic_client) {
			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				goto clear_roce;

			ret = client->ops->init_instance(&hdev->roce);
			if (ret)
				goto clear_roce;
		}

		hnae3_set_client_init_flag(client, ae_dev, 1);
		break;
	default:
		return -EINVAL;
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	hdev->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	hdev->roce.client = NULL;
	return ret;
}

2121static void hclgevf_uninit_client_instance(struct hnae3_client *client,
2122 struct hnae3_ae_dev *ae_dev)
2123{
	struct hclgevf_dev *hdev = ae_dev->priv;

	/* un-init the roce client first, if one is registered */
	if (hdev->roce_client) {
		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
		hdev->roce_client = NULL;
		hdev->roce.client = NULL;
	}

	/* un-init the nic/unic client, unless this request came from the
	 * roce client itself
	 */
	if (client->ops->uninit_instance && hdev->nic_client &&
	    client->type != HNAE3_CLIENT_ROCE) {
		client->ops->uninit_instance(&hdev->nic, 0);
		hdev->nic_client = NULL;
		hdev->nic.client = NULL;
	}
}

static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting\n");
		goto err_disable_device;
	}

	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hdev = hdev;
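	/* map BAR 2, which exposes the VF's register space */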
	hw->io_base = pci_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
{
	struct hclgevf_query_res_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query vf resource failed, ret = %d.\n", ret);
		return ret;
	}

	req = (struct hclgevf_query_res_cmd *)desc.data;

	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) {
		hdev->roce_base_msix_offset =
			hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
					HCLGEVF_MSIX_OFT_ROCEE_M,
					HCLGEVF_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msix =
			hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
					HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);

		/* the NIC vectors are queued before the roce vectors, so the
		 * total MSI-X count is the roce offset plus the roce count
		 */
		hdev->num_msi = hdev->num_roce_msix +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
			hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
					HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
	}

	return 0;
}

static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret = 0;

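	/* a full VF reset invalidates the interrupt resources, so release
	 * them first and re-acquire them below
	 */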
	if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
	    test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
		clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	}

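	/* (re)initialize MSI/MSI-X and the misc IRQ if not currently set up */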
	if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		pci_set_master(pdev);
		ret = hclgevf_init_msi(hdev);
		if (ret) {
			dev_err(&pdev->dev,
				"failed(%d) to init MSI/MSI-X\n", ret);
			return ret;
		}

		ret = hclgevf_misc_irq_init(hdev);
		if (ret) {
			hclgevf_uninit_msi(hdev);
			dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
				ret);
			return ret;
		}

		set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	}

	return ret;
}

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_reset(hdev);
	if (ret) {
		dev_err(&pdev->dev, "pci reset failed %d\n", ret);
		return ret;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "cmd init failed %d\n", ret);
		return ret;
	}

	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		return ret;
	}

	ret = hclgevf_config_gro(hdev, true);
	if (ret)
		return ret;

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		return ret;
	}

	dev_info(&hdev->pdev->dev, "Reset done\n");

	return 0;
}

static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI initialization failed\n");
		return ret;
	}

	ret = hclgevf_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed: %d\n", ret);
		goto err_cmd_queue_init;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	/* get the VF's interrupt vector resources from the firmware */
	ret = hclgevf_query_vf_resource(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query vf resource error, ret = %d.\n", ret);
		goto err_cmd_init;
	}

	ret = hclgevf_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
		goto err_cmd_init;
	}

	hclgevf_state_init(hdev);
	hdev->reset_level = HNAE3_VF_FUNC_RESET;

	ret = hclgevf_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
			ret);
		goto err_misc_irq_init;
	}

	set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);

	ret = hclgevf_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
		goto err_config;
	}

	ret = hclgevf_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
		goto err_config;
	}

	ret = hclgevf_set_handle_info(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
		goto err_config;
	}

	ret = hclgevf_config_gro(hdev, true);
	if (ret)
		goto err_config;

	/* initialize RSS for this VF */
	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		goto err_config;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		goto err_config;
	}

	hdev->last_reset_time = jiffies;
	dev_info(&pdev->dev, "finished initializing %s driver\n",
		 HCLGEVF_DRIVER_NAME);

	return 0;

err_config:
	hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
err_cmd_init:
	hclgevf_cmd_uninit(hdev);
err_cmd_queue_init:
	hclgevf_pci_uninit(hdev);
	clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	return ret;
}

static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
	hclgevf_state_uninit(hdev);

	if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
	}

	/* uninit the command queue before unmapping its register space,
	 * mirroring the error path in hclgevf_init_hdev()
	 */
	hclgevf_cmd_uninit(hdev);
	hclgevf_pci_uninit(hdev);
}

static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev;
	int ret;

	ret = hclgevf_alloc_hdev(ae_dev);
	if (ret) {
		dev_err(&pdev->dev, "hclge device allocation failed\n");
		return ret;
	}

	ret = hclgevf_init_hdev(ae_dev->priv);
	if (ret) {
		dev_err(&pdev->dev, "hclge device initialization failed\n");
		return ret;
	}

	hdev = ae_dev->priv;
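	/* the keep-alive timer periodically schedules keep_alive_task so the
	 * VF can signal the PF that it is still alive
	 */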
	timer_setup(&hdev->keep_alive_timer, hclgevf_keep_alive_timer, 0);
	INIT_WORK(&hdev->keep_alive_task, hclgevf_keep_alive_task);

	return 0;
}

static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_uninit_hdev(hdev);
	ae_dev->priv = NULL;
}

static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;

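	/* channels are capped by both the RSS capability and the TQP count */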
	return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
}

/**
 * hclgevf_get_channels - get the current channels enabled and max supported
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support.
 **/
static void hclgevf_get_channels(struct hnae3_handle *handle,
				 struct ethtool_channels *ch)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ch->max_combined = hclgevf_get_max_channels(hdev);
	ch->other_count = 0;
	ch->max_other = 0;
	ch->combined_count = hdev->num_tqps;
}

static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
					  u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*alloc_tqps = hdev->num_tqps;
	*max_rss_size = hdev->rss_size_max;
}

static int hclgevf_get_status(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->hw.mac.link;
}

static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
					    u8 *auto_neg, u32 *speed,
					    u8 *duplex)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = AUTONEG_DISABLE;
}

void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex)
{
	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;
}

static int hclgevf_gro_en(struct hnae3_handle *handle, int enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_config_gro(hdev, enable);
}

static void hclgevf_get_media_type(struct hnae3_handle *handle,
				   u8 *media_type)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (media_type)
		*media_type = hdev->hw.mac.media_type;
}

static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

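	/* any nonzero value in the reset-in-progress register means the
	 * hardware reset has not completed yet
	 */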
	return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
}

static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->reset_count;
}

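/* register dump layout: values are emitted REG_NUM_PER_LINE u32s per line,
 * and each register block is padded to a full line with SEPARATOR_VALUE
 */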
#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFFFFFFFF
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))

static int hclgevf_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;

	return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE;
}

static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
			     void *data)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, j, reg_num, separator_num;
	u32 *reg = data;

	*version = hdev->fw_version;

	/* fetch per-VF register values from the VF's PCIe register space */
	reg_num = sizeof(cmdq_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
	for (i = 0; i < reg_num; i++)
		*reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_num = sizeof(common_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
	for (i = 0; i < reg_num; i++)
		*reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_num = sizeof(ring_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_tqps; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  ring_reg_addr_list[i] +
						  0x200 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	reg_num = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  tqp_intr_reg_addr_list[i] +
						  4 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
}

static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.flr_prepare = hclgevf_flr_prepare,
	.flr_done = hclgevf_flr_done,
	.init_client_instance = hclgevf_init_client_instance,
	.uninit_client_instance = hclgevf_uninit_client_instance,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.client_start = hclgevf_client_start,
	.client_stop = hclgevf_client_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.put_vector = hclgevf_put_vector,
	.reset_queue = hclgevf_reset_tqp,
	.set_promisc_mode = hclgevf_set_promisc_mode,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclgevf_get_rss_key_size,
	.get_rss_indir_size = hclgevf_get_rss_indir_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_rss_tuple = hclgevf_get_rss_tuple,
	.set_rss_tuple = hclgevf_set_rss_tuple,
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
	.reset_event = hclgevf_reset_event,
	.set_default_reset_request = hclgevf_set_def_reset_request,
	.get_channels = hclgevf_get_channels,
	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
	.get_regs_len = hclgevf_get_regs_len,
	.get_regs = hclgevf_get_regs,
	.get_status = hclgevf_get_status,
	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
	.get_media_type = hclgevf_get_media_type,
	.get_hw_reset_stat = hclgevf_get_hw_reset_stat,
	.ae_dev_resetting = hclgevf_ae_dev_resetting,
	.ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
	.set_gro_en = hclgevf_gro_en,
	.set_mtu = hclgevf_set_mtu,
	.get_global_queue_id = hclgevf_get_qid_global,
	.set_timer_task = hclgevf_set_timer_task,
};

static struct hnae3_ae_algo ae_algovf = {
	.ops = &hclgevf_ops,
	.pdev_id_table = ae_algovf_pci_tbl,
};

static int hclgevf_init(void)
{
	pr_info("%s is initializing\n", HCLGEVF_NAME);

	hnae3_register_ae_algo(&ae_algovf);

	return 0;
}

static void hclgevf_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algovf);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);