// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/skbuff.h>
#include <linux/sctp.h>
#include <linux/vermagic.h>
#include <net/gre.h>
#include <net/pkt_cls.h>
#include <net/tcp.h>
#include <net/vxlan.h>

#include "hnae3.h"
#include "hns3_enet.h"

#define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift)))
#define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)

static void hns3_clear_all_ring(struct hnae3_handle *h);
static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
static void hns3_remove_hw_addr(struct net_device *netdev);

static const char hns3_driver_name[] = "hns3";
const char hns3_driver_version[] = VERMAGIC_STRING;
static const char hns3_driver_string[] =
			"Hisilicon Ethernet Network Driver for Hip08 Family";
static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
static struct hnae3_client client;

/* hns3_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id hns3_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);

static irqreturn_t hns3_irq_handle(int irq, void *vector)
{
	struct hns3_enet_tqp_vector *tqp_vector = vector;

	napi_schedule(&tqp_vector->napi);

	return IRQ_HANDLED;
}
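/* Callback invoked by the IRQ core when the affinity of a vector
 * interrupt is changed (e.g. by irqbalance); it caches the new mask in
 * the tqp_vector so it can be re-applied as the affinity hint later.
 */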
static void hns3_nic_irq_affinity_notify(struct irq_affinity_notify *notify,
					 const cpumask_t *mask)
{
	struct hns3_enet_tqp_vector *tqp_vectors =
		container_of(notify, struct hns3_enet_tqp_vector,
			     affinity_notify);

	tqp_vectors->affinity_mask = *mask;
}

static void hns3_nic_irq_affinity_release(struct kref *ref)
{
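	/* Intentionally empty: the notifier is embedded in the
	 * tqp_vector, so there is nothing extra to release here.
	 */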
}

static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	unsigned int i;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
			continue;

		/* clear the affinity notifier and affinity mask */
		irq_set_affinity_notifier(tqp_vectors->vector_irq, NULL);
		irq_set_affinity_hint(tqp_vectors->vector_irq, NULL);

		/* release the irq resource */
		free_irq(tqp_vectors->vector_irq, tqp_vectors);
		tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
	}
}

static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	int txrx_int_idx = 0;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	unsigned int i;
	int ret;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
			continue;

		if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "TxRx",
				 txrx_int_idx++);
		} else if (tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "Rx",
				 rx_int_idx++);
		} else if (tqp_vectors->tx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "Tx",
				 tx_int_idx++);
		} else {
			/* Skip this unused q_vector */
			continue;
		}

		tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';

		ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
				  tqp_vectors->name,
				  tqp_vectors);
		if (ret) {
			netdev_err(priv->netdev, "request irq(%d) fail\n",
				   tqp_vectors->vector_irq);
			return ret;
		}

		tqp_vectors->affinity_notify.notify =
			hns3_nic_irq_affinity_notify;
		tqp_vectors->affinity_notify.release =
			hns3_nic_irq_affinity_release;
		irq_set_affinity_notifier(tqp_vectors->vector_irq,
					  &tqp_vectors->affinity_notify);
		irq_set_affinity_hint(tqp_vectors->vector_irq,
				      &tqp_vectors->affinity_mask);

		tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
	}

	return 0;
}

static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 mask_en)
{
	writel(mask_en, tqp_vector->mask_addr);
}

static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
{
	napi_enable(&tqp_vector->napi);

	/* enable vector */
	hns3_mask_vector_irq(tqp_vector, 1);
}

static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
{
	/* disable vector */
	hns3_mask_vector_irq(tqp_vector, 0);

	disable_irq(tqp_vector->vector_irq);
	napi_disable(&tqp_vector->napi);
}

void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 rl_value)
{
	u32 rl_reg = hns3_rl_usec_to_reg(rl_value);

	/* According to the hardware, the RL (rate limit) mechanism may
	 * only be enabled when GL self-adaptation is disabled in both
	 * the TX and RX directions; otherwise the two mechanisms would
	 * interfere with each other.
	 */
	if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable &&
	    !tqp_vector->rx_group.coal.gl_adapt_enable)
		rl_reg |= HNS3_INT_RL_ENABLE_MASK;

	writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
}

void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value)
{
	u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);

	writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
}

void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value)
{
	u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);

	writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
}

static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
				   struct hns3_nic_priv *priv)
{
	/* initialize the configuration for interrupt coalescing.
	 * 1. GL (Interrupt Gap Limiter)
	 * 2. RL (Interrupt Rate Limiter)
	 */

	/* Default: enable interrupt coalescing self-adaptive and GL */
	tqp_vector->tx_group.coal.gl_adapt_enable = 1;
	tqp_vector->rx_group.coal.gl_adapt_enable = 1;

	tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
	tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;

	tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
	tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
}

static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
				      struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;

	hns3_set_vector_coalesce_tx_gl(tqp_vector,
				       tqp_vector->tx_group.coal.int_gl);
	hns3_set_vector_coalesce_rx_gl(tqp_vector,
				       tqp_vector->rx_group.coal.int_gl);
	hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
}
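
/* Map the TC/queue layout from the AE handle onto the netdev: one block
 * of rss_size queues per enabled TC, then report the total number of
 * TX/RX queues actually in use to the stack.
 */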
static int hns3_nic_set_real_num_queue(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo = &h->kinfo;
	unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
	int i, ret;

	if (kinfo->num_tc <= 1) {
		netdev_reset_tc(netdev);
	} else {
		ret = netdev_set_num_tc(netdev, kinfo->num_tc);
		if (ret) {
			netdev_err(netdev,
				   "netdev_set_num_tc fail, ret=%d!\n", ret);
			return ret;
		}

		for (i = 0; i < HNAE3_MAX_TC; i++) {
			if (!kinfo->tc_info[i].enable)
				continue;

			netdev_set_tc_queue(netdev,
					    kinfo->tc_info[i].tc,
					    kinfo->tc_info[i].tqp_count,
					    kinfo->tc_info[i].tqp_offset);
		}
	}

	ret = netif_set_real_num_tx_queues(netdev, queue_size);
	if (ret) {
		netdev_err(netdev,
			   "netif_set_real_num_tx_queues fail, ret=%d!\n",
			   ret);
		return ret;
	}

	ret = netif_set_real_num_rx_queues(netdev, queue_size);
	if (ret) {
		netdev_err(netdev,
			   "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
		return ret;
	}

	return 0;
}

static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
{
	u16 alloc_tqps, max_rss_size, rss_size;

	h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size);
	rss_size = alloc_tqps / h->kinfo.num_tc;

	return min_t(u16, rss_size, max_rss_size);
}

static void hns3_tqp_enable(struct hnae3_queue *tqp)
{
	u32 rcb_reg;

	rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
	rcb_reg |= BIT(HNS3_RING_EN_B);
	hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
}

static void hns3_tqp_disable(struct hnae3_queue *tqp)
{
	u32 rcb_reg;

	rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
	rcb_reg &= ~BIT(HNS3_RING_EN_B);
	hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
}

static int hns3_nic_net_up(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	int i, j;
	int ret;

	ret = hns3_nic_reset_all_ring(h);
	if (ret)
		return ret;

	/* get irq resource for all vectors */
	ret = hns3_nic_init_irq(priv);
	if (ret) {
		netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
		return ret;
	}

	clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);

	/* enable the vectors */
	for (i = 0; i < priv->vector_num; i++)
		hns3_vector_enable(&priv->tqp_vector[i]);

	/* enable rcb */
	for (j = 0; j < h->kinfo.num_tqps; j++)
		hns3_tqp_enable(h->kinfo.tqp[j]);

	/* start the ae_dev */
	ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
	if (ret)
		goto out_start_err;

	return 0;

out_start_err:
	set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
	while (j--)
		hns3_tqp_disable(h->kinfo.tqp[j]);

	for (j = i - 1; j >= 0; j--)
		hns3_vector_disable(&priv->tqp_vector[j]);

	hns3_nic_uninit_irq(priv);

	return ret;
}

static void hns3_config_xps(struct hns3_nic_priv *priv)
{
	int i;

	for (i = 0; i < priv->vector_num; i++) {
		struct hns3_enet_tqp_vector *tqp_vector = &priv->tqp_vector[i];
		struct hns3_enet_ring *ring = tqp_vector->tx_group.ring;

		while (ring) {
			int ret;

			ret = netif_set_xps_queue(priv->netdev,
						  &tqp_vector->affinity_mask,
						  ring->tqp->tqp_index);
			if (ret)
				netdev_warn(priv->netdev,
					    "set xps queue failed: %d\n", ret);

			ring = ring->next;
		}
	}
}

static int hns3_nic_net_open(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo;
	int i, ret;

	if (hns3_nic_resetting(netdev))
		return -EBUSY;

	netif_carrier_off(netdev);

	ret = hns3_nic_set_real_num_queue(netdev);
	if (ret)
		return ret;

	ret = hns3_nic_net_up(netdev);
	if (ret) {
		netdev_err(netdev,
			   "hns net up fail, ret=%d!\n", ret);
		return ret;
	}

	kinfo = &h->kinfo;
	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		netdev_set_prio_tc_map(netdev, i,
				       kinfo->prio_tc[i]);
	}

	if (h->ae_algo->ops->set_timer_task)
		h->ae_algo->ops->set_timer_task(priv->ae_handle, true);

	hns3_config_xps(priv);
	return 0;
}

static void hns3_nic_net_down(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = hns3_get_handle(netdev);
	const struct hnae3_ae_ops *ops;
	int i;

	/* disable vectors */
	for (i = 0; i < priv->vector_num; i++)
		hns3_vector_disable(&priv->tqp_vector[i]);

	/* disable rcb */
	for (i = 0; i < h->kinfo.num_tqps; i++)
		hns3_tqp_disable(h->kinfo.tqp[i]);

	/* stop ae_dev */
	ops = priv->ae_handle->ae_algo->ops;
	if (ops->stop)
		ops->stop(priv->ae_handle);

	/* free irq resources */
	hns3_nic_uninit_irq(priv);

	hns3_clear_all_ring(priv->ae_handle);
}

static int hns3_nic_net_stop(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
		return 0;

	if (h->ae_algo->ops->set_timer_task)
		h->ae_algo->ops->set_timer_task(priv->ae_handle, false);

	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(netdev);

	hns3_nic_net_down(netdev);

	return 0;
}

static int hns3_nic_uc_sync(struct net_device *netdev,
			    const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->add_uc_addr)
		return h->ae_algo->ops->add_uc_addr(h, addr);

	return 0;
}

static int hns3_nic_uc_unsync(struct net_device *netdev,
			      const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->rm_uc_addr)
		return h->ae_algo->ops->rm_uc_addr(h, addr);

	return 0;
}

static int hns3_nic_mc_sync(struct net_device *netdev,
			    const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->add_mc_addr)
		return h->ae_algo->ops->add_mc_addr(h, addr);

	return 0;
}

static int hns3_nic_mc_unsync(struct net_device *netdev,
			      const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->rm_mc_addr)
		return h->ae_algo->ops->rm_mc_addr(h, addr);

	return 0;
}

static u8 hns3_get_netdev_flags(struct net_device *netdev)
{
	u8 flags = 0;

	if (netdev->flags & IFF_PROMISC) {
		flags = HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE;
	} else {
		flags |= HNAE3_VLAN_FLTR;
		if (netdev->flags & IFF_ALLMULTI)
			flags |= HNAE3_USER_MPE;
	}

	return flags;
}

static void hns3_nic_set_rx_mode(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	u8 new_flags;
	int ret;

	new_flags = hns3_get_netdev_flags(netdev);

	ret = __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
	if (ret) {
		netdev_err(netdev, "sync uc address fail\n");
		if (ret == -ENOSPC)
			new_flags |= HNAE3_OVERFLOW_UPE;
	}

	if (netdev->flags & IFF_MULTICAST) {
		ret = __dev_mc_sync(netdev, hns3_nic_mc_sync,
				    hns3_nic_mc_unsync);
		if (ret) {
			netdev_err(netdev, "sync mc address fail\n");
			if (ret == -ENOSPC)
				new_flags |= HNAE3_OVERFLOW_MPE;
		}
	}

	/* User mode Promisc mode enable and vlan filtering is disabled to
	 * let all packets in. MAC-VLAN Table overflow Promisc enabled and
	 * vlan filtering is enabled
	 */
	hns3_enable_vlan_filter(netdev, new_flags & HNAE3_VLAN_FLTR);
	h->netdev_flags = new_flags;
	hns3_update_promisc_mode(netdev, new_flags);
}

int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;

	if (h->ae_algo->ops->set_promisc_mode) {
		return h->ae_algo->ops->set_promisc_mode(h,
						promisc_flags & HNAE3_UPE,
						promisc_flags & HNAE3_MPE);
	}

	return 0;
}

void hns3_enable_vlan_filter(struct net_device *netdev, bool enable)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	bool last_state;

	if (h->pdev->revision >= 0x21 && h->ae_algo->ops->enable_vlan_filter) {
		last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? true : false;
		if (enable != last_state) {
			netdev_info(netdev,
				    "%s vlan filter\n",
				    enable ? "enable" : "disable");
			h->ae_algo->ops->enable_vlan_filter(h, enable);
		}
	}
}
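
/* Prepare a GSO skb for hardware TSO: zero the checksum fields the
 * hardware recomputes, remove the L4 payload length from the TCP
 * pseudo-header checksum (the hardware adds the per-segment length
 * back), and derive the paylen/MSS descriptor fields.
 */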
static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
			u16 *mss, u32 *type_cs_vlan_tso)
{
	u32 l4_offset, hdr_len;
	union l3_hdr_info l3;
	union l4_hdr_info l4;
	u32 l4_paylen;
	int ret;

	if (!skb_is_gso(skb))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (unlikely(ret))
		return ret;

	l3.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* Software should clear the IPv4's checksum field when tso is
	 * needed.
	 */
	if (l3.v4->version == 4)
		l3.v4->check = 0;

	/* tunnel packet */
	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
					 SKB_GSO_GRE_CSUM |
					 SKB_GSO_UDP_TUNNEL |
					 SKB_GSO_UDP_TUNNEL_CSUM)) {
		if ((!(skb_shinfo(skb)->gso_type &
		    SKB_GSO_PARTIAL)) &&
		    (skb_shinfo(skb)->gso_type &
		    SKB_GSO_UDP_TUNNEL_CSUM)) {
			/* Software should clear the udp's checksum
			 * field when tso is needed.
			 */
			l4.udp->check = 0;
		}

		l3.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);

		/* Software should clear the IPv4's checksum field when
		 * tso is needed.
		 */
		if (l3.v4->version == 4)
			l3.v4->check = 0;
	}

	/* normal or tunnel packet */
	l4_offset = l4.hdr - skb->data;
	hdr_len = (l4.tcp->doff << 2) + l4_offset;

	/* remove payload length from inner pseudo checksum when tso */
	l4_paylen = skb->len - l4_offset;
	csum_replace_by_diff(&l4.tcp->check,
			     (__force __wsum)htonl(l4_paylen));

	/* find the txbd field values */
	*paylen = skb->len - hdr_len;
	hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1);

	/* get MSS for TSO */
	*mss = skb_shinfo(skb)->gso_size;

	return 0;
}

static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
				u8 *il4_proto)
{
	union l3_hdr_info l3;
	unsigned char *l4_hdr;
	unsigned char *exthdr;
	u8 l4_proto_tmp;
	__be16 frag_off;

	/* find outer header point */
	l3.hdr = skb_network_header(skb);
	l4_hdr = skb_transport_header(skb);

	if (skb->protocol == htons(ETH_P_IPV6)) {
		exthdr = l3.hdr + sizeof(*l3.v6);
		l4_proto_tmp = l3.v6->nexthdr;
		if (l4_hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto_tmp, &frag_off);
	} else if (skb->protocol == htons(ETH_P_IP)) {
		l4_proto_tmp = l3.v4->protocol;
	} else {
		return -EINVAL;
	}

	*ol4_proto = l4_proto_tmp;

	/* tunnel packet */
	if (!skb->encapsulation) {
		*il4_proto = 0;
		return 0;
	}

	/* find inner header point */
	l3.hdr = skb_inner_network_header(skb);
	l4_hdr = skb_inner_transport_header(skb);

	if (l3.v6->version == 6) {
		exthdr = l3.hdr + sizeof(*l3.v6);
		l4_proto_tmp = l3.v6->nexthdr;
		if (l4_hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto_tmp, &frag_off);
	} else if (l3.v4->version == 4) {
		l4_proto_tmp = l3.v4->protocol;
	}

	*il4_proto = l4_proto_tmp;

	return 0;
}
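
/* Fill the header-length fields of the TX descriptor. The hardware
 * expects lengths in units of 2 bytes for L2 and 4 bytes for L3/L4,
 * hence the shifts before the byte counts are programmed.
 */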
static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
				u8 il4_proto, u32 *type_cs_vlan_tso,
				u32 *ol_type_vlan_len_msec)
{
	union l3_hdr_info l3;
	union l4_hdr_info l4;
	unsigned char *l2_hdr;
	u8 l4_proto = ol4_proto;
	u32 ol2_len;
	u32 ol3_len;
	u32 ol4_len;
	u32 l2_len;
	u32 l3_len;

	l3.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* compute L2 header size for normal packet, defined in 2 Bytes */
	l2_len = l3.hdr - skb->data;
	hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);

	/* tunnel packet */
	if (skb->encapsulation) {
		/* compute OL2 header size, defined in 2 Bytes */
		ol2_len = l2_len;
		hns3_set_field(*ol_type_vlan_len_msec,
			       HNS3_TXD_L2LEN_S, ol2_len >> 1);

		/* compute OL3 header size, defined in 4 Bytes */
		ol3_len = l4.hdr - l3.hdr;
		hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S,
			       ol3_len >> 2);

		/* MAC in UDP, MAC in GRE (0x6558) */
		if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
			/* switch MAC header ptr from outer to inner header */
			l2_hdr = skb_inner_mac_header(skb);

			/* compute OL4 header size, defined in 4 Bytes */
			ol4_len = l2_hdr - l4.hdr;
			hns3_set_field(*ol_type_vlan_len_msec,
				       HNS3_TXD_L4LEN_S, ol4_len >> 2);

			/* switch IP header ptr from outer to inner header */
			l3.hdr = skb_inner_network_header(skb);

			/* compute inner l2 header size, defined in 2 Bytes */
			l2_len = l3.hdr - l2_hdr;
			hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S,
				       l2_len >> 1);
		} else {
			/* skb packet types not supported by hardware,
			 * the txbd len field is not filled.
			 */
			return;
		}

		/* switch L4 header pointer from outer to inner */
		l4.hdr = skb_inner_transport_header(skb);

		l4_proto = il4_proto;
	}

	/* compute inner(/normal) L3 header size, defined in 4 Bytes */
	l3_len = l4.hdr - l3.hdr;
	hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2);

	/* compute inner(/normal) L4 header size, defined in 4 Bytes */
	switch (l4_proto) {
	case IPPROTO_TCP:
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
			       l4.tcp->doff);
		break;
	case IPPROTO_SCTP:
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
			       (sizeof(struct sctphdr) >> 2));
		break;
	case IPPROTO_UDP:
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
			       (sizeof(struct udphdr) >> 2));
		break;
	default:
		/* skb packet types not supported by hardware,
		 * the txbd len field is not filled.
		 */
		return;
	}
}

/* when skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL
 * and it is udp packet, which has a dest port as the IANA assigned.
 * the hardware is expected to do the checksum offload, but the
 * hardware will not do the checksum offload when udp dest port is
 * 4789.
 */
static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
{
#define IANA_VXLAN_PORT 4789
	union l4_hdr_info l4;

	l4.hdr = skb_transport_header(skb);

	if (!(!skb->encapsulation && l4.udp->dest == htons(IANA_VXLAN_PORT)))
		return false;

	skb_checksum_help(skb);

	return true;
}
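
/* Program the checksum-offload fields (L3T/L4T and, for tunnels,
 * OL3T/TUNTYPE) of the TX descriptor, falling back to software
 * checksumming for packet types the hardware cannot handle.
 */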
static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
				   u8 il4_proto, u32 *type_cs_vlan_tso,
				   u32 *ol_type_vlan_len_msec)
{
	union l3_hdr_info l3;
	u32 l4_proto = ol4_proto;

	l3.hdr = skb_network_header(skb);

	/* define OL3 type and tunnel type(OL4) */
	if (skb->encapsulation) {
		/* define outer network header type */
		if (skb->protocol == htons(ETH_P_IP)) {
			if (skb_is_gso(skb))
				hns3_set_field(*ol_type_vlan_len_msec,
					       HNS3_TXD_OL3T_S,
					       HNS3_OL3T_IPV4_CSUM);
			else
				hns3_set_field(*ol_type_vlan_len_msec,
					       HNS3_TXD_OL3T_S,
					       HNS3_OL3T_IPV4_NO_CSUM);

		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S,
				       HNS3_OL3T_IPV6);
		}

		/* define tunnel type(OL4) */
		switch (l4_proto) {
		case IPPROTO_UDP:
			hns3_set_field(*ol_type_vlan_len_msec,
				       HNS3_TXD_TUNTYPE_S,
				       HNS3_TUN_MAC_IN_UDP);
			break;
		case IPPROTO_GRE:
			hns3_set_field(*ol_type_vlan_len_msec,
				       HNS3_TXD_TUNTYPE_S,
				       HNS3_TUN_NVGRE);
			break;
		default:
			/* drop the skb tunnel packet if hardware doesn't
			 * support it, because hardware can't calculate csum
			 * when TSO.
			 */
			if (skb_is_gso(skb))
				return -EDOM;

			/* the stack computes the IP header already,
			 * driver calculates l4 checksum when not TSO.
			 */
			skb_checksum_help(skb);
			return 0;
		}

		l3.hdr = skb_inner_network_header(skb);
		l4_proto = il4_proto;
	}

	if (l3.v4->version == 4) {
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
			       HNS3_L3T_IPV4);

		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (skb_is_gso(skb))
			hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
	} else if (l3.v6->version == 6) {
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
			       HNS3_L3T_IPV6);
	}

	switch (l4_proto) {
	case IPPROTO_TCP:
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
			       HNS3_L4T_TCP);
		break;
	case IPPROTO_UDP:
		if (hns3_tunnel_csum_bug(skb))
			break;

		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
			       HNS3_L4T_UDP);
		break;
	case IPPROTO_SCTP:
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
		hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
			       HNS3_L4T_SCTP);
		break;
	default:
		/* drop the skb tunnel packet if hardware doesn't
		 * support it, because hardware can't calculate csum
		 * when TSO.
		 */
		if (skb_is_gso(skb))
			return -EDOM;

		/* the stack computes the IP header already,
		 * driver calculates l4 checksum when not TSO.
		 */
		skb_checksum_help(skb);
		return 0;
	}

	return 0;
}

static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
{
	/* Config bd buffer end */
	hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
	hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
}

static int hns3_fill_desc_vtags(struct sk_buff *skb,
				struct hns3_enet_ring *tx_ring,
				u32 *inner_vlan_flag,
				u32 *out_vlan_flag,
				u16 *inner_vtag,
				u16 *out_vtag)
{
#define HNS3_TX_VLAN_PRIO_SHIFT 13

	if (skb->protocol == htons(ETH_P_8021Q) &&
	    !(tx_ring->tqp->handle->kinfo.netdev->features &
	    NETIF_F_HW_VLAN_CTAG_TX)) {
		/* When HW VLAN acceleration is turned off, and the stack
		 * sets the protocol to 802.1q, the driver only needs to
		 * set the protocol to the encapsulated ethertype.
		 */
		skb->protocol = vlan_get_protocol(skb);
		return 0;
	}

	if (skb_vlan_tag_present(skb)) {
		u16 vlan_tag;

		vlan_tag = skb_vlan_tag_get(skb);
		vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;

		/* Based on hw strategy, use out_vtag in two layer tag case,
		 * and use inner_vtag in one tag case.
		 */
		if (skb->protocol == htons(ETH_P_8021Q)) {
			hns3_set_field(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
			*out_vtag = vlan_tag;
		} else {
			hns3_set_field(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
			*inner_vtag = vlan_tag;
		}
	} else if (skb->protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *vhdr;
		int rc;

		rc = skb_cow_head(skb, 0);
		if (unlikely(rc < 0))
			return rc;
		vhdr = (struct vlan_ethhdr *)skb->data;
		vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
					<< HNS3_TX_VLAN_PRIO_SHIFT);
	}

	skb->protocol = vlan_get_protocol(skb);
	return 0;
}
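
/* Fill one or more TX BDs for a linear buffer or a page fragment. A
 * buffer larger than HNS3_MAX_BD_SIZE is split across several BDs that
 * share a single DMA mapping.
 */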
static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
			  int size, int frag_end, enum hns_desc_type type)
{
	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct hns3_desc *desc = &ring->desc[ring->next_to_use];
	struct device *dev = ring_to_dev(ring);
	u16 bdtp_fe_sc_vld_ra_ri = 0;
	struct skb_frag_struct *frag;
	unsigned int frag_buf_num;
	int k, sizeoflast;
	dma_addr_t dma;

	if (type == DESC_TYPE_SKB) {
		struct sk_buff *skb = (struct sk_buff *)priv;
		u32 ol_type_vlan_len_msec = 0;
		u32 type_cs_vlan_tso = 0;
		u32 paylen = skb->len;
		u16 inner_vtag = 0;
		u16 out_vtag = 0;
		u16 mss = 0;
		int ret;

		ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
					   &ol_type_vlan_len_msec,
					   &inner_vtag, &out_vtag);
		if (unlikely(ret))
			return ret;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			u8 ol4_proto, il4_proto;

			skb_reset_mac_len(skb);

			ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
			if (unlikely(ret))
				return ret;
			hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
					    &type_cs_vlan_tso,
					    &ol_type_vlan_len_msec);
			ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
						      &type_cs_vlan_tso,
						      &ol_type_vlan_len_msec);
			if (unlikely(ret))
				return ret;

			ret = hns3_set_tso(skb, &paylen, &mss,
					   &type_cs_vlan_tso);
			if (unlikely(ret))
				return ret;
		}

		/* Set txbd */
		desc->tx.ol_type_vlan_len_msec =
			cpu_to_le32(ol_type_vlan_len_msec);
		desc->tx.type_cs_vlan_tso_len =
			cpu_to_le32(type_cs_vlan_tso);
		desc->tx.paylen = cpu_to_le32(paylen);
		desc->tx.mss = cpu_to_le16(mss);
		desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
		desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);

		dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
	} else {
		frag = (struct skb_frag_struct *)priv;
		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
	}

	if (unlikely(dma_mapping_error(ring->dev, dma))) {
		ring->stats.sw_err_cnt++;
		return -ENOMEM;
	}

	desc_cb->length = size;

	frag_buf_num = hns3_tx_bd_count(size);
	sizeoflast = size & HNS3_TX_LAST_SIZE_M;
	sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;

	/* When frag size is bigger than hardware limit, split this frag */
	for (k = 0; k < frag_buf_num; k++) {
		/* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
		desc_cb->priv = priv;
		desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
		desc_cb->type = (type == DESC_TYPE_SKB && !k) ?
				DESC_TYPE_SKB : DESC_TYPE_PAGE;

		/* now, fill the descriptor */
		desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
		desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
				(u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
		hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri,
				       frag_end && (k == frag_buf_num - 1) ?
						1 : 0);
		desc->tx.bdtp_fe_sc_vld_ra_ri =
				cpu_to_le16(bdtp_fe_sc_vld_ra_ri);

		/* move ring pointer to next */
		ring_ptr_move_fw(ring, next_to_use);

		desc_cb = &ring->desc_cb[ring->next_to_use];
		desc = &ring->desc[ring->next_to_use];
	}

	return 0;
}

static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
				   struct hns3_enet_ring *ring)
{
	struct sk_buff *skb = *out_skb;
	struct sk_buff *new_skb = NULL;
	struct skb_frag_struct *frag;
	int bdnum_for_frag;
	int frag_num;
	int buf_num;
	int size;
	int i;

	size = skb_headlen(skb);
	buf_num = hns3_tx_bd_count(size);

	frag_num = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < frag_num; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		size = skb_frag_size(frag);
		bdnum_for_frag = hns3_tx_bd_count(size);
		if (unlikely(bdnum_for_frag > HNS3_MAX_BD_PER_FRAG))
			return -ENOMEM;

		buf_num += bdnum_for_frag;
	}

	if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
		buf_num = hns3_tx_bd_count(skb->len);
		if (ring_space(ring) < buf_num)
			return -EBUSY;
		/* manual split the send packet */
		new_skb = skb_copy(skb, GFP_ATOMIC);
		if (!new_skb)
			return -ENOMEM;
		dev_kfree_skb_any(skb);
		*out_skb = new_skb;
	}

	if (unlikely(ring_space(ring) < buf_num))
		return -EBUSY;

	*bnum = buf_num;
	return 0;
}

static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
				  struct hns3_enet_ring *ring)
{
	struct sk_buff *skb = *out_skb;
	struct sk_buff *new_skb = NULL;
	int buf_num;

	/* No. of segments (plus a header) */
	buf_num = skb_shinfo(skb)->nr_frags + 1;

	if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
		buf_num = hns3_tx_bd_count(skb->len);
		if (ring_space(ring) < buf_num)
			return -EBUSY;
		/* manual split the send packet */
		new_skb = skb_copy(skb, GFP_ATOMIC);
		if (!new_skb)
			return -ENOMEM;
		dev_kfree_skb_any(skb);
		*out_skb = new_skb;
	}

	if (unlikely(ring_space(ring) < buf_num))
		return -EBUSY;

	*bnum = buf_num;

	return 0;
}
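
/* Unwind the BDs filled so far when hns3_fill_desc() fails part way
 * through a packet, walking next_to_use backwards and unmapping each
 * buffer until the original position is reached.
 */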
static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
{
	struct device *dev = ring_to_dev(ring);
	unsigned int i;

	for (i = 0; i < ring->desc_num; i++) {
		/* check if this is where we started */
		if (ring->next_to_use == next_to_use_orig)
			break;

		/* unmap the descriptor dma address */
		if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
			dma_unmap_single(dev,
					 ring->desc_cb[ring->next_to_use].dma,
					 ring->desc_cb[ring->next_to_use].length,
					 DMA_TO_DEVICE);
		else if (ring->desc_cb[ring->next_to_use].length)
			dma_unmap_page(dev,
				       ring->desc_cb[ring->next_to_use].dma,
				       ring->desc_cb[ring->next_to_use].length,
				       DMA_TO_DEVICE);

		ring->desc_cb[ring->next_to_use].length = 0;

		/* rollback one */
		ring_ptr_move_bw(ring, next_to_use);
	}
}

netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hns3_nic_ring_data *ring_data =
		&tx_ring_data(priv, skb->queue_mapping);
	struct hns3_enet_ring *ring = ring_data->ring;
	struct netdev_queue *dev_queue;
	struct skb_frag_struct *frag;
	int next_to_use_head;
	int next_to_use_frag;
	int buf_num;
	int seg_num;
	int size;
	int ret;
	int i;

	/* Prefetch the data used later */
	prefetch(skb->data);

	switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
	case -EBUSY:
		u64_stats_update_begin(&ring->syncp);
		ring->stats.tx_busy++;
		u64_stats_update_end(&ring->syncp);

		goto out_net_tx_busy;
	case -ENOMEM:
		u64_stats_update_begin(&ring->syncp);
		ring->stats.sw_err_cnt++;
		u64_stats_update_end(&ring->syncp);
		netdev_err(netdev, "no memory to xmit!\n");

		goto out_err_tx_ok;
	default:
		break;
	}

	/* No. of segments (plus a header) */
	seg_num = skb_shinfo(skb)->nr_frags + 1;

	size = skb_headlen(skb);

	next_to_use_head = ring->next_to_use;

	ret = hns3_fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0,
			     DESC_TYPE_SKB);
	if (unlikely(ret))
		goto head_fill_err;

	next_to_use_frag = ring->next_to_use;

	for (i = 1; i < seg_num; i++) {
		frag = &skb_shinfo(skb)->frags[i - 1];
		size = skb_frag_size(frag);

		ret = hns3_fill_desc(ring, frag, size,
				     seg_num - 1 == i ? 1 : 0,
				     DESC_TYPE_PAGE);

		if (unlikely(ret))
			goto frag_fill_err;
	}

	/* Complete translate all packets */
	dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
	netdev_tx_sent_queue(dev_queue, skb->len);

	wmb(); /* Commit all data before submit */

	hnae3_queue_xmit(ring->tqp, buf_num);

	return NETDEV_TX_OK;

frag_fill_err:
	hns3_clear_desc(ring, next_to_use_frag);

head_fill_err:
	hns3_clear_desc(ring, next_to_use_head);

out_err_tx_ok:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;

out_net_tx_busy:
	netif_stop_subqueue(netdev, ring_data->queue_index);
	smp_mb(); /* Commit all data before submit */

	return NETDEV_TX_BUSY;
}

static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct sockaddr *mac_addr = p;
	int ret;

	if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    mac_addr->sa_data);
		return 0;
	}

	ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
	if (ret) {
		netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
		return ret;
	}

	ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);

	return 0;
}

static int hns3_nic_do_ioctl(struct net_device *netdev,
			     struct ifreq *ifr, int cmd)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!netif_running(netdev))
		return -EINVAL;

	if (!h->ae_algo->ops->do_ioctl)
		return -EOPNOTSUPP;

	return h->ae_algo->ops->do_ioctl(h, ifr, cmd);
}

static int hns3_nic_set_features(struct net_device *netdev,
				 netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	bool enable;
	int ret;

	if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
		if (features & (NETIF_F_TSO | NETIF_F_TSO6))
			priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
		else
			priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
	}

	if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) {
		enable = !!(features & NETIF_F_GRO_HW);
		ret = h->ae_algo->ops->set_gro_en(h, enable);
		if (ret)
			return ret;
	}

	if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
	    h->ae_algo->ops->enable_vlan_filter) {
		enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER);
		h->ae_algo->ops->enable_vlan_filter(h, enable);
	}

	if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
	    h->ae_algo->ops->enable_hw_strip_rxvtag) {
		enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
		ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, enable);
		if (ret)
			return ret;
	}

	if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) {
		enable = !!(features & NETIF_F_NTUPLE);
		h->ae_algo->ops->enable_fd(h, enable);
	}

	netdev->features = features;
	return 0;
}

static void hns3_nic_get_stats64(struct net_device *netdev,
				 struct rtnl_link_stats64 *stats)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int queue_num = priv->ae_handle->kinfo.num_tqps;
	struct hnae3_handle *handle = priv->ae_handle;
	struct hns3_enet_ring *ring;
	u64 rx_length_errors = 0;
	u64 rx_crc_errors = 0;
	u64 rx_multicast = 0;
	unsigned int start;
	u64 tx_errors = 0;
	u64 rx_errors = 0;
	unsigned int idx;
	u64 tx_bytes = 0;
	u64 rx_bytes = 0;
	u64 tx_pkts = 0;
	u64 rx_pkts = 0;
	u64 tx_drop = 0;
	u64 rx_drop = 0;

	if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
		return;

	handle->ae_algo->ops->update_stats(handle, &netdev->stats);

	for (idx = 0; idx < queue_num; idx++) {
		/* fetch the tx stats */
		ring = priv->ring_data[idx].ring;
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			tx_bytes += ring->stats.tx_bytes;
			tx_pkts += ring->stats.tx_pkts;
			tx_drop += ring->stats.sw_err_cnt;
			tx_errors += ring->stats.sw_err_cnt;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

		/* fetch the rx stats */
		ring = priv->ring_data[idx + queue_num].ring;
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			rx_bytes += ring->stats.rx_bytes;
			rx_pkts += ring->stats.rx_pkts;
			rx_drop += ring->stats.non_vld_descs;
			rx_drop += ring->stats.l2_err;
			rx_errors += ring->stats.non_vld_descs;
			rx_errors += ring->stats.l2_err;
			rx_crc_errors += ring->stats.l2_err;
			rx_crc_errors += ring->stats.l3l4_csum_err;
			rx_multicast += ring->stats.rx_multicast;
			rx_length_errors += ring->stats.err_pkt_len;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
	}

	stats->tx_bytes = tx_bytes;
	stats->tx_packets = tx_pkts;
	stats->rx_bytes = rx_bytes;
	stats->rx_packets = rx_pkts;

	stats->rx_errors = rx_errors;
	stats->multicast = rx_multicast;
	stats->rx_length_errors = rx_length_errors;
	stats->rx_crc_errors = rx_crc_errors;
	stats->rx_missed_errors = netdev->stats.rx_missed_errors;

	stats->tx_errors = tx_errors;
	stats->rx_dropped = rx_drop;
	stats->tx_dropped = tx_drop;
	stats->collisions = netdev->stats.collisions;
	stats->rx_over_errors = netdev->stats.rx_over_errors;
	stats->rx_frame_errors = netdev->stats.rx_frame_errors;
	stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
	stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
	stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
	stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
	stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
	stats->tx_window_errors = netdev->stats.tx_window_errors;
	stats->rx_compressed = netdev->stats.rx_compressed;
	stats->tx_compressed = netdev->stats.tx_compressed;
}
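
/* mqprio offload: validate the requested TC count and mode, then hand
 * the prio-to-TC map over to the DCB code of the hclge/hclgevf backend.
 */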
static int hns3_setup_tc(struct net_device *netdev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
	struct hnae3_knic_private_info *kinfo;
	u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
	u8 tc = mqprio_qopt->qopt.num_tc;
	u16 mode = mqprio_qopt->mode;
	u8 hw = mqprio_qopt->qopt.hw;
	struct hnae3_handle *h;

	/* check the netdev before using it to look up the handle */
	if (!netdev)
		return -EINVAL;

	h = hns3_get_handle(netdev);
	kinfo = &h->kinfo;

	if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
	       mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
		return -EOPNOTSUPP;

	if (tc > HNAE3_MAX_TC)
		return -EINVAL;

	return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
		kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
}

static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	return hns3_setup_tc(dev, type_data);
}

static int hns3_vlan_rx_add_vid(struct net_device *netdev,
				__be16 proto, u16 vid)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret = -EIO;

	if (h->ae_algo->ops->set_vlan_filter)
		ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);

	if (!ret)
		set_bit(vid, priv->active_vlans);

	return ret;
}

static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
				 __be16 proto, u16 vid)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret = -EIO;

	if (h->ae_algo->ops->set_vlan_filter)
		ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);

	if (!ret)
		clear_bit(vid, priv->active_vlans);

	return ret;
}

static int hns3_restore_vlan(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret = 0;
	u16 vid;

	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
		ret = hns3_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
		if (ret) {
			netdev_err(netdev, "Restore vlan: %d filter, ret:%d\n",
				   vid, ret);
			return ret;
		}
	}

	return ret;
}

static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
				u8 qos, __be16 vlan_proto)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	int ret = -EIO;

	if (h->ae_algo->ops->set_vf_vlan_filter)
		ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
							  qos, vlan_proto);

	return ret;
}

static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	int ret;

	if (!h->ae_algo->ops->set_mtu)
		return -EOPNOTSUPP;

	ret = h->ae_algo->ops->set_mtu(h, new_mtu);
	if (ret)
		netdev_err(netdev, "failed to change MTU in hardware %d\n",
			   ret);
	else
		netdev->mtu = new_mtu;

	return ret;
}

static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
{
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hns3_enet_ring *tx_ring = NULL;
	int timeout_queue = 0;
	int hw_head, hw_tail;
	int i;

	/* Find the stopped queue the same way the stack does */
	for (i = 0; i < ndev->real_num_tx_queues; i++) {
		struct netdev_queue *q;
		unsigned long trans_start;

		q = netdev_get_tx_queue(ndev, i);
		trans_start = q->trans_start;
		if (netif_xmit_stopped(q) &&
		    time_after(jiffies,
			       (trans_start + ndev->watchdog_timeo))) {
			timeout_queue = i;
			break;
		}
	}

	if (i == ndev->real_num_tx_queues) {
		netdev_info(ndev,
			    "no netdev TX timeout queue found, timeout count: %llu\n",
			    priv->tx_timeout_count);
		return false;
	}

	tx_ring = priv->ring_data[timeout_queue].ring;

	hw_head = readl_relaxed(tx_ring->tqp->io_base +
				HNS3_RING_TX_RING_HEAD_REG);
	hw_tail = readl_relaxed(tx_ring->tqp->io_base +
				HNS3_RING_TX_RING_TAIL_REG);
	netdev_info(ndev,
		    "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n",
		    priv->tx_timeout_count,
		    timeout_queue,
		    tx_ring->next_to_use,
		    tx_ring->next_to_clean,
		    hw_head,
		    hw_tail,
		    readl(tx_ring->tqp_vector->mask_addr));

	return true;
}

static void hns3_nic_net_timeout(struct net_device *ndev)
{
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hnae3_handle *h = priv->ae_handle;

	if (!hns3_get_tx_timeo_queue_info(ndev))
		return;

	priv->tx_timeout_count++;

	/* request the reset, and let the hclge to determine
	 * which reset level should be done
	 */
	if (h->ae_algo->ops->reset_event)
		h->ae_algo->ops->reset_event(h->pdev, h);
}

static const struct net_device_ops hns3_nic_netdev_ops = {
	.ndo_open		= hns3_nic_net_open,
	.ndo_stop		= hns3_nic_net_stop,
	.ndo_start_xmit		= hns3_nic_net_xmit,
	.ndo_tx_timeout		= hns3_nic_net_timeout,
	.ndo_set_mac_address	= hns3_nic_net_set_mac_address,
	.ndo_do_ioctl		= hns3_nic_do_ioctl,
	.ndo_change_mtu		= hns3_nic_change_mtu,
	.ndo_set_features	= hns3_nic_set_features,
	.ndo_get_stats64	= hns3_nic_get_stats64,
	.ndo_setup_tc		= hns3_nic_setup_tc,
	.ndo_set_rx_mode	= hns3_nic_set_rx_mode,
	.ndo_vlan_rx_add_vid	= hns3_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= hns3_vlan_rx_kill_vid,
	.ndo_set_vf_vlan	= hns3_ndo_set_vf_vlan,
};

static bool hns3_is_phys_func(struct pci_dev *pdev)
{
	u32 dev_id = pdev->device;

	switch (dev_id) {
	case HNAE3_DEV_ID_GE:
	case HNAE3_DEV_ID_25GE:
	case HNAE3_DEV_ID_25GE_RDMA:
	case HNAE3_DEV_ID_25GE_RDMA_MACSEC:
	case HNAE3_DEV_ID_50GE_RDMA:
	case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
	case HNAE3_DEV_ID_100G_RDMA_MACSEC:
		return true;
	case HNAE3_DEV_ID_100G_VF:
	case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF:
		return false;
	default:
		dev_warn(&pdev->dev, "un-recognized pci device-id %u\n",
			 dev_id);
	}

	return false;
}

static void hns3_disable_sriov(struct pci_dev *pdev)
{
	/* If our VFs are assigned we cannot shut down SR-IOV
	 * without causing issues, so just leave the hardware
	 * available but disabled
	 */
	if (pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "disabling driver while VFs are assigned\n");
		return;
	}

	pci_disable_sriov(pdev);
}

static void hns3_get_dev_capability(struct pci_dev *pdev,
				    struct hnae3_ae_dev *ae_dev)
{
	if (pdev->revision >= 0x21) {
		hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1);
		hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B, 1);
	}
}

/* hns3_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in hns3_pci_tbl
 *
 * hns3_probe initializes a PF identified by a pci_dev structure.
 * The OS initialization, configuring of the PF private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 */
static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct hnae3_ae_dev *ae_dev;
	int ret;

	ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
			      GFP_KERNEL);
	if (!ae_dev) {
		ret = -ENOMEM;
		return ret;
	}

	ae_dev->pdev = pdev;
	ae_dev->flag = ent->driver_data;
	ae_dev->dev_type = HNAE3_DEV_KNIC;
	ae_dev->reset_type = HNAE3_NONE_RESET;
	hns3_get_dev_capability(pdev, ae_dev);
	pci_set_drvdata(pdev, ae_dev);

	ret = hnae3_register_ae_dev(ae_dev);
	if (ret) {
		devm_kfree(&pdev->dev, ae_dev);
		pci_set_drvdata(pdev, NULL);
	}

	return ret;
}

/* hns3_remove - Device removal routine
 * @pdev: PCI device information struct
 */
static void hns3_remove(struct pci_dev *pdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);

	if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))
		hns3_disable_sriov(pdev);

	hnae3_unregister_ae_dev(ae_dev);
	pci_set_drvdata(pdev, NULL);
}

/* hns3_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the
 * value of sriov_numvfs in sysfs.
 */
static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	int ret;

	if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) {
		dev_warn(&pdev->dev, "Can not config SRIOV\n");
		return -EINVAL;
	}

	if (num_vfs) {
		ret = pci_enable_sriov(pdev, num_vfs);
		if (ret)
			dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret);
		else
			return num_vfs;
	} else if (!pci_vfs_assigned(pdev)) {
		pci_disable_sriov(pdev);
	} else {
		dev_warn(&pdev->dev,
			 "Unable to free VFs because some are assigned to VMs.\n");
	}

	return 0;
}

static void hns3_shutdown(struct pci_dev *pdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);

	hnae3_unregister_ae_dev(ae_dev);
	devm_kfree(&pdev->dev, ae_dev);
	pci_set_drvdata(pdev, NULL);

	if (system_state == SYSTEM_POWER_OFF)
		pci_set_power_state(pdev, PCI_D3hot);
}

static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	pci_ers_result_t ret;

	dev_info(&pdev->dev, "PCI error detected, state(=%d)!!\n", state);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (!ae_dev) {
		dev_err(&pdev->dev,
			"Can't recover - error happened during device init\n");
		return PCI_ERS_RESULT_NONE;
	}

	if (ae_dev->ops->handle_hw_ras_error)
		ret = ae_dev->ops->handle_hw_ras_error(ae_dev);
	else
		return PCI_ERS_RESULT_NONE;

	return ret;
}

static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct device *dev = &pdev->dev;

	dev_info(dev, "requesting reset due to PCI error\n");

	/* request the reset */
	if (ae_dev->ops->reset_event) {
		if (!ae_dev->override_pci_need_reset)
			ae_dev->ops->reset_event(pdev, NULL);

		return PCI_ERS_RESULT_RECOVERED;
	}

	return PCI_ERS_RESULT_DISCONNECT;
}

static void hns3_reset_prepare(struct pci_dev *pdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "hns3 flr prepare\n");
	if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare)
		ae_dev->ops->flr_prepare(ae_dev);
}

static void hns3_reset_done(struct pci_dev *pdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "hns3 flr done\n");
	if (ae_dev && ae_dev->ops && ae_dev->ops->flr_done)
		ae_dev->ops->flr_done(ae_dev);
}

static const struct pci_error_handlers hns3_err_handler = {
	.error_detected = hns3_error_detected,
	.slot_reset     = hns3_slot_reset,
	.reset_prepare  = hns3_reset_prepare,
	.reset_done     = hns3_reset_done,
};

static struct pci_driver hns3_driver = {
	.name     = hns3_driver_name,
	.id_table = hns3_pci_tbl,
	.probe    = hns3_probe,
	.remove   = hns3_remove,
	.shutdown = hns3_shutdown,
	.sriov_configure = hns3_pci_sriov_configure,
	.err_handler    = &hns3_err_handler,
};

/* set default feature to hns3 */
static void hns3_set_default_feature(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct pci_dev *pdev = h->pdev;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
		NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;

	netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;

	netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;

	netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_HW_VLAN_CTAG_FILTER |
		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
		NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;

	netdev->vlan_features |=
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
		NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;

	netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
		NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;

	if (pdev->revision >= 0x21) {
		netdev->hw_features |= NETIF_F_GRO_HW;
		netdev->features |= NETIF_F_GRO_HW;

		if (!(h->flags & HNAE3_SUPPORT_VF)) {
			netdev->hw_features |= NETIF_F_NTUPLE;
			netdev->features |= NETIF_F_NTUPLE;
		}
	}
}
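
/* Allocate a (possibly multi-page) receive buffer and record it in the
 * descriptor control block; the DMA mapping is done separately in
 * hns3_map_buffer().
 */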
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
			     struct hns3_desc_cb *cb)
{
	unsigned int order = hnae3_page_order(ring);
	struct page *p;

	p = dev_alloc_pages(order);
	if (!p)
		return -ENOMEM;

	cb->priv = p;
	cb->page_offset = 0;
	cb->reuse_flag = 0;
	cb->buf = page_address(p);
	cb->length = hnae3_page_size(ring);
	cb->type = DESC_TYPE_PAGE;

	return 0;
}

static void hns3_free_buffer(struct hns3_enet_ring *ring,
			     struct hns3_desc_cb *cb)
{
	if (cb->type == DESC_TYPE_SKB)
		dev_kfree_skb_any((struct sk_buff *)cb->priv);
	else if (!HNAE3_IS_TX_RING(ring))
		put_page((struct page *)cb->priv);
	memset(cb, 0, sizeof(*cb));
}

static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
{
	cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
			       cb->length, ring_to_dma_dir(ring));

	if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma)))
		return -EIO;

	return 0;
}

static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
			      struct hns3_desc_cb *cb)
{
	if (cb->type == DESC_TYPE_SKB)
		dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
				 ring_to_dma_dir(ring));
	else if (cb->length)
		dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
			       ring_to_dma_dir(ring));
}

static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
{
	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
	ring->desc[i].addr = 0;
}

static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
{
	struct hns3_desc_cb *cb = &ring->desc_cb[i];

	if (!ring->desc_cb[i].dma)
		return;

	hns3_buffer_detach(ring, i);
	hns3_free_buffer(ring, cb);
}

static void hns3_free_buffers(struct hns3_enet_ring *ring)
{
	int i;

	for (i = 0; i < ring->desc_num; i++)
		hns3_free_buffer_detach(ring, i);
}

/* free desc along with its attached buffer */
static void hns3_free_desc(struct hns3_enet_ring *ring)
{
	int size = ring->desc_num * sizeof(ring->desc[0]);

	hns3_free_buffers(ring);

	if (ring->desc) {
		dma_free_coherent(ring_to_dev(ring), size,
				  ring->desc, ring->desc_dma_addr);
		ring->desc = NULL;
	}
}

static int hns3_alloc_desc(struct hns3_enet_ring *ring)
{
	int size = ring->desc_num * sizeof(ring->desc[0]);

	ring->desc = dma_alloc_coherent(ring_to_dev(ring), size,
					&ring->desc_dma_addr, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}
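
/* Allocate an RX buffer and DMA-map it; the buffer is freed again if
 * the mapping fails.
 */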
static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
				   struct hns3_desc_cb *cb)
{
	int ret;

	ret = hns3_alloc_buffer(ring, cb);
	if (ret)
		goto out;

	ret = hns3_map_buffer(ring, cb);
	if (ret)
		goto out_with_buf;

	return 0;

out_with_buf:
	hns3_free_buffer(ring, cb);
out:
	return ret;
}

static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
{
	int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);

	if (ret)
		return ret;

	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);

	return 0;
}

/* Allocate memory for raw pkg, and map with dma */
static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
{
	int i, j, ret;

	for (i = 0; i < ring->desc_num; i++) {
		ret = hns3_alloc_buffer_attach(ring, i);
		if (ret)
			goto out_buffer_fail;
	}

	return 0;

out_buffer_fail:
	for (j = i - 1; j >= 0; j--)
		hns3_free_buffer_detach(ring, j);
	return ret;
}

/* detach a in-used buffer and replace with a reserved one */
static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
				struct hns3_desc_cb *res_cb)
{
	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
	ring->desc_cb[i] = *res_cb;
	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
	ring->desc[i].rx.bd_base_info = 0;
}

static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
{
	ring->desc_cb[i].reuse_flag = 0;
	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
					 + ring->desc_cb[i].page_offset);
	ring->desc[i].rx.bd_base_info = 0;
}

static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
				      int *pkts)
{
	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];

	(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
	(*bytes) += desc_cb->length;

	hns3_free_buffer_detach(ring, ring->next_to_clean);

	ring_ptr_move_fw(ring, next_to_clean);
}

static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
{
	int u = ring->next_to_use;
	int c = ring->next_to_clean;

	if (unlikely(h > ring->desc_num))
		return 0;

	return u > c ? (h > c && h <= u) : (h > c || h <= u);
}
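
/* Reclaim completed TX descriptors: the hardware head register reports
 * how far the NIC has consumed the ring, so everything between
 * next_to_clean and head can be unmapped and freed; the queue is woken
 * again if it was stopped for lack of descriptors.
 */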
2143
2144void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
2145{
2146 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2147 struct hns3_nic_priv *priv = netdev_priv(netdev);
2148 struct netdev_queue *dev_queue;
2149 int bytes, pkts;
2150 int head;
2151
2152 head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
2153 rmb();
2154
2155 if (is_ring_empty(ring) || head == ring->next_to_clean)
2156 return;
2157
2158 if (unlikely(!is_valid_clean_head(ring, head))) {
2159 netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
2160 ring->next_to_use, ring->next_to_clean);
2161
2162 u64_stats_update_begin(&ring->syncp);
2163 ring->stats.io_err_cnt++;
2164 u64_stats_update_end(&ring->syncp);
2165 return;
2166 }
2167
2168 bytes = 0;
2169 pkts = 0;
2170 while (head != ring->next_to_clean) {
2171 hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
2172
2173 prefetch(&ring->desc_cb[ring->next_to_clean]);
2174 }
2175
2176 ring->tqp_vector->tx_group.total_bytes += bytes;
2177 ring->tqp_vector->tx_group.total_packets += pkts;
2178
2179 u64_stats_update_begin(&ring->syncp);
2180 ring->stats.tx_bytes += bytes;
2181 ring->stats.tx_pkts += pkts;
2182 u64_stats_update_end(&ring->syncp);
2183
2184 dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
2185 netdev_tx_completed_queue(dev_queue, pkts, bytes);
2186
2187 if (unlikely(pkts && netif_carrier_ok(netdev) &&
2188 (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
2189
2190
2191
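		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */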
		smp_mb();
		if (netif_tx_queue_stopped(dev_queue) &&
		    !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
			netif_tx_wake_queue(dev_queue);
			ring->stats.restart_queue++;
		}
	}
}

static int hns3_desc_unused(struct hns3_enet_ring *ring)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;

	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
}

static void
hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleaned_count)
{
	struct hns3_desc_cb *desc_cb;
	struct hns3_desc_cb res_cbs;
	int i, ret;

	for (i = 0; i < cleaned_count; i++) {
		desc_cb = &ring->desc_cb[ring->next_to_use];
		if (desc_cb->reuse_flag) {
			u64_stats_update_begin(&ring->syncp);
			ring->stats.reuse_pg_cnt++;
			u64_stats_update_end(&ring->syncp);

			hns3_reuse_buffer(ring, ring->next_to_use);
		} else {
			ret = hns3_reserve_buffer_map(ring, &res_cbs);
			if (ret) {
				u64_stats_update_begin(&ring->syncp);
				ring->stats.sw_err_cnt++;
				u64_stats_update_end(&ring->syncp);

				netdev_err(ring->tqp->handle->kinfo.netdev,
					   "hnae reserve buffer map failed.\n");
				break;
			}
			hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
		}

		ring_ptr_move_fw(ring, next_to_use);
	}

	wmb(); /* Make sure all buffer writes complete before the doorbell */
	writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
}

static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
				struct hns3_enet_ring *ring, int pull_len,
				struct hns3_desc_cb *desc_cb)
{
	struct hns3_desc *desc;
	u32 truesize;
	int size;
	int last_offset;
	bool twobufs;

	twobufs = ((PAGE_SIZE < 8192) &&
		   hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048);

	desc = &ring->desc[ring->next_to_clean];
	size = le16_to_cpu(desc->rx.size);

	truesize = hnae3_buf_size(ring);

	if (!twobufs)
		last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring);

	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
			size - pull_len, truesize);

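	/* Avoid re-using pages from a remote NUMA node; reuse_flag stays
	 * unset by default.
	 */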
	if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
		return;

	if (twobufs) {
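		/* If we are the only owner of the page we can reuse it */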
		if (likely(page_count(desc_cb->priv) == 1)) {
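			/* Flip page_offset to the other buffer in the page */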
			desc_cb->page_offset ^= truesize;

			desc_cb->reuse_flag = 1;
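			/* Bump the page refcount before it is handed to
			 * the stack.
			 */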
			get_page(desc_cb->priv);
		}
		return;
	}

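	/* Move the offset up to the next buffer in the page */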
	desc_cb->page_offset += truesize;

	if (desc_cb->page_offset <= last_offset) {
		desc_cb->reuse_flag = 1;
		get_page(desc_cb->priv);
	}
}

static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
			     struct hns3_desc *desc)
{
	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
	int l3_type, l4_type;
	u32 bd_base_info;
	int ol4_type;
	u32 l234info;

	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
	l234info = le32_to_cpu(desc->rx.l234_info);

	skb->ip_summed = CHECKSUM_NONE;

	skb_checksum_none_assert(skb);

	if (!(netdev->features & NETIF_F_RXCSUM))
		return;

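	/* If the packet was coalesced by hardware GRO, the hardware has
	 * already verified its checksum.
	 */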
	if (skb_shinfo(skb)->gso_size) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		return;
	}

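	/* check whether hardware has done the L3/L4 checksum */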
	if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
		return;

	if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) |
				 BIT(HNS3_RXD_OL3E_B) |
				 BIT(HNS3_RXD_OL4E_B)))) {
		u64_stats_update_begin(&ring->syncp);
		ring->stats.l3l4_csum_err++;
		u64_stats_update_end(&ring->syncp);

		return;
	}

	ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M,
				   HNS3_RXD_OL4ID_S);
	switch (ol4_type) {
	case HNS3_OL4_TYPE_MAC_IN_UDP:
	case HNS3_OL4_TYPE_NVGRE:
		skb->csum_level = 1;
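		/* fall through */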
	case HNS3_OL4_TYPE_NO_TUN:
		l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
					  HNS3_RXD_L3ID_S);
		l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
					  HNS3_RXD_L4ID_S);

		/* Can checksum IPv4 or IPv6 + UDP/TCP/SCTP packets */
		if ((l3_type == HNS3_L3_TYPE_IPV4 ||
		     l3_type == HNS3_L3_TYPE_IPV6) &&
		    (l4_type == HNS3_L4_TYPE_UDP ||
		     l4_type == HNS3_L4_TYPE_TCP ||
		     l4_type == HNS3_L4_TYPE_SCTP))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	default:
		break;
	}
}

static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
	if (skb_has_frag_list(skb))
		napi_gro_flush(&ring->tqp_vector->napi, false);

	napi_gro_receive(&ring->tqp_vector->napi, skb);
}

static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
				struct hns3_desc *desc, u32 l234info,
				u16 *vlan_tag)
{
	struct pci_dev *pdev = ring->tqp->handle->pdev;

	if (pdev->revision == 0x20) {
		*vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
		if (!(*vlan_tag & VLAN_VID_MASK))
			*vlan_tag = le16_to_cpu(desc->rx.vlan_tag);

		return (*vlan_tag != 0);
	}

#define HNS3_STRP_OUTER_VLAN	0x1
#define HNS3_STRP_INNER_VLAN	0x2

	switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
				HNS3_RXD_STRP_TAGP_S)) {
	case HNS3_STRP_OUTER_VLAN:
		*vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
		return true;
	case HNS3_STRP_INNER_VLAN:
		*vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
		return true;
	default:
		return false;
	}
}

static int hns3_alloc_skb(struct hns3_enet_ring *ring, int length,
			  unsigned char *va)
{
#define HNS3_NEED_ADD_FRAG	1
	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
	struct sk_buff *skb;

	ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE);
	skb = ring->skb;
	if (unlikely(!skb)) {
		netdev_err(netdev, "alloc rx skb fail\n");

		u64_stats_update_begin(&ring->syncp);
		ring->stats.sw_err_cnt++;
		u64_stats_update_end(&ring->syncp);

		return -ENOMEM;
	}

	prefetchw(skb->data);

	ring->pending_buf = 1;
	ring->frag_num = 0;
	ring->tail_skb = NULL;
	if (length <= HNS3_RX_HEAD_SIZE) {
		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));

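		/* We can reuse the buffer as-is, just make sure it is local */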
		if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
			desc_cb->reuse_flag = 1;
		else /* this page cannot be reused, so discard it */
			put_page(desc_cb->priv);

		ring_ptr_move_fw(ring, next_to_clean);
		return 0;
	}
	u64_stats_update_begin(&ring->syncp);
	ring->stats.seg_pkt_cnt++;
	u64_stats_update_end(&ring->syncp);

	ring->pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
	__skb_put(skb, ring->pull_len);
	hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len,
			    desc_cb);
	ring_ptr_move_fw(ring, next_to_clean);

	return HNS3_NEED_ADD_FRAG;
}

static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
			 struct sk_buff **out_skb, bool pending)
{
	struct sk_buff *skb = *out_skb;
	struct sk_buff *head_skb = *out_skb;
	struct sk_buff *new_skb;
	struct hns3_desc_cb *desc_cb;
	struct hns3_desc *pre_desc;
	u32 bd_base_info;
	int pre_bd;

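	/* When a buffer is pending from the previous poll, next_to_clean
	 * already points past the descriptor that was checked last time,
	 * so look at the previous BD for the FE (frame end) bit.
	 */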
	if (pending) {
		pre_bd = (ring->next_to_clean - 1 + ring->desc_num) %
			 ring->desc_num;
		pre_desc = &ring->desc[pre_bd];
		bd_base_info = le32_to_cpu(pre_desc->rx.bd_base_info);
	} else {
		bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
	}

	while (!(bd_base_info & BIT(HNS3_RXD_FE_B))) {
		desc = &ring->desc[ring->next_to_clean];
		desc_cb = &ring->desc_cb[ring->next_to_clean];
		bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
		/* make sure HW write desc complete */
		dma_rmb();
		if (!(bd_base_info & BIT(HNS3_RXD_VLD_B)))
			return -ENXIO;

		if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) {
			new_skb = napi_alloc_skb(&ring->tqp_vector->napi,
						 HNS3_RX_HEAD_SIZE);
			if (unlikely(!new_skb)) {
				netdev_err(ring->tqp->handle->kinfo.netdev,
					   "alloc rx skb frag fail\n");
				return -ENXIO;
			}
			ring->frag_num = 0;

			if (ring->tail_skb) {
				ring->tail_skb->next = new_skb;
				ring->tail_skb = new_skb;
			} else {
				skb_shinfo(skb)->frag_list = new_skb;
				ring->tail_skb = new_skb;
			}
		}

		if (ring->tail_skb) {
			head_skb->truesize += hnae3_buf_size(ring);
			head_skb->data_len += le16_to_cpu(desc->rx.size);
			head_skb->len += le16_to_cpu(desc->rx.size);
			skb = ring->tail_skb;
		}

		hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
		ring_ptr_move_fw(ring, next_to_clean);
		ring->pending_buf++;
	}

	return 0;
}

static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info,
			       u32 bd_base_info)
{
	u16 gro_count;
	u32 l3_type;

	gro_count = hnae3_get_field(l234info, HNS3_RXD_GRO_COUNT_M,
				    HNS3_RXD_GRO_COUNT_S);
	/* if there is no HW GRO, do not set gro params */
	if (!gro_count)
		return;

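	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */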
	NAPI_GRO_CB(skb)->count = gro_count;

	l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
				  HNS3_RXD_L3ID_S);
	if (l3_type == HNS3_L3_TYPE_IPV4)
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	else if (l3_type == HNS3_L3_TYPE_IPV6)
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	else
		return;

	skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
						    HNS3_RXD_GRO_SIZE_M,
						    HNS3_RXD_GRO_SIZE_S);
	if (skb_shinfo(skb)->gso_size)
		tcp_gro_complete(skb);
}

static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
				     struct sk_buff *skb)
{
	struct hnae3_handle *handle = ring->tqp->handle;
	enum pkt_hash_types rss_type;
	struct hns3_desc *desc;
	int last_bd;

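	/* When the driver handles the rss type, ring->next_to_clean already
	 * indicates the first descriptor of the next packet, so - 1 here.
	 */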
	last_bd = (ring->next_to_clean - 1 + ring->desc_num) % ring->desc_num;
	desc = &ring->desc[last_bd];

	if (le32_to_cpu(desc->rx.rss_hash))
		rss_type = handle->kinfo.rss_type;
	else
		rss_type = PKT_HASH_TYPE_NONE;

	skb_set_hash(skb, le32_to_cpu(desc->rx.rss_hash), rss_type);
}

static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
			     struct sk_buff **out_skb)
{
	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
	enum hns3_pkt_l2t_type l2_frame_type;
	struct sk_buff *skb = ring->skb;
	struct hns3_desc_cb *desc_cb;
	struct hns3_desc *desc;
	u32 bd_base_info;
	u32 l234info;
	int length;
	int ret;

	desc = &ring->desc[ring->next_to_clean];
	desc_cb = &ring->desc_cb[ring->next_to_clean];

	prefetch(desc);

	length = le16_to_cpu(desc->rx.size);
	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);

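	/* Check that the BD is valid before touching it */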
	if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
		return -ENXIO;

	if (!skb)
		ring->va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;

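	/* Prefetch the packet header: L1 cache lines are usually 64B, so
	 * prefetch twice to pull in 128B of the header; on parts with 128B
	 * cache lines the #if below compiles the second prefetch away.
	 */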
	prefetch(ring->va);
#if L1_CACHE_BYTES < 128
	prefetch(ring->va + L1_CACHE_BYTES);
#endif

	if (!skb) {
		ret = hns3_alloc_skb(ring, length, ring->va);
		*out_skb = skb = ring->skb;

		if (ret < 0) /* alloc buffer fail */
			return ret;
		if (ret > 0) { /* need add frag */
			ret = hns3_add_frag(ring, desc, &skb, false);
			if (ret)
				return ret;

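			/* As the head data may be changed when GRO enable,
			 * copy the head data in after other data rx completed
			 */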
			memcpy(skb->data, ring->va,
			       ALIGN(ring->pull_len, sizeof(long)));
		}
	} else {
		ret = hns3_add_frag(ring, desc, &skb, true);
		if (ret)
			return ret;

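		/* As the head data may be changed when GRO enable, copy
		 * the head data in after other data rx completed
		 */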
		memcpy(skb->data, ring->va,
		       ALIGN(ring->pull_len, sizeof(long)));
	}

	l234info = le32_to_cpu(desc->rx.l234_info);
	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);

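	/* Based on hw strategy, the stripped tag is stored at ot_vlan_tag
	 * in the two-layer tag case, and at vlan_tag in the one-layer
	 * tag case.
	 */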
	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
		u16 vlan_tag;

		if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
			__vlan_hwaccel_put_tag(skb,
					       htons(ETH_P_8021Q),
					       vlan_tag);
	}

	if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) {
		u64_stats_update_begin(&ring->syncp);
		ring->stats.non_vld_descs++;
		u64_stats_update_end(&ring->syncp);

		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (unlikely((!desc->rx.pkt_len) ||
		     (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
				  BIT(HNS3_RXD_L2E_B))))) {
		u64_stats_update_begin(&ring->syncp);
		if (l234info & BIT(HNS3_RXD_L2E_B))
			ring->stats.l2_err++;
		else
			ring->stats.err_pkt_len++;
		u64_stats_update_end(&ring->syncp);

		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M,
					HNS3_RXD_DMAC_S);
	u64_stats_update_begin(&ring->syncp);
	if (l2_frame_type == HNS3_L2_TYPE_MULTICAST)
		ring->stats.rx_multicast++;

	ring->stats.rx_pkts++;
	ring->stats.rx_bytes += skb->len;
	u64_stats_update_end(&ring->syncp);

	ring->tqp_vector->rx_group.total_bytes += skb->len;

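	/* This is needed in order to enable forwarding support */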
	hns3_set_gro_param(skb, l234info, bd_base_info);

	hns3_rx_checksum(ring, skb, desc);
	*out_skb = skb;
	hns3_set_rx_skb_rss_type(ring, skb);

	return 0;
}

int hns3_clean_rx_ring(
		struct hns3_enet_ring *ring, int budget,
		void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
	int recv_pkts, recv_bds, clean_count, err;
	int unused_count = hns3_desc_unused(ring) - ring->pending_buf;
	struct sk_buff *skb = ring->skb;
	int num;

	num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
	rmb(); /* Make sure num is taken before the other data is touched */

	recv_pkts = 0, recv_bds = 0, clean_count = 0;
	num -= unused_count;

	while (recv_pkts < budget && recv_bds < num) {
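		/* Reuse or refill Rx buffers */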
		if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
			hns3_nic_alloc_rx_buffers(ring,
						  clean_count + unused_count);
			clean_count = 0;
			unused_count = hns3_desc_unused(ring) -
					ring->pending_buf;
		}

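		/* Poll one packet */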
		err = hns3_handle_rx_bd(ring, &skb);
		if (unlikely(!skb)) /* this fault cannot be repaired */
			goto out;

		if (err == -ENXIO) { /* did not get FE for the packet */
			goto out;
		} else if (unlikely(err)) { /* do jump the err */
			recv_bds += ring->pending_buf;
			clean_count += ring->pending_buf;
			ring->skb = NULL;
			ring->pending_buf = 0;
			continue;
		}

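		/* Hand the completed packet to the IP stack */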
		skb->protocol = eth_type_trans(skb, netdev);
		rx_fn(ring, skb);
		recv_bds += ring->pending_buf;
		clean_count += ring->pending_buf;
		ring->skb = NULL;
		ring->pending_buf = 0;

		recv_pkts++;
	}

out:
	/* Make sure all data has been written before submit */
	if (clean_count + unused_count > 0)
		hns3_nic_alloc_rx_buffers(ring,
					  clean_count + unused_count);

	return recv_pkts;
}

static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
{
	struct hns3_enet_tqp_vector *tqp_vector =
					ring_group->ring->tqp_vector;
	enum hns3_flow_level_range new_flow_level;
	int packets_per_msecs;
	int bytes_per_msecs;
	u32 time_passed_ms;
	u16 new_int_gl;

	if (!tqp_vector->last_jiffies)
		return false;

	if (ring_group->total_packets == 0) {
		ring_group->coal.int_gl = HNS3_INT_GL_50K;
		ring_group->coal.flow_level = HNS3_FLOW_LOW;
		return true;
	}

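	/* Simple throttle rate management:
	 *   <= 10 MB/s  low    (HNS3_INT_GL_50K)
	 *   <= 20 MB/s  mid    (HNS3_INT_GL_20K)
	 *   >  20 MB/s  high   (HNS3_INT_GL_18K)
	 *   >  40 kpps on an Rx group: ultra (HNS3_INT_GL_8K)
	 */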
	new_flow_level = ring_group->coal.flow_level;
	new_int_gl = ring_group->coal.int_gl;
	time_passed_ms =
		jiffies_to_msecs(jiffies - tqp_vector->last_jiffies);

	if (!time_passed_ms)
		return false;

	do_div(ring_group->total_packets, time_passed_ms);
	packets_per_msecs = ring_group->total_packets;

	do_div(ring_group->total_bytes, time_passed_ms);
	bytes_per_msecs = ring_group->total_bytes;

#define HNS3_RX_LOW_BYTE_RATE 10000
#define HNS3_RX_MID_BYTE_RATE 20000

	switch (new_flow_level) {
	case HNS3_FLOW_LOW:
		if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE)
			new_flow_level = HNS3_FLOW_MID;
		break;
	case HNS3_FLOW_MID:
		if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE)
			new_flow_level = HNS3_FLOW_HIGH;
		else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE)
			new_flow_level = HNS3_FLOW_LOW;
		break;
	case HNS3_FLOW_HIGH:
	case HNS3_FLOW_ULTRA:
	default:
		if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE)
			new_flow_level = HNS3_FLOW_MID;
		break;
	}

#define HNS3_RX_ULTRA_PACKET_RATE 40

	if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE &&
	    &tqp_vector->rx_group == ring_group)
		new_flow_level = HNS3_FLOW_ULTRA;

	switch (new_flow_level) {
	case HNS3_FLOW_LOW:
		new_int_gl = HNS3_INT_GL_50K;
		break;
	case HNS3_FLOW_MID:
		new_int_gl = HNS3_INT_GL_20K;
		break;
	case HNS3_FLOW_HIGH:
		new_int_gl = HNS3_INT_GL_18K;
		break;
	case HNS3_FLOW_ULTRA:
		new_int_gl = HNS3_INT_GL_8K;
		break;
	default:
		break;
	}

	ring_group->total_bytes = 0;
	ring_group->total_packets = 0;
	ring_group->coal.flow_level = new_flow_level;
	if (new_int_gl != ring_group->coal.int_gl) {
		ring_group->coal.int_gl = new_int_gl;
		return true;
	}
	return false;
}

static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
{
	struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
	struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
	bool rx_update, tx_update;

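	/* update the coalesce param every 1000ms */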
	if (time_before(jiffies,
			tqp_vector->last_jiffies + msecs_to_jiffies(1000)))
		return;

	if (rx_group->coal.gl_adapt_enable) {
		rx_update = hns3_get_new_int_gl(rx_group);
		if (rx_update)
			hns3_set_vector_coalesce_rx_gl(tqp_vector,
						       rx_group->coal.int_gl);
	}

	if (tx_group->coal.gl_adapt_enable) {
		tx_update = hns3_get_new_int_gl(tx_group);
		if (tx_update)
			hns3_set_vector_coalesce_tx_gl(tqp_vector,
						       tx_group->coal.int_gl);
	}

	tqp_vector->last_jiffies = jiffies;
}

static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
{
	struct hns3_nic_priv *priv = netdev_priv(napi->dev);
	struct hns3_enet_ring *ring;
	int rx_pkt_total = 0;

	struct hns3_enet_tqp_vector *tqp_vector =
		container_of(napi, struct hns3_enet_tqp_vector, napi);
	bool clean_complete = true;
	int rx_budget;

	if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
		napi_complete(napi);
		return 0;
	}

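	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */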
	hns3_for_each_ring(ring, tqp_vector->tx_group)
		hns3_clean_tx_ring(ring);

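	/* make sure the Rx ring budget is not smaller than 1 */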
	rx_budget = max(budget / tqp_vector->num_tqps, 1);

	hns3_for_each_ring(ring, tqp_vector->rx_group) {
		int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
						    hns3_rx_skb);

		if (rx_cleaned >= rx_budget)
			clean_complete = false;

		rx_pkt_total += rx_cleaned;
	}

	tqp_vector->rx_group.total_packets += rx_pkt_total;

	if (!clean_complete)
		return budget;

	if (napi_complete(napi) &&
	    likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
		hns3_update_new_int_gl(tqp_vector);
		hns3_mask_vector_irq(tqp_vector, 1);
	}

	return rx_pkt_total;
}

static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
				      struct hnae3_ring_chain_node *head)
{
	struct pci_dev *pdev = tqp_vector->handle->pdev;
	struct hnae3_ring_chain_node *cur_chain = head;
	struct hnae3_ring_chain_node *chain;
	struct hns3_enet_ring *tx_ring;
	struct hns3_enet_ring *rx_ring;

	tx_ring = tqp_vector->tx_group.ring;
	if (tx_ring) {
		cur_chain->tqp_index = tx_ring->tqp->tqp_index;
		hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
			      HNAE3_RING_TYPE_TX);
		hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
				HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);

		cur_chain->next = NULL;

		while (tx_ring->next) {
			tx_ring = tx_ring->next;

			chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
					     GFP_KERNEL);
			if (!chain)
				goto err_free_chain;

			cur_chain->next = chain;
			chain->tqp_index = tx_ring->tqp->tqp_index;
			hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
				      HNAE3_RING_TYPE_TX);
			hnae3_set_field(chain->int_gl_idx,
					HNAE3_RING_GL_IDX_M,
					HNAE3_RING_GL_IDX_S,
					HNAE3_RING_GL_TX);

			cur_chain = chain;
		}
	}

	rx_ring = tqp_vector->rx_group.ring;
	if (!tx_ring && rx_ring) {
		cur_chain->next = NULL;
		cur_chain->tqp_index = rx_ring->tqp->tqp_index;
		hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
			      HNAE3_RING_TYPE_RX);
		hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
				HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);

		rx_ring = rx_ring->next;
	}

	while (rx_ring) {
		chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
		if (!chain)
			goto err_free_chain;

		cur_chain->next = chain;
		chain->tqp_index = rx_ring->tqp->tqp_index;
		hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
			      HNAE3_RING_TYPE_RX);
		hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
				HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);

		cur_chain = chain;

		rx_ring = rx_ring->next;
	}

	return 0;

err_free_chain:
	cur_chain = head->next;
	while (cur_chain) {
		chain = cur_chain->next;
		devm_kfree(&pdev->dev, cur_chain);
		cur_chain = chain;
	}
	head->next = NULL;

	return -ENOMEM;
}

static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
					struct hnae3_ring_chain_node *head)
{
	struct pci_dev *pdev = tqp_vector->handle->pdev;
	struct hnae3_ring_chain_node *chain_tmp, *chain;

	chain = head->next;

	while (chain) {
		chain_tmp = chain->next;
		devm_kfree(&pdev->dev, chain);
		chain = chain_tmp;
	}
}

static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
				   struct hns3_enet_ring *ring)
{
	ring->next = group->ring;
	group->ring = ring;

	group->count++;
}

static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv)
{
	struct pci_dev *pdev = priv->ae_handle->pdev;
	struct hns3_enet_tqp_vector *tqp_vector;
	int num_vectors = priv->vector_num;
	int numa_node;
	int vector_i;

	numa_node = dev_to_node(&pdev->dev);

	for (vector_i = 0; vector_i < num_vectors; vector_i++) {
		tqp_vector = &priv->tqp_vector[vector_i];
		cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node),
				&tqp_vector->affinity_mask);
	}
}

static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_ring_chain_node vector_ring_chain;
	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	int ret = 0;
	int i;

	hns3_nic_set_cpumask(priv);

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];
		hns3_vector_gl_rl_init_hw(tqp_vector, priv);
		tqp_vector->num_tqps = 0;
	}

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		u16 vector_i = i % priv->vector_num;
		u16 tqp_num = h->kinfo.num_tqps;

		tqp_vector = &priv->tqp_vector[vector_i];

		hns3_add_ring_to_group(&tqp_vector->tx_group,
				       priv->ring_data[i].ring);

		hns3_add_ring_to_group(&tqp_vector->rx_group,
				       priv->ring_data[i + tqp_num].ring);

		priv->ring_data[i].ring->tqp_vector = tqp_vector;
		priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
		tqp_vector->num_tqps++;
	}

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];

		tqp_vector->rx_group.total_bytes = 0;
		tqp_vector->rx_group.total_packets = 0;
		tqp_vector->tx_group.total_bytes = 0;
		tqp_vector->tx_group.total_packets = 0;
		tqp_vector->handle = h;

		ret = hns3_get_vector_ring_chain(tqp_vector,
						 &vector_ring_chain);
		if (ret)
			goto map_ring_fail;

		ret = h->ae_algo->ops->map_ring_to_vector(h,
			tqp_vector->vector_irq, &vector_ring_chain);

		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);

		if (ret)
			goto map_ring_fail;

		netif_napi_add(priv->netdev, &tqp_vector->napi,
			       hns3_nic_common_poll, NAPI_POLL_WEIGHT);
	}

	return 0;

map_ring_fail:
	while (i--)
		netif_napi_del(&priv->tqp_vector[i].napi);

	return ret;
}

static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
{
#define HNS3_VECTOR_PF_MAX_NUM		64

	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	struct hnae3_vector_info *vector;
	struct pci_dev *pdev = h->pdev;
	u16 tqp_num = h->kinfo.num_tqps;
	u16 vector_num;
	int ret = 0;
	u16 i;

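	/* Use no more vectors than online CPUs, queue pairs, or the
	 * per-PF maximum.
	 */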
	vector_num = min_t(u16, num_online_cpus(), tqp_num);
	vector_num = min_t(u16, vector_num, HNS3_VECTOR_PF_MAX_NUM);

	vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
			      GFP_KERNEL);
	if (!vector)
		return -ENOMEM;

	vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);

	priv->vector_num = vector_num;
	priv->tqp_vector = (struct hns3_enet_tqp_vector *)
		devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
			     GFP_KERNEL);
	if (!priv->tqp_vector) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];
		tqp_vector->idx = i;
		tqp_vector->mask_addr = vector[i].io_addr;
		tqp_vector->vector_irq = vector[i].vector;
		hns3_vector_gl_rl_init(tqp_vector, priv);
	}

out:
	devm_kfree(&pdev->dev, vector);
	return ret;
}

static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
{
	group->ring = NULL;
	group->count = 0;
}

static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_ring_chain_node vector_ring_chain;
	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	int i;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];

		if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring)
			continue;

		hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain);

		h->ae_algo->ops->unmap_ring_from_vector(h,
			tqp_vector->vector_irq, &vector_ring_chain);

		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);

		if (tqp_vector->irq_init_flag == HNS3_VECTOR_INITED) {
			irq_set_affinity_notifier(tqp_vector->vector_irq,
						  NULL);
			irq_set_affinity_hint(tqp_vector->vector_irq, NULL);
			free_irq(tqp_vector->vector_irq, tqp_vector);
			tqp_vector->irq_init_flag = HNS3_VECTOR_NOT_INITED;
		}

		hns3_clear_ring_group(&tqp_vector->rx_group);
		hns3_clear_ring_group(&tqp_vector->tx_group);
		netif_napi_del(&priv->tqp_vector[i].napi);
	}
}

static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	struct pci_dev *pdev = h->pdev;
	int i, ret;

	for (i = 0; i < priv->vector_num; i++) {
		struct hns3_enet_tqp_vector *tqp_vector;

		tqp_vector = &priv->tqp_vector[i];
		ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
		if (ret)
			return ret;
	}

	devm_kfree(&pdev->dev, priv->tqp_vector);
	return 0;
}

static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
			     int ring_type)
{
	struct hns3_nic_ring_data *ring_data = priv->ring_data;
	int queue_num = priv->ae_handle->kinfo.num_tqps;
	struct pci_dev *pdev = priv->ae_handle->pdev;
	struct hns3_enet_ring *ring;
	int desc_num;

	ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	if (ring_type == HNAE3_RING_TYPE_TX) {
		desc_num = priv->ae_handle->kinfo.num_tx_desc;
		ring_data[q->tqp_index].ring = ring;
		ring_data[q->tqp_index].queue_index = q->tqp_index;
		ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
	} else {
		desc_num = priv->ae_handle->kinfo.num_rx_desc;
		ring_data[q->tqp_index + queue_num].ring = ring;
		ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
		ring->io_base = q->io_base;
	}

	hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);

	ring->tqp = q;
	ring->desc = NULL;
	ring->desc_cb = NULL;
	ring->dev = priv->dev;
	ring->desc_dma_addr = 0;
	ring->buf_size = q->buf_size;
	ring->desc_num = desc_num;
	ring->next_to_use = 0;
	ring->next_to_clean = 0;

	return 0;
}

static int hns3_queue_to_ring(struct hnae3_queue *tqp,
			      struct hns3_nic_priv *priv)
{
	int ret;

	ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
	if (ret)
		return ret;

	ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
	if (ret) {
		devm_kfree(priv->dev, priv->ring_data[tqp->tqp_index].ring);
		return ret;
	}

	return 0;
}

static int hns3_get_ring_config(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	struct pci_dev *pdev = h->pdev;
	int i, ret;

	priv->ring_data = devm_kzalloc(&pdev->dev,
				       array3_size(h->kinfo.num_tqps,
						   sizeof(*priv->ring_data),
						   2),
				       GFP_KERNEL);
	if (!priv->ring_data)
		return -ENOMEM;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
		if (ret)
			goto err;
	}

	return 0;
err:
	while (i--) {
		devm_kfree(priv->dev, priv->ring_data[i].ring);
		devm_kfree(priv->dev,
			   priv->ring_data[i + h->kinfo.num_tqps].ring);
	}

	devm_kfree(&pdev->dev, priv->ring_data);
	return ret;
}

static void hns3_put_ring_config(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		devm_kfree(priv->dev, priv->ring_data[i].ring);
		devm_kfree(priv->dev,
			   priv->ring_data[i + h->kinfo.num_tqps].ring);
	}
	devm_kfree(priv->dev, priv->ring_data);
}

static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
{
	int ret;

	if (ring->desc_num <= 0 || ring->buf_size <= 0)
		return -EINVAL;

	ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
				GFP_KERNEL);
	if (!ring->desc_cb) {
		ret = -ENOMEM;
		goto out;
	}

	ret = hns3_alloc_desc(ring);
	if (ret)
		goto out_with_desc_cb;

	if (!HNAE3_IS_TX_RING(ring)) {
		ret = hns3_alloc_ring_buffers(ring);
		if (ret)
			goto out_with_desc;
	}

	return 0;

out_with_desc:
	hns3_free_desc(ring);
out_with_desc_cb:
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
out:
	return ret;
}

static void hns3_fini_ring(struct hns3_enet_ring *ring)
{
	hns3_free_desc(ring);
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
	ring->pending_buf = 0;
	if (ring->skb) {
		dev_kfree_skb_any(ring->skb);
		ring->skb = NULL;
	}
}

static int hns3_buf_size2type(u32 buf_size)
{
	int bd_size_type;

	switch (buf_size) {
	case 512:
		bd_size_type = HNS3_BD_SIZE_512_TYPE;
		break;
	case 1024:
		bd_size_type = HNS3_BD_SIZE_1024_TYPE;
		break;
	case 2048:
		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
		break;
	case 4096:
		bd_size_type = HNS3_BD_SIZE_4096_TYPE;
		break;
	default:
		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
	}

	return bd_size_type;
}

static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
{
	dma_addr_t dma = ring->desc_dma_addr;
	struct hnae3_queue *q = ring->tqp;

	if (!HNAE3_IS_TX_RING(ring)) {
		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
			       (u32)dma);
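		/* (dma >> 31) >> 1 extracts the high 32 bits while avoiding
		 * an undefined shift by 32 when dma_addr_t is 32 bits wide.
		 */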
		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
			       hns3_buf_size2type(ring->buf_size));
		hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
			       ring->desc_num / 8 - 1);

	} else {
		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
			       (u32)dma);
		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
			       ring->desc_num / 8 - 1);
	}
}

static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
{
	struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
	int i;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
		int j;

		if (!tc_info->enable)
			continue;

		for (j = 0; j < tc_info->tqp_count; j++) {
			struct hnae3_queue *q;

			q = priv->ring_data[tc_info->tqp_offset + j].ring->tqp;
			hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG,
				       tc_info->tc);
		}
	}
}

int hns3_init_all_ring(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int ring_num = h->kinfo.num_tqps * 2;
	int i, j;
	int ret;

	for (i = 0; i < ring_num; i++) {
		ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
		if (ret) {
			dev_err(priv->dev,
				"Alloc ring memory fail! ret=%d\n", ret);
			goto out_when_alloc_ring_memory;
		}

		u64_stats_init(&priv->ring_data[i].ring->syncp);
	}

	return 0;

out_when_alloc_ring_memory:
	for (j = i - 1; j >= 0; j--)
		hns3_fini_ring(priv->ring_data[j].ring);

	return -ENOMEM;
}

int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		hns3_fini_ring(priv->ring_data[i].ring);
		hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
	}
	return 0;
}

/* Set mac addr if it is configured, or leave it to the AE driver */
static int hns3_init_mac_addr(struct net_device *netdev, bool init)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	u8 mac_addr_temp[ETH_ALEN];
	int ret = 0;

	if (h->ae_algo->ops->get_mac_addr && init) {
		h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
		ether_addr_copy(netdev->dev_addr, mac_addr_temp);
	}

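	/* Check if the MAC address is valid, if not get a random one */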
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		eth_hw_addr_random(netdev);
		dev_warn(priv->dev, "using random MAC address %pM\n",
			 netdev->dev_addr);
	}

	if (h->ae_algo->ops->set_mac_addr)
		ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);

	return ret;
}

static int hns3_init_phy(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	int ret = 0;

	if (h->ae_algo->ops->mac_connect_phy)
		ret = h->ae_algo->ops->mac_connect_phy(h);

	return ret;
}

static void hns3_uninit_phy(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->mac_disconnect_phy)
		h->ae_algo->ops->mac_disconnect_phy(h);
}

static int hns3_restore_fd_rules(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	int ret = 0;

	if (h->ae_algo->ops->restore_fd_rules)
		ret = h->ae_algo->ops->restore_fd_rules(h);

	return ret;
}

static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->del_all_fd_entries)
		h->ae_algo->ops->del_all_fd_entries(h, clear_list);
}

static void hns3_nic_set_priv_ops(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);

	if ((netdev->features & NETIF_F_TSO) ||
	    (netdev->features & NETIF_F_TSO6))
		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
	else
		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
}

static int hns3_client_start(struct hnae3_handle *handle)
{
	if (!handle->ae_algo->ops->client_start)
		return 0;

	return handle->ae_algo->ops->client_start(handle);
}

static void hns3_client_stop(struct hnae3_handle *handle)
{
	if (!handle->ae_algo->ops->client_stop)
		return;

	handle->ae_algo->ops->client_stop(handle);
}

static int hns3_client_init(struct hnae3_handle *handle)
{
	struct pci_dev *pdev = handle->pdev;
	u16 alloc_tqps, max_rss_size;
	struct hns3_nic_priv *priv;
	struct net_device *netdev;
	int ret;

	handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps,
						    &max_rss_size);
	netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps);
	if (!netdev)
		return -ENOMEM;

	priv = netdev_priv(netdev);
	priv->dev = &pdev->dev;
	priv->netdev = netdev;
	priv->ae_handle = handle;
	priv->tx_timeout_count = 0;
	set_bit(HNS3_NIC_STATE_DOWN, &priv->state);

	handle->kinfo.netdev = netdev;
	handle->priv = (void *)priv;

	hns3_init_mac_addr(netdev, true);

	hns3_set_default_feature(netdev);

	netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->netdev_ops = &hns3_nic_netdev_ops;
	SET_NETDEV_DEV(netdev, &pdev->dev);
	hns3_ethtool_set_ops(netdev);
	hns3_nic_set_priv_ops(netdev);

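	/* Carrier off reporting is important to ethtool even BEFORE open */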
	netif_carrier_off(netdev);

	ret = hns3_get_ring_config(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_get_ring_cfg;
	}

	ret = hns3_nic_alloc_vector_data(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_alloc_vector_data;
	}

	ret = hns3_nic_init_vector_data(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_vector_data;
	}

	ret = hns3_init_all_ring(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_ring_data;
	}

	ret = hns3_init_phy(netdev);
	if (ret)
		goto out_init_phy;

	ret = register_netdev(netdev);
	if (ret) {
		dev_err(priv->dev, "probe register netdev fail!\n");
		goto out_reg_netdev_fail;
	}

	ret = hns3_client_start(handle);
	if (ret) {
		dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
		goto out_client_start;
	}

	hns3_dcbnl_setup(handle);

	hns3_dbg_init(handle);

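	/* The lower MTU bound stays at the kernel default (ETH_MIN_MTU);
	 * only the upper bound is raised here.
	 */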
	netdev->max_mtu = HNS3_MAX_MTU;

	set_bit(HNS3_NIC_STATE_INITED, &priv->state);

	return ret;

out_client_start:
	unregister_netdev(netdev);
out_reg_netdev_fail:
	hns3_uninit_phy(netdev);
out_init_phy:
	hns3_uninit_all_ring(priv);
out_init_ring_data:
	hns3_nic_uninit_vector_data(priv);
out_init_vector_data:
	hns3_nic_dealloc_vector_data(priv);
out_alloc_vector_data:
	priv->ring_data = NULL;
out_get_ring_cfg:
	priv->ae_handle = NULL;
	free_netdev(netdev);
	return ret;
}

static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	hns3_client_stop(handle);

	hns3_remove_hw_addr(netdev);

	if (netdev->reg_state != NETREG_UNINITIALIZED)
		unregister_netdev(netdev);

	if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
		netdev_warn(netdev, "already uninitialized\n");
		goto out_netdev_free;
	}

	hns3_del_all_fd_rules(netdev, true);

	hns3_force_clear_all_rx_ring(handle);

	hns3_uninit_phy(netdev);

	hns3_nic_uninit_vector_data(priv);

	ret = hns3_nic_dealloc_vector_data(priv);
	if (ret)
		netdev_err(netdev, "dealloc vector error\n");

	ret = hns3_uninit_all_ring(priv);
	if (ret)
		netdev_err(netdev, "uninit ring error\n");

	hns3_put_ring_config(priv);

	hns3_dbg_uninit(handle);

	priv->ring_data = NULL;

out_netdev_free:
	free_netdev(netdev);
}

static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
{
	struct net_device *netdev = handle->kinfo.netdev;

	if (!netdev)
		return;

	if (linkup) {
		netif_carrier_on(netdev);
		netif_tx_wake_all_queues(netdev);
		netdev_info(netdev, "link up\n");
	} else {
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		netdev_info(netdev, "link down\n");
	}
}

static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct net_device *ndev = kinfo->netdev;

	if (tc > HNAE3_MAX_TC)
		return -EINVAL;

	if (!ndev)
		return -ENODEV;

	return hns3_nic_set_real_num_queue(ndev);
}

static int hns3_recover_hw_addr(struct net_device *ndev)
{
	struct netdev_hw_addr_list *list;
	struct netdev_hw_addr *ha, *tmp;
	int ret = 0;

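	/* go through and sync uc_addr entries to the device */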
	list = &ndev->uc;
	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		ret = hns3_nic_uc_sync(ndev, ha->addr);
		if (ret)
			return ret;
	}

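	/* go through and sync mc_addr entries to the device */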
	list = &ndev->mc;
	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		ret = hns3_nic_mc_sync(ndev, ha->addr);
		if (ret)
			return ret;
	}

	return ret;
}

static void hns3_remove_hw_addr(struct net_device *netdev)
{
	struct netdev_hw_addr_list *list;
	struct netdev_hw_addr *ha, *tmp;

	hns3_nic_uc_unsync(netdev, netdev->dev_addr);

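	/* go through and unsync uc_addr entries to the device */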
	list = &netdev->uc;
	list_for_each_entry_safe(ha, tmp, &list->list, list)
		hns3_nic_uc_unsync(netdev, ha->addr);

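	/* go through and unsync mc_addr entries to the device */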
	list = &netdev->mc;
	list_for_each_entry_safe(ha, tmp, &list->list, list)
		if (ha->refcount > 1)
			hns3_nic_mc_unsync(netdev, ha->addr);
}

static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
{
	while (ring->next_to_clean != ring->next_to_use) {
		ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
		hns3_free_buffer_detach(ring, ring->next_to_clean);
		ring_ptr_move_fw(ring, next_to_clean);
	}
}

static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
{
	struct hns3_desc_cb res_cbs;
	int ret;

	while (ring->next_to_use != ring->next_to_clean) {
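		/* When a buffer is not reused, its memory has been freed
		 * in hns3_handle_rx_bd or will be freed by the stack, so
		 * we need to replace the buffer here.
		 */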
		if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
			ret = hns3_reserve_buffer_map(ring, &res_cbs);
			if (ret) {
				u64_stats_update_begin(&ring->syncp);
				ring->stats.sw_err_cnt++;
				u64_stats_update_end(&ring->syncp);
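				/* If a replacement buffer cannot be mapped,
				 * stop and report the error to the caller.
				 */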
				netdev_warn(ring->tqp->handle->kinfo.netdev,
					    "reserve buffer map failed, ret = %d\n",
					    ret);
				return ret;
			}
			hns3_replace_buffer(ring, ring->next_to_use,
					    &res_cbs);
		}
		ring_ptr_move_fw(ring, next_to_use);
	}

	return 0;
}

static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
{
	while (ring->next_to_use != ring->next_to_clean) {
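		/* When a buffer is not reused, its memory has been freed
		 * in hns3_handle_rx_bd or will be freed by the stack, so
		 * only the DMA mapping needs to be undone here.
		 */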
		if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
			hns3_unmap_buffer(ring,
					  &ring->desc_cb[ring->next_to_use]);
			ring->desc_cb[ring->next_to_use].dma = 0;
		}

		ring_ptr_move_fw(ring, next_to_use);
	}
}

static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hns3_enet_ring *ring;
	u32 i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
		hns3_force_clear_rx_ring(ring);
	}
}

static void hns3_clear_all_ring(struct hnae3_handle *h)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	u32 i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		struct netdev_queue *dev_queue;
		struct hns3_enet_ring *ring;

		ring = priv->ring_data[i].ring;
		hns3_clear_tx_ring(ring);
		dev_queue = netdev_get_tx_queue(ndev,
						priv->ring_data[i].queue_index);
		netdev_tx_reset_queue(dev_queue);

		ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
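		/* Continue to clear other rings even if clearing some
		 * rings failed.
		 */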
		hns3_clear_rx_ring(ring);
	}
}

int hns3_nic_reset_all_ring(struct hnae3_handle *h)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hns3_enet_ring *rx_ring;
	int i, j;
	int ret;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		ret = h->ae_algo->ops->reset_queue(h, i);
		if (ret)
			return ret;

		hns3_init_ring_hw(priv->ring_data[i].ring);

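		/* Clear the Tx ring so that no stale descriptors survive
		 * the queue reset.
		 */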
		hns3_clear_tx_ring(priv->ring_data[i].ring);
		priv->ring_data[i].ring->next_to_clean = 0;
		priv->ring_data[i].ring->next_to_use = 0;

		rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
		hns3_init_ring_hw(rx_ring);
		ret = hns3_clear_rx_ring(rx_ring);
		if (ret)
			return ret;

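		/* We can not know the hardware head and tail when this
		 * function is called in the reset flow, so we reuse all
		 * descriptors.
		 */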
		for (j = 0; j < rx_ring->desc_num; j++)
			hns3_reuse_buffer(rx_ring, j);

		rx_ring->next_to_clean = 0;
		rx_ring->next_to_use = 0;
	}

	hns3_init_tx_ring_tc(priv);

	return 0;
}

static void hns3_store_coal(struct hns3_nic_priv *priv)
{
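	/* The coalesce configuration is the same for every vector (see
	 * hns3_restore_coal()), so saving vector 0's Tx/Rx settings is
	 * sufficient.
	 */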
	memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal,
	       sizeof(struct hns3_enet_coalesce));
	memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal,
	       sizeof(struct hns3_enet_coalesce));
}

static void hns3_restore_coal(struct hns3_nic_priv *priv)
{
	u16 vector_num = priv->vector_num;
	int i;

	for (i = 0; i < vector_num; i++) {
		memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal,
		       sizeof(struct hns3_enet_coalesce));
		memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal,
		       sizeof(struct hns3_enet_coalesce));
	}
}

static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct net_device *ndev = kinfo->netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);

	if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
		return 0;

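	/* A function reset does not clear the hardware tables by itself,
	 * so the MAC and flow-director entries are removed by software
	 * here and restored again after the reset completes.
	 */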
	if (hns3_dev_ongoing_func_reset(ae_dev)) {
		hns3_remove_hw_addr(ndev);
		hns3_del_all_fd_rules(ndev, false);
	}

	if (!netif_running(ndev))
		return 0;

	return hns3_nic_net_stop(ndev);
}

static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
	int ret = 0;

	clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state);

	if (netif_running(kinfo->netdev)) {
		ret = hns3_nic_net_open(kinfo->netdev);
		if (ret) {
			set_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
			netdev_err(kinfo->netdev,
				   "hns net up fail, ret=%d!\n", ret);
			return ret;
		}
	}

	return ret;
}

static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

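	/* Carrier off reporting is important to ethtool even BEFORE open */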
	netif_carrier_off(netdev);

	ret = hns3_get_ring_config(priv);
	if (ret)
		return ret;

	ret = hns3_nic_alloc_vector_data(priv);
	if (ret)
		goto err_put_ring;

	hns3_restore_coal(priv);

	ret = hns3_nic_init_vector_data(priv);
	if (ret)
		goto err_dealloc_vector;

	ret = hns3_init_all_ring(priv);
	if (ret)
		goto err_uninit_vector;

	set_bit(HNS3_NIC_STATE_INITED, &priv->state);

	return ret;

err_uninit_vector:
	hns3_nic_uninit_vector_data(priv);
	priv->ring_data = NULL;
err_dealloc_vector:
	hns3_nic_dealloc_vector_data(priv);
err_put_ring:
	hns3_put_ring_config(priv);
	priv->ring_data = NULL;

	return ret;
}

static int hns3_reset_notify_restore_enet(struct hnae3_handle *handle)
{
	struct net_device *netdev = handle->kinfo.netdev;
	bool vlan_filter_enable;
	int ret;

	ret = hns3_init_mac_addr(netdev, false);
	if (ret)
		return ret;

	ret = hns3_recover_hw_addr(netdev);
	if (ret)
		return ret;

	ret = hns3_update_promisc_mode(netdev, handle->netdev_flags);
	if (ret)
		return ret;

	vlan_filter_enable = netdev->flags & IFF_PROMISC ? false : true;
	hns3_enable_vlan_filter(netdev, vlan_filter_enable);

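	/* The hardware VLAN table is only cleared by a PF reset, so a VF
	 * does not need to restore it.
	 */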
	if (!(handle->flags & HNAE3_SUPPORT_VF)) {
		ret = hns3_restore_vlan(netdev);
		if (ret)
			return ret;
	}

	return hns3_restore_fd_rules(netdev);
}

static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
		netdev_warn(netdev, "already uninitialized\n");
		return 0;
	}

	hns3_force_clear_all_rx_ring(handle);

	hns3_nic_uninit_vector_data(priv);

	hns3_store_coal(priv);

	ret = hns3_nic_dealloc_vector_data(priv);
	if (ret)
		netdev_err(netdev, "dealloc vector error\n");

	ret = hns3_uninit_all_ring(priv);
	if (ret)
		netdev_err(netdev, "uninit ring error\n");

	hns3_put_ring_config(priv);
	priv->ring_data = NULL;

	clear_bit(HNS3_NIC_STATE_INITED, &priv->state);

	return ret;
}

static int hns3_reset_notify(struct hnae3_handle *handle,
			     enum hnae3_reset_notify_type type)
{
	int ret = 0;

	switch (type) {
	case HNAE3_UP_CLIENT:
		ret = hns3_reset_notify_up_enet(handle);
		break;
	case HNAE3_DOWN_CLIENT:
		ret = hns3_reset_notify_down_enet(handle);
		break;
	case HNAE3_INIT_CLIENT:
		ret = hns3_reset_notify_init_enet(handle);
		break;
	case HNAE3_UNINIT_CLIENT:
		ret = hns3_reset_notify_uninit_enet(handle);
		break;
	case HNAE3_RESTORE_CLIENT:
		ret = hns3_reset_notify_restore_enet(handle);
		break;
	default:
		break;
	}

	return ret;
}

int hns3_set_channels(struct net_device *netdev,
		      struct ethtool_channels *ch)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo = &h->kinfo;
	bool rxfh_configured = netif_is_rxfh_configured(netdev);
	u32 new_tqp_num = ch->combined_count;
	u16 org_tqp_num;
	int ret;

	if (ch->rx_count || ch->tx_count)
		return -EINVAL;

	if (new_tqp_num > hns3_get_max_available_channels(h) ||
	    new_tqp_num < 1) {
		dev_err(&netdev->dev,
			"Change tqps fail, the tqp range is from 1 to %d",
			hns3_get_max_available_channels(h));
		return -EINVAL;
	}

	if (kinfo->rss_size == new_tqp_num)
		return 0;

	ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	org_tqp_num = h->kinfo.num_tqps;
	ret = h->ae_algo->ops->set_channels(h, new_tqp_num, rxfh_configured);
	if (ret) {
		ret = h->ae_algo->ops->set_channels(h, org_tqp_num,
						    rxfh_configured);
		if (ret) {
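			/* If reverting to the old tqp num also fails, a
			 * fatal error has occurred; nothing more can be
			 * done here.
			 */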
			dev_err(&netdev->dev,
				"Revert to old tqp num fail, ret=%d", ret);
			return ret;
		}
		dev_info(&netdev->dev,
			 "Change tqp num fail, Revert to old tqp num");
	}
	ret = hns3_reset_notify(h, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	return hns3_reset_notify(h, HNAE3_UP_CLIENT);
}

static const struct hnae3_client_ops client_ops = {
	.init_instance = hns3_client_init,
	.uninit_instance = hns3_client_uninit,
	.link_status_change = hns3_link_status_change,
	.setup_tc = hns3_client_setup_tc,
	.reset_notify = hns3_reset_notify,
};

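/* hns3_init_module - Driver registration routine
 * hns3_init_module is the first routine called when the driver is
 * loaded. All it does is register the client and the PCI driver.
 */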
static int __init hns3_init_module(void)
{
	int ret;

	pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
	pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);

	client.type = HNAE3_CLIENT_KNIC;
	snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
		 hns3_driver_name);

	client.ops = &client_ops;

	INIT_LIST_HEAD(&client.node);

	hns3_dbg_register_debugfs(hns3_driver_name);

	ret = hnae3_register_client(&client);
	if (ret)
		goto err_reg_client;

	ret = pci_register_driver(&hns3_driver);
	if (ret)
		goto err_reg_driver;

	return ret;

err_reg_driver:
	hnae3_unregister_client(&client);
err_reg_client:
	hns3_dbg_unregister_debugfs();
	return ret;
}
module_init(hns3_init_module);

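/* hns3_exit_module - Driver exit cleanup routine
 * hns3_exit_module is called just before the driver is removed
 * from memory.
 */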
static void __exit hns3_exit_module(void)
{
	pci_unregister_driver(&hns3_driver);
	hnae3_unregister_client(&client);
	hns3_dbg_unregister_debugfs();
}
module_exit(hns3_exit_module);

MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("pci:hns-nic");
MODULE_VERSION(HNS3_MOD_VERSION);