1
2
3
4#include <linux/dma-mapping.h>
5#include <linux/etherdevice.h>
6#include <linux/interrupt.h>
7#include <linux/if_vlan.h>
8#include <linux/ip.h>
9#include <linux/ipv6.h>
10#include <linux/module.h>
11#include <linux/pci.h>
12#include <linux/aer.h>
13#include <linux/skbuff.h>
14#include <linux/sctp.h>
15#include <linux/vermagic.h>
16#include <net/gre.h>
17#include <net/pkt_cls.h>
18#include <net/tcp.h>
19#include <net/vxlan.h>
20
21#include "hnae3.h"
22#include "hns3_enet.h"
23
24#define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift)))
25#define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)
26
27static void hns3_clear_all_ring(struct hnae3_handle *h);
28static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
29static void hns3_remove_hw_addr(struct net_device *netdev);
30
31static const char hns3_driver_name[] = "hns3";
32const char hns3_driver_version[] = VERMAGIC_STRING;
33static const char hns3_driver_string[] =
34 "Hisilicon Ethernet Network Driver for Hip08 Family";
35static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
36static struct hnae3_client client;
37
38static int debug = -1;
39module_param(debug, int, 0);
40MODULE_PARM_DESC(debug, " Network interface message level setting");
41
42#define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \
43 NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
44
45
46
47
48
49
50
51
52static const struct pci_device_id hns3_pci_tbl[] = {
53 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
54 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
55 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
56 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
57 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
58 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
59 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
60 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
61 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
62 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
63 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
64 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
65 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
66 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF),
67 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
68
69 {0, }
70};
71MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
72
73static irqreturn_t hns3_irq_handle(int irq, void *vector)
74{
75 struct hns3_enet_tqp_vector *tqp_vector = vector;
76
77 napi_schedule_irqoff(&tqp_vector->napi);
78
79 return IRQ_HANDLED;
80}
81
82
83
84
85static void hns3_nic_irq_affinity_notify(struct irq_affinity_notify *notify,
86 const cpumask_t *mask)
87{
88 struct hns3_enet_tqp_vector *tqp_vectors =
89 container_of(notify, struct hns3_enet_tqp_vector,
90 affinity_notify);
91
92 tqp_vectors->affinity_mask = *mask;
93}
94
95static void hns3_nic_irq_affinity_release(struct kref *ref)
96{
97}
98
99static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
100{
101 struct hns3_enet_tqp_vector *tqp_vectors;
102 unsigned int i;
103
104 for (i = 0; i < priv->vector_num; i++) {
105 tqp_vectors = &priv->tqp_vector[i];
106
107 if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
108 continue;
109
110
111 irq_set_affinity_notifier(tqp_vectors->vector_irq, NULL);
112 irq_set_affinity_hint(tqp_vectors->vector_irq, NULL);
113
114
115 free_irq(tqp_vectors->vector_irq, tqp_vectors);
116 tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
117 }
118}
119
120static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
121{
122 struct hns3_enet_tqp_vector *tqp_vectors;
123 int txrx_int_idx = 0;
124 int rx_int_idx = 0;
125 int tx_int_idx = 0;
126 unsigned int i;
127 int ret;
128
129 for (i = 0; i < priv->vector_num; i++) {
130 tqp_vectors = &priv->tqp_vector[i];
131
132 if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
133 continue;
134
135 if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
136 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
137 "%s-%s-%d", priv->netdev->name, "TxRx",
138 txrx_int_idx++);
139 txrx_int_idx++;
140 } else if (tqp_vectors->rx_group.ring) {
141 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
142 "%s-%s-%d", priv->netdev->name, "Rx",
143 rx_int_idx++);
144 } else if (tqp_vectors->tx_group.ring) {
145 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
146 "%s-%s-%d", priv->netdev->name, "Tx",
147 tx_int_idx++);
148 } else {
149
150 continue;
151 }
152
153 tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
154
155 ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
156 tqp_vectors->name,
157 tqp_vectors);
158 if (ret) {
159 netdev_err(priv->netdev, "request irq(%d) fail\n",
160 tqp_vectors->vector_irq);
161 return ret;
162 }
163
164 tqp_vectors->affinity_notify.notify =
165 hns3_nic_irq_affinity_notify;
166 tqp_vectors->affinity_notify.release =
167 hns3_nic_irq_affinity_release;
168 irq_set_affinity_notifier(tqp_vectors->vector_irq,
169 &tqp_vectors->affinity_notify);
170 irq_set_affinity_hint(tqp_vectors->vector_irq,
171 &tqp_vectors->affinity_mask);
172
173 tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
174 }
175
176 return 0;
177}
178
179static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
180 u32 mask_en)
181{
182 writel(mask_en, tqp_vector->mask_addr);
183}
184
185static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
186{
187 napi_enable(&tqp_vector->napi);
188
189
190 hns3_mask_vector_irq(tqp_vector, 1);
191}
192
193static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
194{
195
196 hns3_mask_vector_irq(tqp_vector, 0);
197
198 disable_irq(tqp_vector->vector_irq);
199 napi_disable(&tqp_vector->napi);
200}
201
202void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
203 u32 rl_value)
204{
205 u32 rl_reg = hns3_rl_usec_to_reg(rl_value);
206
207
208
209
210
211
212 if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable &&
213 !tqp_vector->rx_group.coal.gl_adapt_enable)
214
215
216
217 rl_reg |= HNS3_INT_RL_ENABLE_MASK;
218
219 writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
220}
221
222void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
223 u32 gl_value)
224{
225 u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);
226
227 writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
228}
229
230void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
231 u32 gl_value)
232{
233 u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);
234
235 writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
236}
237
238static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
239 struct hns3_nic_priv *priv)
240{
241
242
243
244
245
246
247 tqp_vector->tx_group.coal.gl_adapt_enable = 1;
248 tqp_vector->rx_group.coal.gl_adapt_enable = 1;
249
250 tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
251 tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;
252
253 tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
254 tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
255}
256
257static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
258 struct hns3_nic_priv *priv)
259{
260 struct hnae3_handle *h = priv->ae_handle;
261
262 hns3_set_vector_coalesce_tx_gl(tqp_vector,
263 tqp_vector->tx_group.coal.int_gl);
264 hns3_set_vector_coalesce_rx_gl(tqp_vector,
265 tqp_vector->rx_group.coal.int_gl);
266 hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
267}
268
269static int hns3_nic_set_real_num_queue(struct net_device *netdev)
270{
271 struct hnae3_handle *h = hns3_get_handle(netdev);
272 struct hnae3_knic_private_info *kinfo = &h->kinfo;
273 unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
274 int i, ret;
275
276 if (kinfo->num_tc <= 1) {
277 netdev_reset_tc(netdev);
278 } else {
279 ret = netdev_set_num_tc(netdev, kinfo->num_tc);
280 if (ret) {
281 netdev_err(netdev,
282 "netdev_set_num_tc fail, ret=%d!\n", ret);
283 return ret;
284 }
285
286 for (i = 0; i < HNAE3_MAX_TC; i++) {
287 if (!kinfo->tc_info[i].enable)
288 continue;
289
290 netdev_set_tc_queue(netdev,
291 kinfo->tc_info[i].tc,
292 kinfo->tc_info[i].tqp_count,
293 kinfo->tc_info[i].tqp_offset);
294 }
295 }
296
297 ret = netif_set_real_num_tx_queues(netdev, queue_size);
298 if (ret) {
299 netdev_err(netdev,
300 "netif_set_real_num_tx_queues fail, ret=%d!\n",
301 ret);
302 return ret;
303 }
304
305 ret = netif_set_real_num_rx_queues(netdev, queue_size);
306 if (ret) {
307 netdev_err(netdev,
308 "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
309 return ret;
310 }
311
312 return 0;
313}
314
315static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
316{
317 u16 alloc_tqps, max_rss_size, rss_size;
318
319 h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size);
320 rss_size = alloc_tqps / h->kinfo.num_tc;
321
322 return min_t(u16, rss_size, max_rss_size);
323}
324
325static void hns3_tqp_enable(struct hnae3_queue *tqp)
326{
327 u32 rcb_reg;
328
329 rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
330 rcb_reg |= BIT(HNS3_RING_EN_B);
331 hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
332}
333
334static void hns3_tqp_disable(struct hnae3_queue *tqp)
335{
336 u32 rcb_reg;
337
338 rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
339 rcb_reg &= ~BIT(HNS3_RING_EN_B);
340 hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
341}
342
343static int hns3_nic_net_up(struct net_device *netdev)
344{
345 struct hns3_nic_priv *priv = netdev_priv(netdev);
346 struct hnae3_handle *h = priv->ae_handle;
347 int i, j;
348 int ret;
349
350 ret = hns3_nic_reset_all_ring(h);
351 if (ret)
352 return ret;
353
354
355 ret = hns3_nic_init_irq(priv);
356 if (ret) {
357 netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
358 return ret;
359 }
360
361 clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
362
363
364 for (i = 0; i < priv->vector_num; i++)
365 hns3_vector_enable(&priv->tqp_vector[i]);
366
367
368 for (j = 0; j < h->kinfo.num_tqps; j++)
369 hns3_tqp_enable(h->kinfo.tqp[j]);
370
371
372 ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
373 if (ret)
374 goto out_start_err;
375
376 return 0;
377
378out_start_err:
379 set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
380 while (j--)
381 hns3_tqp_disable(h->kinfo.tqp[j]);
382
383 for (j = i - 1; j >= 0; j--)
384 hns3_vector_disable(&priv->tqp_vector[j]);
385
386 hns3_nic_uninit_irq(priv);
387
388 return ret;
389}
390
391static void hns3_config_xps(struct hns3_nic_priv *priv)
392{
393 int i;
394
395 for (i = 0; i < priv->vector_num; i++) {
396 struct hns3_enet_tqp_vector *tqp_vector = &priv->tqp_vector[i];
397 struct hns3_enet_ring *ring = tqp_vector->tx_group.ring;
398
399 while (ring) {
400 int ret;
401
402 ret = netif_set_xps_queue(priv->netdev,
403 &tqp_vector->affinity_mask,
404 ring->tqp->tqp_index);
405 if (ret)
406 netdev_warn(priv->netdev,
407 "set xps queue failed: %d", ret);
408
409 ring = ring->next;
410 }
411 }
412}
413
414static int hns3_nic_net_open(struct net_device *netdev)
415{
416 struct hns3_nic_priv *priv = netdev_priv(netdev);
417 struct hnae3_handle *h = hns3_get_handle(netdev);
418 struct hnae3_knic_private_info *kinfo;
419 int i, ret;
420
421 if (hns3_nic_resetting(netdev))
422 return -EBUSY;
423
424 netif_carrier_off(netdev);
425
426 ret = hns3_nic_set_real_num_queue(netdev);
427 if (ret)
428 return ret;
429
430 ret = hns3_nic_net_up(netdev);
431 if (ret) {
432 netdev_err(netdev,
433 "hns net up fail, ret=%d!\n", ret);
434 return ret;
435 }
436
437 kinfo = &h->kinfo;
438 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
439 netdev_set_prio_tc_map(netdev, i,
440 kinfo->prio_tc[i]);
441 }
442
443 if (h->ae_algo->ops->set_timer_task)
444 h->ae_algo->ops->set_timer_task(priv->ae_handle, true);
445
446 hns3_config_xps(priv);
447 return 0;
448}
449
450static void hns3_nic_net_down(struct net_device *netdev)
451{
452 struct hns3_nic_priv *priv = netdev_priv(netdev);
453 struct hnae3_handle *h = hns3_get_handle(netdev);
454 const struct hnae3_ae_ops *ops;
455 int i;
456
457
458 for (i = 0; i < priv->vector_num; i++)
459 hns3_vector_disable(&priv->tqp_vector[i]);
460
461
462 for (i = 0; i < h->kinfo.num_tqps; i++)
463 hns3_tqp_disable(h->kinfo.tqp[i]);
464
465
466 ops = priv->ae_handle->ae_algo->ops;
467 if (ops->stop)
468 ops->stop(priv->ae_handle);
469
470
471 hns3_nic_uninit_irq(priv);
472
473 hns3_clear_all_ring(priv->ae_handle);
474}
475
476static int hns3_nic_net_stop(struct net_device *netdev)
477{
478 struct hns3_nic_priv *priv = netdev_priv(netdev);
479 struct hnae3_handle *h = hns3_get_handle(netdev);
480
481 if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
482 return 0;
483
484 if (h->ae_algo->ops->set_timer_task)
485 h->ae_algo->ops->set_timer_task(priv->ae_handle, false);
486
487 netif_tx_stop_all_queues(netdev);
488 netif_carrier_off(netdev);
489
490 hns3_nic_net_down(netdev);
491
492 return 0;
493}
494
495static int hns3_nic_uc_sync(struct net_device *netdev,
496 const unsigned char *addr)
497{
498 struct hnae3_handle *h = hns3_get_handle(netdev);
499
500 if (h->ae_algo->ops->add_uc_addr)
501 return h->ae_algo->ops->add_uc_addr(h, addr);
502
503 return 0;
504}
505
506static int hns3_nic_uc_unsync(struct net_device *netdev,
507 const unsigned char *addr)
508{
509 struct hnae3_handle *h = hns3_get_handle(netdev);
510
511 if (h->ae_algo->ops->rm_uc_addr)
512 return h->ae_algo->ops->rm_uc_addr(h, addr);
513
514 return 0;
515}
516
517static int hns3_nic_mc_sync(struct net_device *netdev,
518 const unsigned char *addr)
519{
520 struct hnae3_handle *h = hns3_get_handle(netdev);
521
522 if (h->ae_algo->ops->add_mc_addr)
523 return h->ae_algo->ops->add_mc_addr(h, addr);
524
525 return 0;
526}
527
528static int hns3_nic_mc_unsync(struct net_device *netdev,
529 const unsigned char *addr)
530{
531 struct hnae3_handle *h = hns3_get_handle(netdev);
532
533 if (h->ae_algo->ops->rm_mc_addr)
534 return h->ae_algo->ops->rm_mc_addr(h, addr);
535
536 return 0;
537}
538
539static u8 hns3_get_netdev_flags(struct net_device *netdev)
540{
541 u8 flags = 0;
542
543 if (netdev->flags & IFF_PROMISC) {
544 flags = HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE;
545 } else {
546 flags |= HNAE3_VLAN_FLTR;
547 if (netdev->flags & IFF_ALLMULTI)
548 flags |= HNAE3_USER_MPE;
549 }
550
551 return flags;
552}
553
554static void hns3_nic_set_rx_mode(struct net_device *netdev)
555{
556 struct hnae3_handle *h = hns3_get_handle(netdev);
557 u8 new_flags;
558 int ret;
559
560 new_flags = hns3_get_netdev_flags(netdev);
561
562 ret = __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
563 if (ret) {
564 netdev_err(netdev, "sync uc address fail\n");
565 if (ret == -ENOSPC)
566 new_flags |= HNAE3_OVERFLOW_UPE;
567 }
568
569 if (netdev->flags & IFF_MULTICAST) {
570 ret = __dev_mc_sync(netdev, hns3_nic_mc_sync,
571 hns3_nic_mc_unsync);
572 if (ret) {
573 netdev_err(netdev, "sync mc address fail\n");
574 if (ret == -ENOSPC)
575 new_flags |= HNAE3_OVERFLOW_MPE;
576 }
577 }
578
579
580
581
582
583 hns3_enable_vlan_filter(netdev, new_flags & HNAE3_VLAN_FLTR);
584 h->netdev_flags = new_flags;
585 hns3_update_promisc_mode(netdev, new_flags);
586}
587
588int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags)
589{
590 struct hns3_nic_priv *priv = netdev_priv(netdev);
591 struct hnae3_handle *h = priv->ae_handle;
592
593 if (h->ae_algo->ops->set_promisc_mode) {
594 return h->ae_algo->ops->set_promisc_mode(h,
595 promisc_flags & HNAE3_UPE,
596 promisc_flags & HNAE3_MPE);
597 }
598
599 return 0;
600}
601
602void hns3_enable_vlan_filter(struct net_device *netdev, bool enable)
603{
604 struct hns3_nic_priv *priv = netdev_priv(netdev);
605 struct hnae3_handle *h = priv->ae_handle;
606 bool last_state;
607
608 if (h->pdev->revision >= 0x21 && h->ae_algo->ops->enable_vlan_filter) {
609 last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? true : false;
610 if (enable != last_state) {
611 netdev_info(netdev,
612 "%s vlan filter\n",
613 enable ? "enable" : "disable");
614 h->ae_algo->ops->enable_vlan_filter(h, enable);
615 }
616 }
617}
618
619static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
620 u16 *mss, u32 *type_cs_vlan_tso)
621{
622 u32 l4_offset, hdr_len;
623 union l3_hdr_info l3;
624 union l4_hdr_info l4;
625 u32 l4_paylen;
626 int ret;
627
628 if (!skb_is_gso(skb))
629 return 0;
630
631 ret = skb_cow_head(skb, 0);
632 if (unlikely(ret))
633 return ret;
634
635 l3.hdr = skb_network_header(skb);
636 l4.hdr = skb_transport_header(skb);
637
638
639
640
641 if (l3.v4->version == 4)
642 l3.v4->check = 0;
643
644
645 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
646 SKB_GSO_GRE_CSUM |
647 SKB_GSO_UDP_TUNNEL |
648 SKB_GSO_UDP_TUNNEL_CSUM)) {
649 if ((!(skb_shinfo(skb)->gso_type &
650 SKB_GSO_PARTIAL)) &&
651 (skb_shinfo(skb)->gso_type &
652 SKB_GSO_UDP_TUNNEL_CSUM)) {
653
654
655
656 l4.udp->check = 0;
657 }
658
659 l3.hdr = skb_inner_network_header(skb);
660 l4.hdr = skb_inner_transport_header(skb);
661
662
663
664
665 if (l3.v4->version == 4)
666 l3.v4->check = 0;
667 }
668
669
670 l4_offset = l4.hdr - skb->data;
671 hdr_len = (l4.tcp->doff << 2) + l4_offset;
672
673
674 l4_paylen = skb->len - l4_offset;
675 csum_replace_by_diff(&l4.tcp->check,
676 (__force __wsum)htonl(l4_paylen));
677
678
679 *paylen = skb->len - hdr_len;
680 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1);
681
682
683 *mss = skb_shinfo(skb)->gso_size;
684
685 return 0;
686}
687
688static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
689 u8 *il4_proto)
690{
691 union l3_hdr_info l3;
692 unsigned char *l4_hdr;
693 unsigned char *exthdr;
694 u8 l4_proto_tmp;
695 __be16 frag_off;
696
697
698 l3.hdr = skb_network_header(skb);
699 l4_hdr = skb_transport_header(skb);
700
701 if (skb->protocol == htons(ETH_P_IPV6)) {
702 exthdr = l3.hdr + sizeof(*l3.v6);
703 l4_proto_tmp = l3.v6->nexthdr;
704 if (l4_hdr != exthdr)
705 ipv6_skip_exthdr(skb, exthdr - skb->data,
706 &l4_proto_tmp, &frag_off);
707 } else if (skb->protocol == htons(ETH_P_IP)) {
708 l4_proto_tmp = l3.v4->protocol;
709 } else {
710 return -EINVAL;
711 }
712
713 *ol4_proto = l4_proto_tmp;
714
715
716 if (!skb->encapsulation) {
717 *il4_proto = 0;
718 return 0;
719 }
720
721
722 l3.hdr = skb_inner_network_header(skb);
723 l4_hdr = skb_inner_transport_header(skb);
724
725 if (l3.v6->version == 6) {
726 exthdr = l3.hdr + sizeof(*l3.v6);
727 l4_proto_tmp = l3.v6->nexthdr;
728 if (l4_hdr != exthdr)
729 ipv6_skip_exthdr(skb, exthdr - skb->data,
730 &l4_proto_tmp, &frag_off);
731 } else if (l3.v4->version == 4) {
732 l4_proto_tmp = l3.v4->protocol;
733 }
734
735 *il4_proto = l4_proto_tmp;
736
737 return 0;
738}
739
740
741
742
743
744
745
746static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
747{
748 union l4_hdr_info l4;
749
750 l4.hdr = skb_transport_header(skb);
751
752 if (!(!skb->encapsulation &&
753 l4.udp->dest == htons(IANA_VXLAN_UDP_PORT)))
754 return false;
755
756 skb_checksum_help(skb);
757
758 return true;
759}
760
761static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
762 u32 *ol_type_vlan_len_msec)
763{
764 u32 l2_len, l3_len, l4_len;
765 unsigned char *il2_hdr;
766 union l3_hdr_info l3;
767 union l4_hdr_info l4;
768
769 l3.hdr = skb_network_header(skb);
770 l4.hdr = skb_transport_header(skb);
771
772
773 l2_len = l3.hdr - skb->data;
774 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L2LEN_S, l2_len >> 1);
775
776
777 l3_len = l4.hdr - l3.hdr;
778 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2);
779
780 il2_hdr = skb_inner_mac_header(skb);
781
782 l4_len = il2_hdr - l4.hdr;
783 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2);
784
785
786 if (skb->protocol == htons(ETH_P_IP)) {
787 if (skb_is_gso(skb))
788 hns3_set_field(*ol_type_vlan_len_msec,
789 HNS3_TXD_OL3T_S,
790 HNS3_OL3T_IPV4_CSUM);
791 else
792 hns3_set_field(*ol_type_vlan_len_msec,
793 HNS3_TXD_OL3T_S,
794 HNS3_OL3T_IPV4_NO_CSUM);
795
796 } else if (skb->protocol == htons(ETH_P_IPV6)) {
797 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S,
798 HNS3_OL3T_IPV6);
799 }
800
801 if (ol4_proto == IPPROTO_UDP)
802 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
803 HNS3_TUN_MAC_IN_UDP);
804 else if (ol4_proto == IPPROTO_GRE)
805 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
806 HNS3_TUN_NVGRE);
807}
808
809static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
810 u8 il4_proto, u32 *type_cs_vlan_tso,
811 u32 *ol_type_vlan_len_msec)
812{
813 unsigned char *l2_hdr = skb->data;
814 u32 l4_proto = ol4_proto;
815 union l4_hdr_info l4;
816 union l3_hdr_info l3;
817 u32 l2_len, l3_len;
818
819 l4.hdr = skb_transport_header(skb);
820 l3.hdr = skb_network_header(skb);
821
822
823 if (skb->encapsulation) {
824
825 if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) {
826
827
828
829 if (skb_is_gso(skb))
830 return -EDOM;
831
832
833
834
835 skb_checksum_help(skb);
836 return 0;
837 }
838
839 hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec);
840
841
842 l2_hdr = skb_inner_mac_header(skb);
843 l3.hdr = skb_inner_network_header(skb);
844 l4.hdr = skb_inner_transport_header(skb);
845 l4_proto = il4_proto;
846 }
847
848 if (l3.v4->version == 4) {
849 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
850 HNS3_L3T_IPV4);
851
852
853
854
855 if (skb_is_gso(skb))
856 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
857 } else if (l3.v6->version == 6) {
858 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
859 HNS3_L3T_IPV6);
860 }
861
862
863 l2_len = l3.hdr - l2_hdr;
864 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);
865
866
867 l3_len = l4.hdr - l3.hdr;
868 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2);
869
870
871 switch (l4_proto) {
872 case IPPROTO_TCP:
873 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
874 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
875 HNS3_L4T_TCP);
876 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
877 l4.tcp->doff);
878 break;
879 case IPPROTO_UDP:
880 if (hns3_tunnel_csum_bug(skb))
881 break;
882
883 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
884 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
885 HNS3_L4T_UDP);
886 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
887 (sizeof(struct udphdr) >> 2));
888 break;
889 case IPPROTO_SCTP:
890 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
891 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
892 HNS3_L4T_SCTP);
893 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
894 (sizeof(struct sctphdr) >> 2));
895 break;
896 default:
897
898
899
900 if (skb_is_gso(skb))
901 return -EDOM;
902
903
904
905
906 skb_checksum_help(skb);
907 return 0;
908 }
909
910 return 0;
911}
912
913static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
914{
915
916 hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
917 hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
918}
919
920static int hns3_fill_desc_vtags(struct sk_buff *skb,
921 struct hns3_enet_ring *tx_ring,
922 u32 *inner_vlan_flag,
923 u32 *out_vlan_flag,
924 u16 *inner_vtag,
925 u16 *out_vtag)
926{
927#define HNS3_TX_VLAN_PRIO_SHIFT 13
928
929 struct hnae3_handle *handle = tx_ring->tqp->handle;
930
931
932
933
934 if (unlikely(skb_vlan_tagged_multi(skb) &&
935 handle->port_base_vlan_state ==
936 HNAE3_PORT_BASE_VLAN_ENABLE))
937 return -EINVAL;
938
939 if (skb->protocol == htons(ETH_P_8021Q) &&
940 !(tx_ring->tqp->handle->kinfo.netdev->features &
941 NETIF_F_HW_VLAN_CTAG_TX)) {
942
943
944
945
946 skb->protocol = vlan_get_protocol(skb);
947 return 0;
948 }
949
950 if (skb_vlan_tag_present(skb)) {
951 u16 vlan_tag;
952
953 vlan_tag = skb_vlan_tag_get(skb);
954 vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;
955
956
957
958
959 if (skb->protocol == htons(ETH_P_8021Q)) {
960 if (handle->port_base_vlan_state ==
961 HNAE3_PORT_BASE_VLAN_DISABLE){
962 hns3_set_field(*out_vlan_flag,
963 HNS3_TXD_OVLAN_B, 1);
964 *out_vtag = vlan_tag;
965 } else {
966 hns3_set_field(*inner_vlan_flag,
967 HNS3_TXD_VLAN_B, 1);
968 *inner_vtag = vlan_tag;
969 }
970 } else {
971 hns3_set_field(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
972 *inner_vtag = vlan_tag;
973 }
974 } else if (skb->protocol == htons(ETH_P_8021Q)) {
975 struct vlan_ethhdr *vhdr;
976 int rc;
977
978 rc = skb_cow_head(skb, 0);
979 if (unlikely(rc < 0))
980 return rc;
981 vhdr = (struct vlan_ethhdr *)skb->data;
982 vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
983 << HNS3_TX_VLAN_PRIO_SHIFT);
984 }
985
986 skb->protocol = vlan_get_protocol(skb);
987 return 0;
988}
989
990static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
991 int size, int frag_end, enum hns_desc_type type)
992{
993 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
994 struct hns3_desc *desc = &ring->desc[ring->next_to_use];
995 struct device *dev = ring_to_dev(ring);
996 struct skb_frag_struct *frag;
997 unsigned int frag_buf_num;
998 int k, sizeoflast;
999 dma_addr_t dma;
1000
1001 if (type == DESC_TYPE_SKB) {
1002 struct sk_buff *skb = (struct sk_buff *)priv;
1003 u32 ol_type_vlan_len_msec = 0;
1004 u32 type_cs_vlan_tso = 0;
1005 u32 paylen = skb->len;
1006 u16 inner_vtag = 0;
1007 u16 out_vtag = 0;
1008 u16 mss = 0;
1009 int ret;
1010
1011 ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
1012 &ol_type_vlan_len_msec,
1013 &inner_vtag, &out_vtag);
1014 if (unlikely(ret))
1015 return ret;
1016
1017 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1018 u8 ol4_proto, il4_proto;
1019
1020 skb_reset_mac_len(skb);
1021
1022 ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
1023 if (unlikely(ret))
1024 return ret;
1025
1026 ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
1027 &type_cs_vlan_tso,
1028 &ol_type_vlan_len_msec);
1029 if (unlikely(ret))
1030 return ret;
1031
1032 ret = hns3_set_tso(skb, &paylen, &mss,
1033 &type_cs_vlan_tso);
1034 if (unlikely(ret))
1035 return ret;
1036 }
1037
1038
1039 desc->tx.ol_type_vlan_len_msec =
1040 cpu_to_le32(ol_type_vlan_len_msec);
1041 desc->tx.type_cs_vlan_tso_len =
1042 cpu_to_le32(type_cs_vlan_tso);
1043 desc->tx.paylen = cpu_to_le32(paylen);
1044 desc->tx.mss = cpu_to_le16(mss);
1045 desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
1046 desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
1047
1048 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
1049 } else {
1050 frag = (struct skb_frag_struct *)priv;
1051 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
1052 }
1053
1054 if (unlikely(dma_mapping_error(dev, dma))) {
1055 ring->stats.sw_err_cnt++;
1056 return -ENOMEM;
1057 }
1058
1059 desc_cb->length = size;
1060
1061 if (likely(size <= HNS3_MAX_BD_SIZE)) {
1062 u16 bdtp_fe_sc_vld_ra_ri = 0;
1063
1064 desc_cb->priv = priv;
1065 desc_cb->dma = dma;
1066 desc_cb->type = type;
1067 desc->addr = cpu_to_le64(dma);
1068 desc->tx.send_size = cpu_to_le16(size);
1069 hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
1070 desc->tx.bdtp_fe_sc_vld_ra_ri =
1071 cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
1072
1073 ring_ptr_move_fw(ring, next_to_use);
1074 return 0;
1075 }
1076
1077 frag_buf_num = hns3_tx_bd_count(size);
1078 sizeoflast = size & HNS3_TX_LAST_SIZE_M;
1079 sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
1080
1081
1082 for (k = 0; k < frag_buf_num; k++) {
1083 u16 bdtp_fe_sc_vld_ra_ri = 0;
1084
1085
1086 desc_cb->priv = priv;
1087 desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
1088 desc_cb->type = (type == DESC_TYPE_SKB && !k) ?
1089 DESC_TYPE_SKB : DESC_TYPE_PAGE;
1090
1091
1092 desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
1093 desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
1094 (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
1095 hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri,
1096 frag_end && (k == frag_buf_num - 1) ?
1097 1 : 0);
1098 desc->tx.bdtp_fe_sc_vld_ra_ri =
1099 cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
1100
1101
1102 ring_ptr_move_fw(ring, next_to_use);
1103
1104 desc_cb = &ring->desc_cb[ring->next_to_use];
1105 desc = &ring->desc[ring->next_to_use];
1106 }
1107
1108 return 0;
1109}
1110
1111static int hns3_nic_bd_num(struct sk_buff *skb)
1112{
1113 int size = skb_headlen(skb);
1114 int i, bd_num;
1115
1116
1117 if (likely(skb->len <= HNS3_MAX_BD_SIZE))
1118 return skb_shinfo(skb)->nr_frags + 1;
1119
1120 bd_num = hns3_tx_bd_count(size);
1121
1122 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1123 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
1124 int frag_bd_num;
1125
1126 size = skb_frag_size(frag);
1127 frag_bd_num = hns3_tx_bd_count(size);
1128
1129 if (unlikely(frag_bd_num > HNS3_MAX_BD_PER_FRAG))
1130 return -ENOMEM;
1131
1132 bd_num += frag_bd_num;
1133 }
1134
1135 return bd_num;
1136}
1137
1138static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
1139{
1140 if (!skb->encapsulation)
1141 return skb_transport_offset(skb) + tcp_hdrlen(skb);
1142
1143 return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
1144}
1145
1146
1147
1148
1149
1150
1151static bool hns3_skb_need_linearized(struct sk_buff *skb)
1152{
1153 int bd_limit = HNS3_MAX_BD_PER_FRAG - 1;
1154 unsigned int tot_len = 0;
1155 int i;
1156
1157 for (i = 0; i < bd_limit; i++)
1158 tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
1159
1160
1161
1162
1163 if (((tot_len + skb_headlen(skb)) < (skb_shinfo(skb)->gso_size +
1164 hns3_gso_hdr_len(skb))) || (tot_len < skb_shinfo(skb)->gso_size))
1165 return true;
1166
1167
1168 for (i = 0; i < (skb_shinfo(skb)->nr_frags - bd_limit - 1); i++) {
1169 tot_len -= skb_frag_size(&skb_shinfo(skb)->frags[i]);
1170 tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i + bd_limit]);
1171
1172 if (tot_len < skb_shinfo(skb)->gso_size)
1173 return true;
1174 }
1175
1176 return false;
1177}
1178
1179static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
1180 struct sk_buff **out_skb)
1181{
1182 struct sk_buff *skb = *out_skb;
1183 int bd_num;
1184
1185 bd_num = hns3_nic_bd_num(skb);
1186 if (bd_num < 0)
1187 return bd_num;
1188
1189 if (unlikely(bd_num > HNS3_MAX_BD_PER_FRAG)) {
1190 struct sk_buff *new_skb;
1191
1192 if (skb_is_gso(skb) && !hns3_skb_need_linearized(skb))
1193 goto out;
1194
1195 bd_num = hns3_tx_bd_count(skb->len);
1196 if (unlikely(ring_space(ring) < bd_num))
1197 return -EBUSY;
1198
1199 new_skb = skb_copy(skb, GFP_ATOMIC);
1200 if (!new_skb)
1201 return -ENOMEM;
1202 dev_kfree_skb_any(skb);
1203 *out_skb = new_skb;
1204
1205 u64_stats_update_begin(&ring->syncp);
1206 ring->stats.tx_copy++;
1207 u64_stats_update_end(&ring->syncp);
1208 }
1209
1210out:
1211 if (unlikely(ring_space(ring) < bd_num))
1212 return -EBUSY;
1213
1214 return bd_num;
1215}
1216
1217static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
1218{
1219 struct device *dev = ring_to_dev(ring);
1220 unsigned int i;
1221
1222 for (i = 0; i < ring->desc_num; i++) {
1223
1224 if (ring->next_to_use == next_to_use_orig)
1225 break;
1226
1227
1228 ring_ptr_move_bw(ring, next_to_use);
1229
1230
1231 if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
1232 dma_unmap_single(dev,
1233 ring->desc_cb[ring->next_to_use].dma,
1234 ring->desc_cb[ring->next_to_use].length,
1235 DMA_TO_DEVICE);
1236 else if (ring->desc_cb[ring->next_to_use].length)
1237 dma_unmap_page(dev,
1238 ring->desc_cb[ring->next_to_use].dma,
1239 ring->desc_cb[ring->next_to_use].length,
1240 DMA_TO_DEVICE);
1241
1242 ring->desc_cb[ring->next_to_use].length = 0;
1243 ring->desc_cb[ring->next_to_use].dma = 0;
1244 }
1245}
1246
1247netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
1248{
1249 struct hns3_nic_priv *priv = netdev_priv(netdev);
1250 struct hns3_nic_ring_data *ring_data =
1251 &tx_ring_data(priv, skb->queue_mapping);
1252 struct hns3_enet_ring *ring = ring_data->ring;
1253 struct netdev_queue *dev_queue;
1254 struct skb_frag_struct *frag;
1255 int next_to_use_head;
1256 int buf_num;
1257 int seg_num;
1258 int size;
1259 int ret;
1260 int i;
1261
1262
1263 prefetch(skb->data);
1264
1265 buf_num = hns3_nic_maybe_stop_tx(ring, &skb);
1266 if (unlikely(buf_num <= 0)) {
1267 if (buf_num == -EBUSY) {
1268 u64_stats_update_begin(&ring->syncp);
1269 ring->stats.tx_busy++;
1270 u64_stats_update_end(&ring->syncp);
1271 goto out_net_tx_busy;
1272 } else if (buf_num == -ENOMEM) {
1273 u64_stats_update_begin(&ring->syncp);
1274 ring->stats.sw_err_cnt++;
1275 u64_stats_update_end(&ring->syncp);
1276 }
1277
1278 if (net_ratelimit())
1279 netdev_err(netdev, "xmit error: %d!\n", buf_num);
1280
1281 goto out_err_tx_ok;
1282 }
1283
1284
1285 seg_num = skb_shinfo(skb)->nr_frags + 1;
1286
1287 size = skb_headlen(skb);
1288
1289 next_to_use_head = ring->next_to_use;
1290
1291 ret = hns3_fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0,
1292 DESC_TYPE_SKB);
1293 if (unlikely(ret))
1294 goto fill_err;
1295
1296
1297 for (i = 1; i < seg_num; i++) {
1298 frag = &skb_shinfo(skb)->frags[i - 1];
1299 size = skb_frag_size(frag);
1300
1301 ret = hns3_fill_desc(ring, frag, size,
1302 seg_num - 1 == i ? 1 : 0,
1303 DESC_TYPE_PAGE);
1304
1305 if (unlikely(ret))
1306 goto fill_err;
1307 }
1308
1309
1310 dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
1311 netdev_tx_sent_queue(dev_queue, skb->len);
1312
1313 wmb();
1314
1315 hnae3_queue_xmit(ring->tqp, buf_num);
1316
1317 return NETDEV_TX_OK;
1318
1319fill_err:
1320 hns3_clear_desc(ring, next_to_use_head);
1321
1322out_err_tx_ok:
1323 dev_kfree_skb_any(skb);
1324 return NETDEV_TX_OK;
1325
1326out_net_tx_busy:
1327 netif_stop_subqueue(netdev, ring_data->queue_index);
1328 smp_mb();
1329
1330 return NETDEV_TX_BUSY;
1331}
1332
1333static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
1334{
1335 struct hnae3_handle *h = hns3_get_handle(netdev);
1336 struct sockaddr *mac_addr = p;
1337 int ret;
1338
1339 if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
1340 return -EADDRNOTAVAIL;
1341
1342 if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
1343 netdev_info(netdev, "already using mac address %pM\n",
1344 mac_addr->sa_data);
1345 return 0;
1346 }
1347
1348 ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
1349 if (ret) {
1350 netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
1351 return ret;
1352 }
1353
1354 ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);
1355
1356 return 0;
1357}
1358
1359static int hns3_nic_do_ioctl(struct net_device *netdev,
1360 struct ifreq *ifr, int cmd)
1361{
1362 struct hnae3_handle *h = hns3_get_handle(netdev);
1363
1364 if (!netif_running(netdev))
1365 return -EINVAL;
1366
1367 if (!h->ae_algo->ops->do_ioctl)
1368 return -EOPNOTSUPP;
1369
1370 return h->ae_algo->ops->do_ioctl(h, ifr, cmd);
1371}
1372
1373static int hns3_nic_set_features(struct net_device *netdev,
1374 netdev_features_t features)
1375{
1376 netdev_features_t changed = netdev->features ^ features;
1377 struct hns3_nic_priv *priv = netdev_priv(netdev);
1378 struct hnae3_handle *h = priv->ae_handle;
1379 bool enable;
1380 int ret;
1381
1382 if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) {
1383 enable = !!(features & NETIF_F_GRO_HW);
1384 ret = h->ae_algo->ops->set_gro_en(h, enable);
1385 if (ret)
1386 return ret;
1387 }
1388
1389 if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
1390 h->ae_algo->ops->enable_vlan_filter) {
1391 enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER);
1392 h->ae_algo->ops->enable_vlan_filter(h, enable);
1393 }
1394
1395 if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
1396 h->ae_algo->ops->enable_hw_strip_rxvtag) {
1397 enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
1398 ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, enable);
1399 if (ret)
1400 return ret;
1401 }
1402
1403 if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) {
1404 enable = !!(features & NETIF_F_NTUPLE);
1405 h->ae_algo->ops->enable_fd(h, enable);
1406 }
1407
1408 netdev->features = features;
1409 return 0;
1410}
1411
1412static void hns3_nic_get_stats64(struct net_device *netdev,
1413 struct rtnl_link_stats64 *stats)
1414{
1415 struct hns3_nic_priv *priv = netdev_priv(netdev);
1416 int queue_num = priv->ae_handle->kinfo.num_tqps;
1417 struct hnae3_handle *handle = priv->ae_handle;
1418 struct hns3_enet_ring *ring;
1419 u64 rx_length_errors = 0;
1420 u64 rx_crc_errors = 0;
1421 u64 rx_multicast = 0;
1422 unsigned int start;
1423 u64 tx_errors = 0;
1424 u64 rx_errors = 0;
1425 unsigned int idx;
1426 u64 tx_bytes = 0;
1427 u64 rx_bytes = 0;
1428 u64 tx_pkts = 0;
1429 u64 rx_pkts = 0;
1430 u64 tx_drop = 0;
1431 u64 rx_drop = 0;
1432
1433 if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
1434 return;
1435
1436 handle->ae_algo->ops->update_stats(handle, &netdev->stats);
1437
1438 for (idx = 0; idx < queue_num; idx++) {
1439
1440 ring = priv->ring_data[idx].ring;
1441 do {
1442 start = u64_stats_fetch_begin_irq(&ring->syncp);
1443 tx_bytes += ring->stats.tx_bytes;
1444 tx_pkts += ring->stats.tx_pkts;
1445 tx_drop += ring->stats.sw_err_cnt;
1446 tx_errors += ring->stats.sw_err_cnt;
1447 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1448
1449
1450 ring = priv->ring_data[idx + queue_num].ring;
1451 do {
1452 start = u64_stats_fetch_begin_irq(&ring->syncp);
1453 rx_bytes += ring->stats.rx_bytes;
1454 rx_pkts += ring->stats.rx_pkts;
1455 rx_drop += ring->stats.non_vld_descs;
1456 rx_drop += ring->stats.l2_err;
1457 rx_errors += ring->stats.non_vld_descs;
1458 rx_errors += ring->stats.l2_err;
1459 rx_crc_errors += ring->stats.l2_err;
1460 rx_crc_errors += ring->stats.l3l4_csum_err;
1461 rx_multicast += ring->stats.rx_multicast;
1462 rx_length_errors += ring->stats.err_pkt_len;
1463 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1464 }
1465
1466 stats->tx_bytes = tx_bytes;
1467 stats->tx_packets = tx_pkts;
1468 stats->rx_bytes = rx_bytes;
1469 stats->rx_packets = rx_pkts;
1470
1471 stats->rx_errors = rx_errors;
1472 stats->multicast = rx_multicast;
1473 stats->rx_length_errors = rx_length_errors;
1474 stats->rx_crc_errors = rx_crc_errors;
1475 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
1476
1477 stats->tx_errors = tx_errors;
1478 stats->rx_dropped = rx_drop;
1479 stats->tx_dropped = tx_drop;
1480 stats->collisions = netdev->stats.collisions;
1481 stats->rx_over_errors = netdev->stats.rx_over_errors;
1482 stats->rx_frame_errors = netdev->stats.rx_frame_errors;
1483 stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
1484 stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
1485 stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
1486 stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
1487 stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
1488 stats->tx_window_errors = netdev->stats.tx_window_errors;
1489 stats->rx_compressed = netdev->stats.rx_compressed;
1490 stats->tx_compressed = netdev->stats.tx_compressed;
1491}
1492
1493static int hns3_setup_tc(struct net_device *netdev, void *type_data)
1494{
1495 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
1496 struct hnae3_handle *h = hns3_get_handle(netdev);
1497 struct hnae3_knic_private_info *kinfo = &h->kinfo;
1498 u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
1499 u8 tc = mqprio_qopt->qopt.num_tc;
1500 u16 mode = mqprio_qopt->mode;
1501 u8 hw = mqprio_qopt->qopt.hw;
1502
1503 if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
1504 mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
1505 return -EOPNOTSUPP;
1506
1507 if (tc > HNAE3_MAX_TC)
1508 return -EINVAL;
1509
1510 if (!netdev)
1511 return -EINVAL;
1512
1513 return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
1514 kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
1515}
1516
1517static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
1518 void *type_data)
1519{
1520 if (type != TC_SETUP_QDISC_MQPRIO)
1521 return -EOPNOTSUPP;
1522
1523 return hns3_setup_tc(dev, type_data);
1524}
1525
1526static int hns3_vlan_rx_add_vid(struct net_device *netdev,
1527 __be16 proto, u16 vid)
1528{
1529 struct hnae3_handle *h = hns3_get_handle(netdev);
1530 struct hns3_nic_priv *priv = netdev_priv(netdev);
1531 int ret = -EIO;
1532
1533 if (h->ae_algo->ops->set_vlan_filter)
1534 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
1535
1536 if (!ret)
1537 set_bit(vid, priv->active_vlans);
1538
1539 return ret;
1540}
1541
1542static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
1543 __be16 proto, u16 vid)
1544{
1545 struct hnae3_handle *h = hns3_get_handle(netdev);
1546 struct hns3_nic_priv *priv = netdev_priv(netdev);
1547 int ret = -EIO;
1548
1549 if (h->ae_algo->ops->set_vlan_filter)
1550 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
1551
1552 if (!ret)
1553 clear_bit(vid, priv->active_vlans);
1554
1555 return ret;
1556}
1557
1558static int hns3_restore_vlan(struct net_device *netdev)
1559{
1560 struct hns3_nic_priv *priv = netdev_priv(netdev);
1561 int ret = 0;
1562 u16 vid;
1563
1564 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
1565 ret = hns3_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
1566 if (ret) {
1567 netdev_err(netdev, "Restore vlan: %d filter, ret:%d\n",
1568 vid, ret);
1569 return ret;
1570 }
1571 }
1572
1573 return ret;
1574}
1575
1576static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1577 u8 qos, __be16 vlan_proto)
1578{
1579 struct hnae3_handle *h = hns3_get_handle(netdev);
1580 int ret = -EIO;
1581
1582 if (h->ae_algo->ops->set_vf_vlan_filter)
1583 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
1584 qos, vlan_proto);
1585
1586 return ret;
1587}
1588
1589static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
1590{
1591 struct hnae3_handle *h = hns3_get_handle(netdev);
1592 int ret;
1593
1594 if (hns3_nic_resetting(netdev))
1595 return -EBUSY;
1596
1597 if (!h->ae_algo->ops->set_mtu)
1598 return -EOPNOTSUPP;
1599
1600 ret = h->ae_algo->ops->set_mtu(h, new_mtu);
1601 if (ret)
1602 netdev_err(netdev, "failed to change MTU in hardware %d\n",
1603 ret);
1604 else
1605 netdev->mtu = new_mtu;
1606
1607 return ret;
1608}
1609
1610static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
1611{
1612 struct hns3_nic_priv *priv = netdev_priv(ndev);
1613 struct hnae3_handle *h = hns3_get_handle(ndev);
1614 struct hns3_enet_ring *tx_ring = NULL;
1615 struct napi_struct *napi;
1616 int timeout_queue = 0;
1617 int hw_head, hw_tail;
1618 int fbd_num, fbd_oft;
1619 int ebd_num, ebd_oft;
1620 int bd_num, bd_err;
1621 int ring_en, tc;
1622 int i;
1623
1624
1625 for (i = 0; i < ndev->num_tx_queues; i++) {
1626 struct netdev_queue *q;
1627 unsigned long trans_start;
1628
1629 q = netdev_get_tx_queue(ndev, i);
1630 trans_start = q->trans_start;
1631 if (netif_xmit_stopped(q) &&
1632 time_after(jiffies,
1633 (trans_start + ndev->watchdog_timeo))) {
1634 timeout_queue = i;
1635 break;
1636 }
1637 }
1638
1639 if (i == ndev->num_tx_queues) {
1640 netdev_info(ndev,
1641 "no netdev TX timeout queue found, timeout count: %llu\n",
1642 priv->tx_timeout_count);
1643 return false;
1644 }
1645
1646 priv->tx_timeout_count++;
1647
1648 tx_ring = priv->ring_data[timeout_queue].ring;
1649 napi = &tx_ring->tqp_vector->napi;
1650
1651 netdev_info(ndev,
1652 "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n",
1653 priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use,
1654 tx_ring->next_to_clean, napi->state);
1655
1656 netdev_info(ndev,
1657 "tx_pkts: %llu, tx_bytes: %llu, io_err_cnt: %llu, sw_err_cnt: %llu\n",
1658 tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes,
1659 tx_ring->stats.io_err_cnt, tx_ring->stats.sw_err_cnt);
1660
1661 netdev_info(ndev,
1662 "seg_pkt_cnt: %llu, tx_err_cnt: %llu, restart_queue: %llu, tx_busy: %llu\n",
1663 tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_err_cnt,
1664 tx_ring->stats.restart_queue, tx_ring->stats.tx_busy);
1665
1666
1667
1668
1669 if (h->ae_algo->ops->update_stats &&
1670 h->ae_algo->ops->get_mac_pause_stats) {
1671 u64 tx_pause_cnt, rx_pause_cnt;
1672
1673 h->ae_algo->ops->update_stats(h, &ndev->stats);
1674 h->ae_algo->ops->get_mac_pause_stats(h, &tx_pause_cnt,
1675 &rx_pause_cnt);
1676 netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n",
1677 tx_pause_cnt, rx_pause_cnt);
1678 }
1679
1680 hw_head = readl_relaxed(tx_ring->tqp->io_base +
1681 HNS3_RING_TX_RING_HEAD_REG);
1682 hw_tail = readl_relaxed(tx_ring->tqp->io_base +
1683 HNS3_RING_TX_RING_TAIL_REG);
1684 fbd_num = readl_relaxed(tx_ring->tqp->io_base +
1685 HNS3_RING_TX_RING_FBDNUM_REG);
1686 fbd_oft = readl_relaxed(tx_ring->tqp->io_base +
1687 HNS3_RING_TX_RING_OFFSET_REG);
1688 ebd_num = readl_relaxed(tx_ring->tqp->io_base +
1689 HNS3_RING_TX_RING_EBDNUM_REG);
1690 ebd_oft = readl_relaxed(tx_ring->tqp->io_base +
1691 HNS3_RING_TX_RING_EBD_OFFSET_REG);
1692 bd_num = readl_relaxed(tx_ring->tqp->io_base +
1693 HNS3_RING_TX_RING_BD_NUM_REG);
1694 bd_err = readl_relaxed(tx_ring->tqp->io_base +
1695 HNS3_RING_TX_RING_BD_ERR_REG);
1696 ring_en = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_EN_REG);
1697 tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG);
1698
1699 netdev_info(ndev,
1700 "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n",
1701 bd_num, hw_head, hw_tail, bd_err,
1702 readl(tx_ring->tqp_vector->mask_addr));
1703 netdev_info(ndev,
1704 "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n",
1705 ring_en, tc, fbd_num, fbd_oft, ebd_num, ebd_oft);
1706
1707 return true;
1708}
1709
1710static void hns3_nic_net_timeout(struct net_device *ndev)
1711{
1712 struct hns3_nic_priv *priv = netdev_priv(ndev);
1713 struct hnae3_handle *h = priv->ae_handle;
1714
1715 if (!hns3_get_tx_timeo_queue_info(ndev))
1716 return;
1717
1718
1719
1720
1721 if (h->ae_algo->ops->reset_event)
1722 h->ae_algo->ops->reset_event(h->pdev, h);
1723}
1724
1725static const struct net_device_ops hns3_nic_netdev_ops = {
1726 .ndo_open = hns3_nic_net_open,
1727 .ndo_stop = hns3_nic_net_stop,
1728 .ndo_start_xmit = hns3_nic_net_xmit,
1729 .ndo_tx_timeout = hns3_nic_net_timeout,
1730 .ndo_set_mac_address = hns3_nic_net_set_mac_address,
1731 .ndo_do_ioctl = hns3_nic_do_ioctl,
1732 .ndo_change_mtu = hns3_nic_change_mtu,
1733 .ndo_set_features = hns3_nic_set_features,
1734 .ndo_get_stats64 = hns3_nic_get_stats64,
1735 .ndo_setup_tc = hns3_nic_setup_tc,
1736 .ndo_set_rx_mode = hns3_nic_set_rx_mode,
1737 .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid,
1738 .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid,
1739 .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
1740};
1741
1742bool hns3_is_phys_func(struct pci_dev *pdev)
1743{
1744 u32 dev_id = pdev->device;
1745
1746 switch (dev_id) {
1747 case HNAE3_DEV_ID_GE:
1748 case HNAE3_DEV_ID_25GE:
1749 case HNAE3_DEV_ID_25GE_RDMA:
1750 case HNAE3_DEV_ID_25GE_RDMA_MACSEC:
1751 case HNAE3_DEV_ID_50GE_RDMA:
1752 case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
1753 case HNAE3_DEV_ID_100G_RDMA_MACSEC:
1754 return true;
1755 case HNAE3_DEV_ID_100G_VF:
1756 case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF:
1757 return false;
1758 default:
1759 dev_warn(&pdev->dev, "un-recognized pci device-id %d",
1760 dev_id);
1761 }
1762
1763 return false;
1764}
1765
1766static void hns3_disable_sriov(struct pci_dev *pdev)
1767{
1768
1769
1770
1771
1772 if (pci_vfs_assigned(pdev)) {
1773 dev_warn(&pdev->dev,
1774 "disabling driver while VFs are assigned\n");
1775 return;
1776 }
1777
1778 pci_disable_sriov(pdev);
1779}
1780
1781static void hns3_get_dev_capability(struct pci_dev *pdev,
1782 struct hnae3_ae_dev *ae_dev)
1783{
1784 if (pdev->revision >= 0x21) {
1785 hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1);
1786 hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B, 1);
1787 }
1788}
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1801{
1802 struct hnae3_ae_dev *ae_dev;
1803 int ret;
1804
1805 ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
1806 GFP_KERNEL);
1807 if (!ae_dev) {
1808 ret = -ENOMEM;
1809 return ret;
1810 }
1811
1812 ae_dev->pdev = pdev;
1813 ae_dev->flag = ent->driver_data;
1814 ae_dev->dev_type = HNAE3_DEV_KNIC;
1815 ae_dev->reset_type = HNAE3_NONE_RESET;
1816 hns3_get_dev_capability(pdev, ae_dev);
1817 pci_set_drvdata(pdev, ae_dev);
1818
1819 ret = hnae3_register_ae_dev(ae_dev);
1820 if (ret) {
1821 devm_kfree(&pdev->dev, ae_dev);
1822 pci_set_drvdata(pdev, NULL);
1823 }
1824
1825 return ret;
1826}
1827
1828
1829
1830
1831static void hns3_remove(struct pci_dev *pdev)
1832{
1833 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1834
1835 if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))
1836 hns3_disable_sriov(pdev);
1837
1838 hnae3_unregister_ae_dev(ae_dev);
1839 pci_set_drvdata(pdev, NULL);
1840}
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
1851{
1852 int ret;
1853
1854 if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) {
1855 dev_warn(&pdev->dev, "Can not config SRIOV\n");
1856 return -EINVAL;
1857 }
1858
1859 if (num_vfs) {
1860 ret = pci_enable_sriov(pdev, num_vfs);
1861 if (ret)
1862 dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret);
1863 else
1864 return num_vfs;
1865 } else if (!pci_vfs_assigned(pdev)) {
1866 pci_disable_sriov(pdev);
1867 } else {
1868 dev_warn(&pdev->dev,
1869 "Unable to free VFs because some are assigned to VMs.\n");
1870 }
1871
1872 return 0;
1873}
1874
1875static void hns3_shutdown(struct pci_dev *pdev)
1876{
1877 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1878
1879 hnae3_unregister_ae_dev(ae_dev);
1880 devm_kfree(&pdev->dev, ae_dev);
1881 pci_set_drvdata(pdev, NULL);
1882
1883 if (system_state == SYSTEM_POWER_OFF)
1884 pci_set_power_state(pdev, PCI_D3hot);
1885}
1886
1887static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
1888 pci_channel_state_t state)
1889{
1890 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1891 pci_ers_result_t ret;
1892
1893 dev_info(&pdev->dev, "PCI error detected, state(=%d)!!\n", state);
1894
1895 if (state == pci_channel_io_perm_failure)
1896 return PCI_ERS_RESULT_DISCONNECT;
1897
1898 if (!ae_dev) {
1899 dev_err(&pdev->dev,
1900 "Can't recover - error happened during device init\n");
1901 return PCI_ERS_RESULT_NONE;
1902 }
1903
1904 if (ae_dev->ops->handle_hw_ras_error)
1905 ret = ae_dev->ops->handle_hw_ras_error(ae_dev);
1906 else
1907 return PCI_ERS_RESULT_NONE;
1908
1909 return ret;
1910}
1911
1912static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
1913{
1914 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1915 struct device *dev = &pdev->dev;
1916
1917 dev_info(dev, "requesting reset due to PCI error\n");
1918
1919
1920 if (ae_dev->ops->reset_event) {
1921 if (!ae_dev->override_pci_need_reset)
1922 ae_dev->ops->reset_event(pdev, NULL);
1923
1924 return PCI_ERS_RESULT_RECOVERED;
1925 }
1926
1927 return PCI_ERS_RESULT_DISCONNECT;
1928}
1929
1930static void hns3_reset_prepare(struct pci_dev *pdev)
1931{
1932 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1933
1934 dev_info(&pdev->dev, "hns3 flr prepare\n");
1935 if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare)
1936 ae_dev->ops->flr_prepare(ae_dev);
1937}
1938
1939static void hns3_reset_done(struct pci_dev *pdev)
1940{
1941 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1942
1943 dev_info(&pdev->dev, "hns3 flr done\n");
1944 if (ae_dev && ae_dev->ops && ae_dev->ops->flr_done)
1945 ae_dev->ops->flr_done(ae_dev);
1946}
1947
1948static const struct pci_error_handlers hns3_err_handler = {
1949 .error_detected = hns3_error_detected,
1950 .slot_reset = hns3_slot_reset,
1951 .reset_prepare = hns3_reset_prepare,
1952 .reset_done = hns3_reset_done,
1953};
1954
1955static struct pci_driver hns3_driver = {
1956 .name = hns3_driver_name,
1957 .id_table = hns3_pci_tbl,
1958 .probe = hns3_probe,
1959 .remove = hns3_remove,
1960 .shutdown = hns3_shutdown,
1961 .sriov_configure = hns3_pci_sriov_configure,
1962 .err_handler = &hns3_err_handler,
1963};
1964
1965
1966static void hns3_set_default_feature(struct net_device *netdev)
1967{
1968 struct hnae3_handle *h = hns3_get_handle(netdev);
1969 struct pci_dev *pdev = h->pdev;
1970
1971 netdev->priv_flags |= IFF_UNICAST_FLT;
1972
1973 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1974 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1975 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1976 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1977 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
1978
1979 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
1980
1981 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
1982
1983 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1984 NETIF_F_HW_VLAN_CTAG_FILTER |
1985 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
1986 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1987 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1988 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1989 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
1990
1991 netdev->vlan_features |=
1992 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
1993 NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
1994 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1995 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1996 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
1997
1998 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1999 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
2000 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
2001 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
2002 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
2003 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
2004
2005 if (pdev->revision >= 0x21) {
2006 netdev->hw_features |= NETIF_F_GRO_HW;
2007 netdev->features |= NETIF_F_GRO_HW;
2008
2009 if (!(h->flags & HNAE3_SUPPORT_VF)) {
2010 netdev->hw_features |= NETIF_F_NTUPLE;
2011 netdev->features |= NETIF_F_NTUPLE;
2012 }
2013 }
2014}
2015
2016static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
2017 struct hns3_desc_cb *cb)
2018{
2019 unsigned int order = hnae3_page_order(ring);
2020 struct page *p;
2021
2022 p = dev_alloc_pages(order);
2023 if (!p)
2024 return -ENOMEM;
2025
2026 cb->priv = p;
2027 cb->page_offset = 0;
2028 cb->reuse_flag = 0;
2029 cb->buf = page_address(p);
2030 cb->length = hnae3_page_size(ring);
2031 cb->type = DESC_TYPE_PAGE;
2032
2033 return 0;
2034}
2035
2036static void hns3_free_buffer(struct hns3_enet_ring *ring,
2037 struct hns3_desc_cb *cb)
2038{
2039 if (cb->type == DESC_TYPE_SKB)
2040 dev_kfree_skb_any((struct sk_buff *)cb->priv);
2041 else if (!HNAE3_IS_TX_RING(ring))
2042 put_page((struct page *)cb->priv);
2043 memset(cb, 0, sizeof(*cb));
2044}
2045
2046static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
2047{
2048 cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
2049 cb->length, ring_to_dma_dir(ring));
2050
2051 if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma)))
2052 return -EIO;
2053
2054 return 0;
2055}
2056
2057static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
2058 struct hns3_desc_cb *cb)
2059{
2060 if (cb->type == DESC_TYPE_SKB)
2061 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
2062 ring_to_dma_dir(ring));
2063 else if (cb->length)
2064 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
2065 ring_to_dma_dir(ring));
2066}
2067
2068static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
2069{
2070 hns3_unmap_buffer(ring, &ring->desc_cb[i]);
2071 ring->desc[i].addr = 0;
2072}
2073
2074static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
2075{
2076 struct hns3_desc_cb *cb = &ring->desc_cb[i];
2077
2078 if (!ring->desc_cb[i].dma)
2079 return;
2080
2081 hns3_buffer_detach(ring, i);
2082 hns3_free_buffer(ring, cb);
2083}
2084
2085static void hns3_free_buffers(struct hns3_enet_ring *ring)
2086{
2087 int i;
2088
2089 for (i = 0; i < ring->desc_num; i++)
2090 hns3_free_buffer_detach(ring, i);
2091}
2092
2093
2094static void hns3_free_desc(struct hns3_enet_ring *ring)
2095{
2096 int size = ring->desc_num * sizeof(ring->desc[0]);
2097
2098 hns3_free_buffers(ring);
2099
2100 if (ring->desc) {
2101 dma_free_coherent(ring_to_dev(ring), size,
2102 ring->desc, ring->desc_dma_addr);
2103 ring->desc = NULL;
2104 }
2105}
2106
2107static int hns3_alloc_desc(struct hns3_enet_ring *ring)
2108{
2109 int size = ring->desc_num * sizeof(ring->desc[0]);
2110
2111 ring->desc = dma_alloc_coherent(ring_to_dev(ring), size,
2112 &ring->desc_dma_addr, GFP_KERNEL);
2113 if (!ring->desc)
2114 return -ENOMEM;
2115
2116 return 0;
2117}
2118
2119static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
2120 struct hns3_desc_cb *cb)
2121{
2122 int ret;
2123
2124 ret = hns3_alloc_buffer(ring, cb);
2125 if (ret)
2126 goto out;
2127
2128 ret = hns3_map_buffer(ring, cb);
2129 if (ret)
2130 goto out_with_buf;
2131
2132 return 0;
2133
2134out_with_buf:
2135 hns3_free_buffer(ring, cb);
2136out:
2137 return ret;
2138}
2139
2140static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
2141{
2142 int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);
2143
2144 if (ret)
2145 return ret;
2146
2147 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
2148
2149 return 0;
2150}
2151
2152
2153static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
2154{
2155 int i, j, ret;
2156
2157 for (i = 0; i < ring->desc_num; i++) {
2158 ret = hns3_alloc_buffer_attach(ring, i);
2159 if (ret)
2160 goto out_buffer_fail;
2161 }
2162
2163 return 0;
2164
2165out_buffer_fail:
2166 for (j = i - 1; j >= 0; j--)
2167 hns3_free_buffer_detach(ring, j);
2168 return ret;
2169}
2170
2171
2172static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
2173 struct hns3_desc_cb *res_cb)
2174{
2175 hns3_unmap_buffer(ring, &ring->desc_cb[i]);
2176 ring->desc_cb[i] = *res_cb;
2177 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
2178 ring->desc[i].rx.bd_base_info = 0;
2179}
2180
2181static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
2182{
2183 ring->desc_cb[i].reuse_flag = 0;
2184 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
2185 + ring->desc_cb[i].page_offset);
2186 ring->desc[i].rx.bd_base_info = 0;
2187}
2188
2189static void hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, int head,
2190 int *bytes, int *pkts)
2191{
2192 int ntc = ring->next_to_clean;
2193 struct hns3_desc_cb *desc_cb;
2194
2195 while (head != ntc) {
2196 desc_cb = &ring->desc_cb[ntc];
2197 (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
2198 (*bytes) += desc_cb->length;
2199
2200 hns3_free_buffer_detach(ring, ntc);
2201
2202 if (++ntc == ring->desc_num)
2203 ntc = 0;
2204
2205
2206 prefetch(&ring->desc_cb[ntc]);
2207 }
2208
2209
2210
2211
2212 smp_store_release(&ring->next_to_clean, ntc);
2213}
2214
2215static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
2216{
2217 int u = ring->next_to_use;
2218 int c = ring->next_to_clean;
2219
2220 if (unlikely(h > ring->desc_num))
2221 return 0;
2222
2223 return u > c ? (h > c && h <= u) : (h > c || h <= u);
2224}
2225
2226void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
2227{
2228 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2229 struct hns3_nic_priv *priv = netdev_priv(netdev);
2230 struct netdev_queue *dev_queue;
2231 int bytes, pkts;
2232 int head;
2233
2234 head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
2235 rmb();
2236
2237 if (is_ring_empty(ring) || head == ring->next_to_clean)
2238 return;
2239
2240 if (unlikely(!is_valid_clean_head(ring, head))) {
2241 netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
2242 ring->next_to_use, ring->next_to_clean);
2243
2244 u64_stats_update_begin(&ring->syncp);
2245 ring->stats.io_err_cnt++;
2246 u64_stats_update_end(&ring->syncp);
2247 return;
2248 }
2249
2250 bytes = 0;
2251 pkts = 0;
2252 hns3_nic_reclaim_desc(ring, head, &bytes, &pkts);
2253
2254 ring->tqp_vector->tx_group.total_bytes += bytes;
2255 ring->tqp_vector->tx_group.total_packets += pkts;
2256
2257 u64_stats_update_begin(&ring->syncp);
2258 ring->stats.tx_bytes += bytes;
2259 ring->stats.tx_pkts += pkts;
2260 u64_stats_update_end(&ring->syncp);
2261
2262 dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
2263 netdev_tx_completed_queue(dev_queue, pkts, bytes);
2264
2265 if (unlikely(pkts && netif_carrier_ok(netdev) &&
2266 (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
2267
2268
2269
2270 smp_mb();
2271 if (netif_tx_queue_stopped(dev_queue) &&
2272 !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
2273 netif_tx_wake_queue(dev_queue);
2274 ring->stats.restart_queue++;
2275 }
2276 }
2277}
2278
2279static int hns3_desc_unused(struct hns3_enet_ring *ring)
2280{
2281 int ntc = ring->next_to_clean;
2282 int ntu = ring->next_to_use;
2283
2284 return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
2285}
2286
2287static void
2288hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count)
2289{
2290 struct hns3_desc_cb *desc_cb;
2291 struct hns3_desc_cb res_cbs;
2292 int i, ret;
2293
2294 for (i = 0; i < cleand_count; i++) {
2295 desc_cb = &ring->desc_cb[ring->next_to_use];
2296 if (desc_cb->reuse_flag) {
2297 u64_stats_update_begin(&ring->syncp);
2298 ring->stats.reuse_pg_cnt++;
2299 u64_stats_update_end(&ring->syncp);
2300
2301 hns3_reuse_buffer(ring, ring->next_to_use);
2302 } else {
2303 ret = hns3_reserve_buffer_map(ring, &res_cbs);
2304 if (ret) {
2305 u64_stats_update_begin(&ring->syncp);
2306 ring->stats.sw_err_cnt++;
2307 u64_stats_update_end(&ring->syncp);
2308
2309 netdev_err(ring->tqp->handle->kinfo.netdev,
2310 "hnae reserve buffer map failed.\n");
2311 break;
2312 }
2313 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
2314
2315 u64_stats_update_begin(&ring->syncp);
2316 ring->stats.non_reuse_pg++;
2317 u64_stats_update_end(&ring->syncp);
2318 }
2319
2320 ring_ptr_move_fw(ring, next_to_use);
2321 }
2322
2323 wmb();
2324 writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
2325}
2326
2327static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
2328 struct hns3_enet_ring *ring, int pull_len,
2329 struct hns3_desc_cb *desc_cb)
2330{
2331 struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
2332 int size = le16_to_cpu(desc->rx.size);
2333 u32 truesize = hnae3_buf_size(ring);
2334
2335 skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
2336 size - pull_len, truesize);
2337
2338
2339
2340
2341 if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()) ||
2342 (!desc_cb->page_offset && page_count(desc_cb->priv) > 1))
2343 return;
2344
2345
2346 desc_cb->page_offset += truesize;
2347
2348 if (desc_cb->page_offset + truesize <= hnae3_page_size(ring)) {
2349 desc_cb->reuse_flag = 1;
2350
2351 get_page(desc_cb->priv);
2352 } else if (page_count(desc_cb->priv) == 1) {
2353 desc_cb->reuse_flag = 1;
2354 desc_cb->page_offset = 0;
2355 get_page(desc_cb->priv);
2356 }
2357}
2358
2359static int hns3_gro_complete(struct sk_buff *skb)
2360{
2361 __be16 type = skb->protocol;
2362 struct tcphdr *th;
2363 int depth = 0;
2364
2365 while (type == htons(ETH_P_8021Q)) {
2366 struct vlan_hdr *vh;
2367
2368 if ((depth + VLAN_HLEN) > skb_headlen(skb))
2369 return -EFAULT;
2370
2371 vh = (struct vlan_hdr *)(skb->data + depth);
2372 type = vh->h_vlan_encapsulated_proto;
2373 depth += VLAN_HLEN;
2374 }
2375
2376 if (type == htons(ETH_P_IP)) {
2377 depth += sizeof(struct iphdr);
2378 } else if (type == htons(ETH_P_IPV6)) {
2379 depth += sizeof(struct ipv6hdr);
2380 } else {
2381 netdev_err(skb->dev,
2382 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
2383 be16_to_cpu(type), depth);
2384 return -EFAULT;
2385 }
2386
2387 th = (struct tcphdr *)(skb->data + depth);
2388 skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
2389 if (th->cwr)
2390 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
2391
2392 skb->ip_summed = CHECKSUM_UNNECESSARY;
2393
2394 return 0;
2395}
2396
2397static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
2398 u32 l234info, u32 bd_base_info, u32 ol_info)
2399{
2400 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2401 int l3_type, l4_type;
2402 int ol4_type;
2403
2404 skb->ip_summed = CHECKSUM_NONE;
2405
2406 skb_checksum_none_assert(skb);
2407
2408 if (!(netdev->features & NETIF_F_RXCSUM))
2409 return;
2410
2411
2412 if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
2413 return;
2414
2415 if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) |
2416 BIT(HNS3_RXD_OL3E_B) |
2417 BIT(HNS3_RXD_OL4E_B)))) {
2418 u64_stats_update_begin(&ring->syncp);
2419 ring->stats.l3l4_csum_err++;
2420 u64_stats_update_end(&ring->syncp);
2421
2422 return;
2423 }
2424
2425 ol4_type = hnae3_get_field(ol_info, HNS3_RXD_OL4ID_M,
2426 HNS3_RXD_OL4ID_S);
2427 switch (ol4_type) {
2428 case HNS3_OL4_TYPE_MAC_IN_UDP:
2429 case HNS3_OL4_TYPE_NVGRE:
2430 skb->csum_level = 1;
2431
2432 case HNS3_OL4_TYPE_NO_TUN:
2433 l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
2434 HNS3_RXD_L3ID_S);
2435 l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
2436 HNS3_RXD_L4ID_S);
2437
2438
2439 if ((l3_type == HNS3_L3_TYPE_IPV4 ||
2440 l3_type == HNS3_L3_TYPE_IPV6) &&
2441 (l4_type == HNS3_L4_TYPE_UDP ||
2442 l4_type == HNS3_L4_TYPE_TCP ||
2443 l4_type == HNS3_L4_TYPE_SCTP))
2444 skb->ip_summed = CHECKSUM_UNNECESSARY;
2445 break;
2446 default:
2447 break;
2448 }
2449}
2450
2451static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
2452{
2453 if (skb_has_frag_list(skb))
2454 napi_gro_flush(&ring->tqp_vector->napi, false);
2455
2456 napi_gro_receive(&ring->tqp_vector->napi, skb);
2457}
2458
2459static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
2460 struct hns3_desc *desc, u32 l234info,
2461 u16 *vlan_tag)
2462{
2463 struct hnae3_handle *handle = ring->tqp->handle;
2464 struct pci_dev *pdev = ring->tqp->handle->pdev;
2465
2466 if (pdev->revision == 0x20) {
2467 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2468 if (!(*vlan_tag & VLAN_VID_MASK))
2469 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2470
2471 return (*vlan_tag != 0);
2472 }
2473
2474#define HNS3_STRP_OUTER_VLAN 0x1
2475#define HNS3_STRP_INNER_VLAN 0x2
2476#define HNS3_STRP_BOTH 0x3
2477
2478
2479
2480
2481
2482 switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
2483 HNS3_RXD_STRP_TAGP_S)) {
2484 case HNS3_STRP_OUTER_VLAN:
2485 if (handle->port_base_vlan_state !=
2486 HNAE3_PORT_BASE_VLAN_DISABLE)
2487 return false;
2488
2489 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2490 return true;
2491 case HNS3_STRP_INNER_VLAN:
2492 if (handle->port_base_vlan_state !=
2493 HNAE3_PORT_BASE_VLAN_DISABLE)
2494 return false;
2495
2496 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2497 return true;
2498 case HNS3_STRP_BOTH:
2499 if (handle->port_base_vlan_state ==
2500 HNAE3_PORT_BASE_VLAN_DISABLE)
2501 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2502 else
2503 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2504
2505 return true;
2506 default:
2507 return false;
2508 }
2509}
2510
2511static int hns3_alloc_skb(struct hns3_enet_ring *ring, int length,
2512 unsigned char *va)
2513{
2514#define HNS3_NEED_ADD_FRAG 1
2515 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
2516 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2517 struct sk_buff *skb;
2518
2519 ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE);
2520 skb = ring->skb;
2521 if (unlikely(!skb)) {
2522 netdev_err(netdev, "alloc rx skb fail\n");
2523
2524 u64_stats_update_begin(&ring->syncp);
2525 ring->stats.sw_err_cnt++;
2526 u64_stats_update_end(&ring->syncp);
2527
2528 return -ENOMEM;
2529 }
2530
2531 prefetchw(skb->data);
2532
2533 ring->pending_buf = 1;
2534 ring->frag_num = 0;
2535 ring->tail_skb = NULL;
2536 if (length <= HNS3_RX_HEAD_SIZE) {
2537 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
2538
2539
2540 if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
2541 desc_cb->reuse_flag = 1;
2542 else
2543 put_page(desc_cb->priv);
2544
2545 ring_ptr_move_fw(ring, next_to_clean);
2546 return 0;
2547 }
2548 u64_stats_update_begin(&ring->syncp);
2549 ring->stats.seg_pkt_cnt++;
2550 u64_stats_update_end(&ring->syncp);
2551
2552 ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE);
2553 __skb_put(skb, ring->pull_len);
2554 hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len,
2555 desc_cb);
2556 ring_ptr_move_fw(ring, next_to_clean);
2557
2558 return HNS3_NEED_ADD_FRAG;
2559}
2560
2561static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
2562 struct sk_buff **out_skb, bool pending)
2563{
2564 struct sk_buff *skb = *out_skb;
2565 struct sk_buff *head_skb = *out_skb;
2566 struct sk_buff *new_skb;
2567 struct hns3_desc_cb *desc_cb;
2568 struct hns3_desc *pre_desc;
2569 u32 bd_base_info;
2570 int pre_bd;
2571
2572
2573
2574
2575 if (pending) {
2576 pre_bd = (ring->next_to_clean - 1 + ring->desc_num) %
2577 ring->desc_num;
2578 pre_desc = &ring->desc[pre_bd];
2579 bd_base_info = le32_to_cpu(pre_desc->rx.bd_base_info);
2580 } else {
2581 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2582 }
2583
2584 while (!(bd_base_info & BIT(HNS3_RXD_FE_B))) {
2585 desc = &ring->desc[ring->next_to_clean];
2586 desc_cb = &ring->desc_cb[ring->next_to_clean];
2587 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2588
2589 dma_rmb();
2590 if (!(bd_base_info & BIT(HNS3_RXD_VLD_B)))
2591 return -ENXIO;
2592
2593 if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) {
2594 new_skb = napi_alloc_skb(&ring->tqp_vector->napi,
2595 HNS3_RX_HEAD_SIZE);
2596 if (unlikely(!new_skb)) {
2597 netdev_err(ring->tqp->handle->kinfo.netdev,
2598 "alloc rx skb frag fail\n");
2599 return -ENXIO;
2600 }
2601 ring->frag_num = 0;
2602
2603 if (ring->tail_skb) {
2604 ring->tail_skb->next = new_skb;
2605 ring->tail_skb = new_skb;
2606 } else {
2607 skb_shinfo(skb)->frag_list = new_skb;
2608 ring->tail_skb = new_skb;
2609 }
2610 }
2611
2612 if (ring->tail_skb) {
2613 head_skb->truesize += hnae3_buf_size(ring);
2614 head_skb->data_len += le16_to_cpu(desc->rx.size);
2615 head_skb->len += le16_to_cpu(desc->rx.size);
2616 skb = ring->tail_skb;
2617 }
2618
2619 hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
2620 ring_ptr_move_fw(ring, next_to_clean);
2621 ring->pending_buf++;
2622 }
2623
2624 return 0;
2625}
2626
2627static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
2628 struct sk_buff *skb, u32 l234info,
2629 u32 bd_base_info, u32 ol_info)
2630{
2631 u16 gro_count;
2632 u32 l3_type;
2633
2634 gro_count = hnae3_get_field(l234info, HNS3_RXD_GRO_COUNT_M,
2635 HNS3_RXD_GRO_COUNT_S);
2636
2637 if (!gro_count) {
2638 hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info);
2639 return 0;
2640 }
2641
2642 NAPI_GRO_CB(skb)->count = gro_count;
2643
2644 l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
2645 HNS3_RXD_L3ID_S);
2646 if (l3_type == HNS3_L3_TYPE_IPV4)
2647 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
2648 else if (l3_type == HNS3_L3_TYPE_IPV6)
2649 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
2650 else
2651 return -EFAULT;
2652
2653 skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
2654 HNS3_RXD_GRO_SIZE_M,
2655 HNS3_RXD_GRO_SIZE_S);
2656
2657 return hns3_gro_complete(skb);
2658}
2659
2660static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
2661 struct sk_buff *skb, u32 rss_hash)
2662{
2663 struct hnae3_handle *handle = ring->tqp->handle;
2664 enum pkt_hash_types rss_type;
2665
2666 if (rss_hash)
2667 rss_type = handle->kinfo.rss_type;
2668 else
2669 rss_type = PKT_HASH_TYPE_NONE;
2670
2671 skb_set_hash(skb, rss_hash, rss_type);
2672}
2673
2674static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
2675{
2676 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2677 enum hns3_pkt_l2t_type l2_frame_type;
2678 u32 bd_base_info, l234info, ol_info;
2679 struct hns3_desc *desc;
2680 unsigned int len;
2681 int pre_ntc, ret;
2682
2683
2684
2685
2686
2687 pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) :
2688 (ring->desc_num - 1);
2689 desc = &ring->desc[pre_ntc];
2690 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2691 l234info = le32_to_cpu(desc->rx.l234_info);
2692 ol_info = le32_to_cpu(desc->rx.ol_info);
2693
2694
2695
2696
2697
2698 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
2699 u16 vlan_tag;
2700
2701 if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
2702 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2703 vlan_tag);
2704 }
2705
2706 if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) {
2707 u64_stats_update_begin(&ring->syncp);
2708 ring->stats.non_vld_descs++;
2709 u64_stats_update_end(&ring->syncp);
2710
2711 return -EINVAL;
2712 }
2713
2714 if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
2715 BIT(HNS3_RXD_L2E_B))))) {
2716 u64_stats_update_begin(&ring->syncp);
2717 if (l234info & BIT(HNS3_RXD_L2E_B))
2718 ring->stats.l2_err++;
2719 else
2720 ring->stats.err_pkt_len++;
2721 u64_stats_update_end(&ring->syncp);
2722
2723 return -EFAULT;
2724 }
2725
2726 len = skb->len;
2727
2728
2729 skb->protocol = eth_type_trans(skb, netdev);
2730
2731
2732 ret = hns3_set_gro_and_checksum(ring, skb, l234info,
2733 bd_base_info, ol_info);
2734 if (unlikely(ret)) {
2735 u64_stats_update_begin(&ring->syncp);
2736 ring->stats.rx_err_cnt++;
2737 u64_stats_update_end(&ring->syncp);
2738 return ret;
2739 }
2740
2741 l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M,
2742 HNS3_RXD_DMAC_S);
2743
2744 u64_stats_update_begin(&ring->syncp);
2745 ring->stats.rx_pkts++;
2746 ring->stats.rx_bytes += len;
2747
2748 if (l2_frame_type == HNS3_L2_TYPE_MULTICAST)
2749 ring->stats.rx_multicast++;
2750
2751 u64_stats_update_end(&ring->syncp);
2752
2753 ring->tqp_vector->rx_group.total_bytes += len;
2754
2755 hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash));
2756 return 0;
2757}
2758
2759static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
2760 struct sk_buff **out_skb)
2761{
2762 struct sk_buff *skb = ring->skb;
2763 struct hns3_desc_cb *desc_cb;
2764 struct hns3_desc *desc;
2765 u32 bd_base_info;
2766 int length;
2767 int ret;
2768
2769 desc = &ring->desc[ring->next_to_clean];
2770 desc_cb = &ring->desc_cb[ring->next_to_clean];
2771
2772 prefetch(desc);
2773
2774 length = le16_to_cpu(desc->rx.size);
2775 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2776
2777
2778 if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
2779 return -ENXIO;
2780
2781 if (!skb)
2782 ring->va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
2783
2784
2785
2786
2787
2788
2789
2790
2791 prefetch(ring->va);
2792#if L1_CACHE_BYTES < 128
2793 prefetch(ring->va + L1_CACHE_BYTES);
2794#endif
2795
2796 if (!skb) {
2797 ret = hns3_alloc_skb(ring, length, ring->va);
2798 *out_skb = skb = ring->skb;
2799
2800 if (ret < 0)
2801 return ret;
2802 if (ret > 0) {
2803 ret = hns3_add_frag(ring, desc, &skb, false);
2804 if (ret)
2805 return ret;
2806
2807
2808
2809
2810 memcpy(skb->data, ring->va,
2811 ALIGN(ring->pull_len, sizeof(long)));
2812 }
2813 } else {
2814 ret = hns3_add_frag(ring, desc, &skb, true);
2815 if (ret)
2816 return ret;
2817
2818
2819
2820
2821 memcpy(skb->data, ring->va,
2822 ALIGN(ring->pull_len, sizeof(long)));
2823 }
2824
2825 ret = hns3_handle_bdinfo(ring, skb);
2826 if (unlikely(ret)) {
2827 dev_kfree_skb_any(skb);
2828 return ret;
2829 }
2830
2831 *out_skb = skb;
2832
2833 return 0;
2834}
2835
2836int hns3_clean_rx_ring(
2837 struct hns3_enet_ring *ring, int budget,
2838 void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
2839{
2840#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
2841 int recv_pkts, recv_bds, clean_count, err;
2842 int unused_count = hns3_desc_unused(ring);
2843 struct sk_buff *skb = ring->skb;
2844 int num;
2845
2846 num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
2847 rmb();
2848
2849 recv_pkts = 0, recv_bds = 0, clean_count = 0;
2850 num -= unused_count;
2851 unused_count -= ring->pending_buf;
2852
2853 while (recv_pkts < budget && recv_bds < num) {
2854
2855 if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
2856 hns3_nic_alloc_rx_buffers(ring,
2857 clean_count + unused_count);
2858 clean_count = 0;
2859 unused_count = hns3_desc_unused(ring) -
2860 ring->pending_buf;
2861 }
2862
2863
2864 err = hns3_handle_rx_bd(ring, &skb);
2865 if (unlikely(!skb))
2866 goto out;
2867
2868 if (err == -ENXIO) {
2869 goto out;
2870 } else if (unlikely(err)) {
2871 recv_bds += ring->pending_buf;
2872 clean_count += ring->pending_buf;
2873 ring->skb = NULL;
2874 ring->pending_buf = 0;
2875 continue;
2876 }
2877
2878 rx_fn(ring, skb);
2879 recv_bds += ring->pending_buf;
2880 clean_count += ring->pending_buf;
2881 ring->skb = NULL;
2882 ring->pending_buf = 0;
2883
2884 recv_pkts++;
2885 }
2886
2887out:
2888
2889 if (clean_count + unused_count > 0)
2890 hns3_nic_alloc_rx_buffers(ring,
2891 clean_count + unused_count);
2892
2893 return recv_pkts;
2894}
2895
2896static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
2897{
2898 struct hns3_enet_tqp_vector *tqp_vector =
2899 ring_group->ring->tqp_vector;
2900 enum hns3_flow_level_range new_flow_level;
2901 int packets_per_msecs;
2902 int bytes_per_msecs;
2903 u32 time_passed_ms;
2904 u16 new_int_gl;
2905
2906 if (!tqp_vector->last_jiffies)
2907 return false;
2908
2909 if (ring_group->total_packets == 0) {
2910 ring_group->coal.int_gl = HNS3_INT_GL_50K;
2911 ring_group->coal.flow_level = HNS3_FLOW_LOW;
2912 return true;
2913 }
2914
2915
2916
2917
2918
2919
2920
2921 new_flow_level = ring_group->coal.flow_level;
2922 new_int_gl = ring_group->coal.int_gl;
2923 time_passed_ms =
2924 jiffies_to_msecs(jiffies - tqp_vector->last_jiffies);
2925
2926 if (!time_passed_ms)
2927 return false;
2928
2929 do_div(ring_group->total_packets, time_passed_ms);
2930 packets_per_msecs = ring_group->total_packets;
2931
2932 do_div(ring_group->total_bytes, time_passed_ms);
2933 bytes_per_msecs = ring_group->total_bytes;
2934
2935#define HNS3_RX_LOW_BYTE_RATE 10000
2936#define HNS3_RX_MID_BYTE_RATE 20000
2937
2938 switch (new_flow_level) {
2939 case HNS3_FLOW_LOW:
2940 if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE)
2941 new_flow_level = HNS3_FLOW_MID;
2942 break;
2943 case HNS3_FLOW_MID:
2944 if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE)
2945 new_flow_level = HNS3_FLOW_HIGH;
2946 else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE)
2947 new_flow_level = HNS3_FLOW_LOW;
2948 break;
2949 case HNS3_FLOW_HIGH:
2950 case HNS3_FLOW_ULTRA:
2951 default:
2952 if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE)
2953 new_flow_level = HNS3_FLOW_MID;
2954 break;
2955 }
2956
2957#define HNS3_RX_ULTRA_PACKET_RATE 40
2958
2959 if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE &&
2960 &tqp_vector->rx_group == ring_group)
2961 new_flow_level = HNS3_FLOW_ULTRA;
2962
2963 switch (new_flow_level) {
2964 case HNS3_FLOW_LOW:
2965 new_int_gl = HNS3_INT_GL_50K;
2966 break;
2967 case HNS3_FLOW_MID:
2968 new_int_gl = HNS3_INT_GL_20K;
2969 break;
2970 case HNS3_FLOW_HIGH:
2971 new_int_gl = HNS3_INT_GL_18K;
2972 break;
2973 case HNS3_FLOW_ULTRA:
2974 new_int_gl = HNS3_INT_GL_8K;
2975 break;
2976 default:
2977 break;
2978 }
2979
2980 ring_group->total_bytes = 0;
2981 ring_group->total_packets = 0;
2982 ring_group->coal.flow_level = new_flow_level;
2983 if (new_int_gl != ring_group->coal.int_gl) {
2984 ring_group->coal.int_gl = new_int_gl;
2985 return true;
2986 }
2987 return false;
2988}
2989
2990static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
2991{
2992 struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
2993 struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
2994 bool rx_update, tx_update;
2995
2996
2997 if (time_before(jiffies,
2998 tqp_vector->last_jiffies + msecs_to_jiffies(1000)))
2999 return;
3000
3001 if (rx_group->coal.gl_adapt_enable) {
3002 rx_update = hns3_get_new_int_gl(rx_group);
3003 if (rx_update)
3004 hns3_set_vector_coalesce_rx_gl(tqp_vector,
3005 rx_group->coal.int_gl);
3006 }
3007
3008 if (tx_group->coal.gl_adapt_enable) {
3009 tx_update = hns3_get_new_int_gl(tx_group);
3010 if (tx_update)
3011 hns3_set_vector_coalesce_tx_gl(tqp_vector,
3012 tx_group->coal.int_gl);
3013 }
3014
3015 tqp_vector->last_jiffies = jiffies;
3016}
3017
3018static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
3019{
3020 struct hns3_nic_priv *priv = netdev_priv(napi->dev);
3021 struct hns3_enet_ring *ring;
3022 int rx_pkt_total = 0;
3023
3024 struct hns3_enet_tqp_vector *tqp_vector =
3025 container_of(napi, struct hns3_enet_tqp_vector, napi);
3026 bool clean_complete = true;
3027 int rx_budget = budget;
3028
3029 if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
3030 napi_complete(napi);
3031 return 0;
3032 }
3033
3034
3035
3036
3037 hns3_for_each_ring(ring, tqp_vector->tx_group)
3038 hns3_clean_tx_ring(ring);
3039
3040
3041 if (tqp_vector->num_tqps > 1)
3042 rx_budget = max(budget / tqp_vector->num_tqps, 1);
3043
3044 hns3_for_each_ring(ring, tqp_vector->rx_group) {
3045 int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
3046 hns3_rx_skb);
3047
3048 if (rx_cleaned >= rx_budget)
3049 clean_complete = false;
3050
3051 rx_pkt_total += rx_cleaned;
3052 }
3053
3054 tqp_vector->rx_group.total_packets += rx_pkt_total;
3055
3056 if (!clean_complete)
3057 return budget;
3058
3059 if (napi_complete(napi) &&
3060 likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
3061 hns3_update_new_int_gl(tqp_vector);
3062 hns3_mask_vector_irq(tqp_vector, 1);
3063 }
3064
3065 return rx_pkt_total;
3066}
3067
3068static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
3069 struct hnae3_ring_chain_node *head)
3070{
3071 struct pci_dev *pdev = tqp_vector->handle->pdev;
3072 struct hnae3_ring_chain_node *cur_chain = head;
3073 struct hnae3_ring_chain_node *chain;
3074 struct hns3_enet_ring *tx_ring;
3075 struct hns3_enet_ring *rx_ring;
3076
3077 tx_ring = tqp_vector->tx_group.ring;
3078 if (tx_ring) {
3079 cur_chain->tqp_index = tx_ring->tqp->tqp_index;
3080 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
3081 HNAE3_RING_TYPE_TX);
3082 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
3083 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
3084
3085 cur_chain->next = NULL;
3086
3087 while (tx_ring->next) {
3088 tx_ring = tx_ring->next;
3089
3090 chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
3091 GFP_KERNEL);
3092 if (!chain)
3093 goto err_free_chain;
3094
3095 cur_chain->next = chain;
3096 chain->tqp_index = tx_ring->tqp->tqp_index;
3097 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
3098 HNAE3_RING_TYPE_TX);
3099 hnae3_set_field(chain->int_gl_idx,
3100 HNAE3_RING_GL_IDX_M,
3101 HNAE3_RING_GL_IDX_S,
3102 HNAE3_RING_GL_TX);
3103
3104 cur_chain = chain;
3105 }
3106 }
3107
3108 rx_ring = tqp_vector->rx_group.ring;
3109 if (!tx_ring && rx_ring) {
3110 cur_chain->next = NULL;
3111 cur_chain->tqp_index = rx_ring->tqp->tqp_index;
3112 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
3113 HNAE3_RING_TYPE_RX);
3114 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
3115 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
3116
3117 rx_ring = rx_ring->next;
3118 }
3119
3120 while (rx_ring) {
3121 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
3122 if (!chain)
3123 goto err_free_chain;
3124
3125 cur_chain->next = chain;
3126 chain->tqp_index = rx_ring->tqp->tqp_index;
3127 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
3128 HNAE3_RING_TYPE_RX);
3129 hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
3130 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
3131
3132 cur_chain = chain;
3133
3134 rx_ring = rx_ring->next;
3135 }
3136
3137 return 0;
3138
3139err_free_chain:
3140 cur_chain = head->next;
3141 while (cur_chain) {
3142 chain = cur_chain->next;
3143 devm_kfree(&pdev->dev, cur_chain);
3144 cur_chain = chain;
3145 }
3146 head->next = NULL;
3147
3148 return -ENOMEM;
3149}
3150
3151static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
3152 struct hnae3_ring_chain_node *head)
3153{
3154 struct pci_dev *pdev = tqp_vector->handle->pdev;
3155 struct hnae3_ring_chain_node *chain_tmp, *chain;
3156
3157 chain = head->next;
3158
3159 while (chain) {
3160 chain_tmp = chain->next;
3161 devm_kfree(&pdev->dev, chain);
3162 chain = chain_tmp;
3163 }
3164}
3165
3166static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
3167 struct hns3_enet_ring *ring)
3168{
3169 ring->next = group->ring;
3170 group->ring = ring;
3171
3172 group->count++;
3173}
3174
3175static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv)
3176{
3177 struct pci_dev *pdev = priv->ae_handle->pdev;
3178 struct hns3_enet_tqp_vector *tqp_vector;
3179 int num_vectors = priv->vector_num;
3180 int numa_node;
3181 int vector_i;
3182
3183 numa_node = dev_to_node(&pdev->dev);
3184
3185 for (vector_i = 0; vector_i < num_vectors; vector_i++) {
3186 tqp_vector = &priv->tqp_vector[vector_i];
3187 cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node),
3188 &tqp_vector->affinity_mask);
3189 }
3190}
3191
3192static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
3193{
3194 struct hnae3_ring_chain_node vector_ring_chain;
3195 struct hnae3_handle *h = priv->ae_handle;
3196 struct hns3_enet_tqp_vector *tqp_vector;
3197 int ret = 0;
3198 int i;
3199
3200 hns3_nic_set_cpumask(priv);
3201
3202 for (i = 0; i < priv->vector_num; i++) {
3203 tqp_vector = &priv->tqp_vector[i];
3204 hns3_vector_gl_rl_init_hw(tqp_vector, priv);
3205 tqp_vector->num_tqps = 0;
3206 }
3207
3208 for (i = 0; i < h->kinfo.num_tqps; i++) {
3209 u16 vector_i = i % priv->vector_num;
3210 u16 tqp_num = h->kinfo.num_tqps;
3211
3212 tqp_vector = &priv->tqp_vector[vector_i];
3213
3214 hns3_add_ring_to_group(&tqp_vector->tx_group,
3215 priv->ring_data[i].ring);
3216
3217 hns3_add_ring_to_group(&tqp_vector->rx_group,
3218 priv->ring_data[i + tqp_num].ring);
3219
3220 priv->ring_data[i].ring->tqp_vector = tqp_vector;
3221 priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
3222 tqp_vector->num_tqps++;
3223 }
3224
3225 for (i = 0; i < priv->vector_num; i++) {
3226 tqp_vector = &priv->tqp_vector[i];
3227
3228 tqp_vector->rx_group.total_bytes = 0;
3229 tqp_vector->rx_group.total_packets = 0;
3230 tqp_vector->tx_group.total_bytes = 0;
3231 tqp_vector->tx_group.total_packets = 0;
3232 tqp_vector->handle = h;
3233
3234 ret = hns3_get_vector_ring_chain(tqp_vector,
3235 &vector_ring_chain);
3236 if (ret)
3237 goto map_ring_fail;
3238
3239 ret = h->ae_algo->ops->map_ring_to_vector(h,
3240 tqp_vector->vector_irq, &vector_ring_chain);
3241
3242 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
3243
3244 if (ret)
3245 goto map_ring_fail;
3246
3247 netif_napi_add(priv->netdev, &tqp_vector->napi,
3248 hns3_nic_common_poll, NAPI_POLL_WEIGHT);
3249 }
3250
3251 return 0;
3252
3253map_ring_fail:
3254 while (i--)
3255 netif_napi_del(&priv->tqp_vector[i].napi);
3256
3257 return ret;
3258}
3259
3260static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
3261{
3262#define HNS3_VECTOR_PF_MAX_NUM 64
3263
3264 struct hnae3_handle *h = priv->ae_handle;
3265 struct hns3_enet_tqp_vector *tqp_vector;
3266 struct hnae3_vector_info *vector;
3267 struct pci_dev *pdev = h->pdev;
3268 u16 tqp_num = h->kinfo.num_tqps;
3269 u16 vector_num;
3270 int ret = 0;
3271 u16 i;
3272
3273
3274
3275 vector_num = min_t(u16, num_online_cpus(), tqp_num);
3276 vector_num = min_t(u16, vector_num, HNS3_VECTOR_PF_MAX_NUM);
3277
3278 vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
3279 GFP_KERNEL);
3280 if (!vector)
3281 return -ENOMEM;
3282
3283 vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
3284
3285 priv->vector_num = vector_num;
3286 priv->tqp_vector = (struct hns3_enet_tqp_vector *)
3287 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
3288 GFP_KERNEL);
3289 if (!priv->tqp_vector) {
3290 ret = -ENOMEM;
3291 goto out;
3292 }
3293
3294 for (i = 0; i < priv->vector_num; i++) {
3295 tqp_vector = &priv->tqp_vector[i];
3296 tqp_vector->idx = i;
3297 tqp_vector->mask_addr = vector[i].io_addr;
3298 tqp_vector->vector_irq = vector[i].vector;
3299 hns3_vector_gl_rl_init(tqp_vector, priv);
3300 }
3301
3302out:
3303 devm_kfree(&pdev->dev, vector);
3304 return ret;
3305}
3306
3307static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
3308{
3309 group->ring = NULL;
3310 group->count = 0;
3311}
3312
3313static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
3314{
3315 struct hnae3_ring_chain_node vector_ring_chain;
3316 struct hnae3_handle *h = priv->ae_handle;
3317 struct hns3_enet_tqp_vector *tqp_vector;
3318 int i;
3319
3320 for (i = 0; i < priv->vector_num; i++) {
3321 tqp_vector = &priv->tqp_vector[i];
3322
3323 if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring)
3324 continue;
3325
3326 hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain);
3327
3328 h->ae_algo->ops->unmap_ring_from_vector(h,
3329 tqp_vector->vector_irq, &vector_ring_chain);
3330
3331 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
3332
3333 if (tqp_vector->irq_init_flag == HNS3_VECTOR_INITED) {
3334 irq_set_affinity_notifier(tqp_vector->vector_irq,
3335 NULL);
3336 irq_set_affinity_hint(tqp_vector->vector_irq, NULL);
3337 free_irq(tqp_vector->vector_irq, tqp_vector);
3338 tqp_vector->irq_init_flag = HNS3_VECTOR_NOT_INITED;
3339 }
3340
3341 hns3_clear_ring_group(&tqp_vector->rx_group);
3342 hns3_clear_ring_group(&tqp_vector->tx_group);
3343 netif_napi_del(&priv->tqp_vector[i].napi);
3344 }
3345}
3346
3347static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
3348{
3349 struct hnae3_handle *h = priv->ae_handle;
3350 struct pci_dev *pdev = h->pdev;
3351 int i, ret;
3352
3353 for (i = 0; i < priv->vector_num; i++) {
3354 struct hns3_enet_tqp_vector *tqp_vector;
3355
3356 tqp_vector = &priv->tqp_vector[i];
3357 ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
3358 if (ret)
3359 return ret;
3360 }
3361
3362 devm_kfree(&pdev->dev, priv->tqp_vector);
3363 return 0;
3364}
3365
3366static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
3367 int ring_type)
3368{
3369 struct hns3_nic_ring_data *ring_data = priv->ring_data;
3370 int queue_num = priv->ae_handle->kinfo.num_tqps;
3371 struct pci_dev *pdev = priv->ae_handle->pdev;
3372 struct hns3_enet_ring *ring;
3373 int desc_num;
3374
3375 ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
3376 if (!ring)
3377 return -ENOMEM;
3378
3379 if (ring_type == HNAE3_RING_TYPE_TX) {
3380 desc_num = priv->ae_handle->kinfo.num_tx_desc;
3381 ring_data[q->tqp_index].ring = ring;
3382 ring_data[q->tqp_index].queue_index = q->tqp_index;
3383 ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
3384 } else {
3385 desc_num = priv->ae_handle->kinfo.num_rx_desc;
3386 ring_data[q->tqp_index + queue_num].ring = ring;
3387 ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
3388 ring->io_base = q->io_base;
3389 }
3390
3391 hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
3392
3393 ring->tqp = q;
3394 ring->desc = NULL;
3395 ring->desc_cb = NULL;
3396 ring->dev = priv->dev;
3397 ring->desc_dma_addr = 0;
3398 ring->buf_size = q->buf_size;
3399 ring->desc_num = desc_num;
3400 ring->next_to_use = 0;
3401 ring->next_to_clean = 0;
3402
3403 return 0;
3404}
3405
3406static int hns3_queue_to_ring(struct hnae3_queue *tqp,
3407 struct hns3_nic_priv *priv)
3408{
3409 int ret;
3410
3411 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
3412 if (ret)
3413 return ret;
3414
3415 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
3416 if (ret) {
3417 devm_kfree(priv->dev, priv->ring_data[tqp->tqp_index].ring);
3418 return ret;
3419 }
3420
3421 return 0;
3422}
3423
3424static int hns3_get_ring_config(struct hns3_nic_priv *priv)
3425{
3426 struct hnae3_handle *h = priv->ae_handle;
3427 struct pci_dev *pdev = h->pdev;
3428 int i, ret;
3429
3430 priv->ring_data = devm_kzalloc(&pdev->dev,
3431 array3_size(h->kinfo.num_tqps,
3432 sizeof(*priv->ring_data),
3433 2),
3434 GFP_KERNEL);
3435 if (!priv->ring_data)
3436 return -ENOMEM;
3437
3438 for (i = 0; i < h->kinfo.num_tqps; i++) {
3439 ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
3440 if (ret)
3441 goto err;
3442 }
3443
3444 return 0;
3445err:
3446 while (i--) {
3447 devm_kfree(priv->dev, priv->ring_data[i].ring);
3448 devm_kfree(priv->dev,
3449 priv->ring_data[i + h->kinfo.num_tqps].ring);
3450 }
3451
3452 devm_kfree(&pdev->dev, priv->ring_data);
3453 priv->ring_data = NULL;
3454 return ret;
3455}
3456
3457static void hns3_put_ring_config(struct hns3_nic_priv *priv)
3458{
3459 struct hnae3_handle *h = priv->ae_handle;
3460 int i;
3461
3462 if (!priv->ring_data)
3463 return;
3464
3465 for (i = 0; i < h->kinfo.num_tqps; i++) {
3466 devm_kfree(priv->dev, priv->ring_data[i].ring);
3467 devm_kfree(priv->dev,
3468 priv->ring_data[i + h->kinfo.num_tqps].ring);
3469 }
3470 devm_kfree(priv->dev, priv->ring_data);
3471 priv->ring_data = NULL;
3472}
3473
3474static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
3475{
3476 int ret;
3477
3478 if (ring->desc_num <= 0 || ring->buf_size <= 0)
3479 return -EINVAL;
3480
3481 ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num,
3482 sizeof(ring->desc_cb[0]), GFP_KERNEL);
3483 if (!ring->desc_cb) {
3484 ret = -ENOMEM;
3485 goto out;
3486 }
3487
3488 ret = hns3_alloc_desc(ring);
3489 if (ret)
3490 goto out_with_desc_cb;
3491
3492 if (!HNAE3_IS_TX_RING(ring)) {
3493 ret = hns3_alloc_ring_buffers(ring);
3494 if (ret)
3495 goto out_with_desc;
3496 }
3497
3498 return 0;
3499
3500out_with_desc:
3501 hns3_free_desc(ring);
3502out_with_desc_cb:
3503 devm_kfree(ring_to_dev(ring), ring->desc_cb);
3504 ring->desc_cb = NULL;
3505out:
3506 return ret;
3507}
3508
3509static void hns3_fini_ring(struct hns3_enet_ring *ring)
3510{
3511 hns3_free_desc(ring);
3512 devm_kfree(ring_to_dev(ring), ring->desc_cb);
3513 ring->desc_cb = NULL;
3514 ring->next_to_clean = 0;
3515 ring->next_to_use = 0;
3516 ring->pending_buf = 0;
3517 if (ring->skb) {
3518 dev_kfree_skb_any(ring->skb);
3519 ring->skb = NULL;
3520 }
3521}
3522
3523static int hns3_buf_size2type(u32 buf_size)
3524{
3525 int bd_size_type;
3526
3527 switch (buf_size) {
3528 case 512:
3529 bd_size_type = HNS3_BD_SIZE_512_TYPE;
3530 break;
3531 case 1024:
3532 bd_size_type = HNS3_BD_SIZE_1024_TYPE;
3533 break;
3534 case 2048:
3535 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
3536 break;
3537 case 4096:
3538 bd_size_type = HNS3_BD_SIZE_4096_TYPE;
3539 break;
3540 default:
3541 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
3542 }
3543
3544 return bd_size_type;
3545}
3546
3547static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
3548{
3549 dma_addr_t dma = ring->desc_dma_addr;
3550 struct hnae3_queue *q = ring->tqp;
3551
3552 if (!HNAE3_IS_TX_RING(ring)) {
3553 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
3554 (u32)dma);
3555 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
3556 (u32)((dma >> 31) >> 1));
3557
3558 hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
3559 hns3_buf_size2type(ring->buf_size));
3560 hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
3561 ring->desc_num / 8 - 1);
3562
3563 } else {
3564 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
3565 (u32)dma);
3566 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
3567 (u32)((dma >> 31) >> 1));
3568
3569 hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
3570 ring->desc_num / 8 - 1);
3571 }
3572}
3573
3574static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
3575{
3576 struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
3577 int i;
3578
3579 for (i = 0; i < HNAE3_MAX_TC; i++) {
3580 struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
3581 int j;
3582
3583 if (!tc_info->enable)
3584 continue;
3585
3586 for (j = 0; j < tc_info->tqp_count; j++) {
3587 struct hnae3_queue *q;
3588
3589 q = priv->ring_data[tc_info->tqp_offset + j].ring->tqp;
3590 hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG,
3591 tc_info->tc);
3592 }
3593 }
3594}
3595
3596int hns3_init_all_ring(struct hns3_nic_priv *priv)
3597{
3598 struct hnae3_handle *h = priv->ae_handle;
3599 int ring_num = h->kinfo.num_tqps * 2;
3600 int i, j;
3601 int ret;
3602
3603 for (i = 0; i < ring_num; i++) {
3604 ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
3605 if (ret) {
3606 dev_err(priv->dev,
3607 "Alloc ring memory fail! ret=%d\n", ret);
3608 goto out_when_alloc_ring_memory;
3609 }
3610
3611 u64_stats_init(&priv->ring_data[i].ring->syncp);
3612 }
3613
3614 return 0;
3615
3616out_when_alloc_ring_memory:
3617 for (j = i - 1; j >= 0; j--)
3618 hns3_fini_ring(priv->ring_data[j].ring);
3619
3620 return -ENOMEM;
3621}
3622
3623int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
3624{
3625 struct hnae3_handle *h = priv->ae_handle;
3626 int i;
3627
3628 for (i = 0; i < h->kinfo.num_tqps; i++) {
3629 hns3_fini_ring(priv->ring_data[i].ring);
3630 hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
3631 }
3632 return 0;
3633}
3634
3635
3636static int hns3_init_mac_addr(struct net_device *netdev, bool init)
3637{
3638 struct hns3_nic_priv *priv = netdev_priv(netdev);
3639 struct hnae3_handle *h = priv->ae_handle;
3640 u8 mac_addr_temp[ETH_ALEN];
3641 int ret = 0;
3642
3643 if (h->ae_algo->ops->get_mac_addr && init) {
3644 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
3645 ether_addr_copy(netdev->dev_addr, mac_addr_temp);
3646 }
3647
3648
3649 if (!is_valid_ether_addr(netdev->dev_addr)) {
3650 eth_hw_addr_random(netdev);
3651 dev_warn(priv->dev, "using random MAC address %pM\n",
3652 netdev->dev_addr);
3653 }
3654
3655 if (h->ae_algo->ops->set_mac_addr)
3656 ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);
3657
3658 return ret;
3659}
3660
3661static int hns3_init_phy(struct net_device *netdev)
3662{
3663 struct hnae3_handle *h = hns3_get_handle(netdev);
3664 int ret = 0;
3665
3666 if (h->ae_algo->ops->mac_connect_phy)
3667 ret = h->ae_algo->ops->mac_connect_phy(h);
3668
3669 return ret;
3670}
3671
3672static void hns3_uninit_phy(struct net_device *netdev)
3673{
3674 struct hnae3_handle *h = hns3_get_handle(netdev);
3675
3676 if (h->ae_algo->ops->mac_disconnect_phy)
3677 h->ae_algo->ops->mac_disconnect_phy(h);
3678}
3679
3680static int hns3_restore_fd_rules(struct net_device *netdev)
3681{
3682 struct hnae3_handle *h = hns3_get_handle(netdev);
3683 int ret = 0;
3684
3685 if (h->ae_algo->ops->restore_fd_rules)
3686 ret = h->ae_algo->ops->restore_fd_rules(h);
3687
3688 return ret;
3689}
3690
3691static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list)
3692{
3693 struct hnae3_handle *h = hns3_get_handle(netdev);
3694
3695 if (h->ae_algo->ops->del_all_fd_entries)
3696 h->ae_algo->ops->del_all_fd_entries(h, clear_list);
3697}
3698
3699static int hns3_client_start(struct hnae3_handle *handle)
3700{
3701 if (!handle->ae_algo->ops->client_start)
3702 return 0;
3703
3704 return handle->ae_algo->ops->client_start(handle);
3705}
3706
3707static void hns3_client_stop(struct hnae3_handle *handle)
3708{
3709 if (!handle->ae_algo->ops->client_stop)
3710 return;
3711
3712 handle->ae_algo->ops->client_stop(handle);
3713}
3714
3715static void hns3_info_show(struct hns3_nic_priv *priv)
3716{
3717 struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
3718
3719 dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr);
3720 dev_info(priv->dev, "Task queue pairs numbers: %d\n", kinfo->num_tqps);
3721 dev_info(priv->dev, "RSS size: %d\n", kinfo->rss_size);
3722 dev_info(priv->dev, "Allocated RSS size: %d\n", kinfo->req_rss_size);
3723 dev_info(priv->dev, "RX buffer length: %d\n", kinfo->rx_buf_len);
3724 dev_info(priv->dev, "Desc num per TX queue: %d\n", kinfo->num_tx_desc);
3725 dev_info(priv->dev, "Desc num per RX queue: %d\n", kinfo->num_rx_desc);
3726 dev_info(priv->dev, "Total number of enabled TCs: %d\n", kinfo->num_tc);
3727 dev_info(priv->dev, "Max mtu size: %d\n", priv->netdev->max_mtu);
3728}
3729
3730static int hns3_client_init(struct hnae3_handle *handle)
3731{
3732 struct pci_dev *pdev = handle->pdev;
3733 u16 alloc_tqps, max_rss_size;
3734 struct hns3_nic_priv *priv;
3735 struct net_device *netdev;
3736 int ret;
3737
3738 handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps,
3739 &max_rss_size);
3740 netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps);
3741 if (!netdev)
3742 return -ENOMEM;
3743
3744 priv = netdev_priv(netdev);
3745 priv->dev = &pdev->dev;
3746 priv->netdev = netdev;
3747 priv->ae_handle = handle;
3748 priv->tx_timeout_count = 0;
3749 set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
3750
3751 handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);
3752
3753 handle->kinfo.netdev = netdev;
3754 handle->priv = (void *)priv;
3755
3756 hns3_init_mac_addr(netdev, true);
3757
3758 hns3_set_default_feature(netdev);
3759
3760 netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
3761 netdev->priv_flags |= IFF_UNICAST_FLT;
3762 netdev->netdev_ops = &hns3_nic_netdev_ops;
3763 SET_NETDEV_DEV(netdev, &pdev->dev);
3764 hns3_ethtool_set_ops(netdev);
3765
3766
3767 netif_carrier_off(netdev);
3768
3769 ret = hns3_get_ring_config(priv);
3770 if (ret) {
3771 ret = -ENOMEM;
3772 goto out_get_ring_cfg;
3773 }
3774
3775 ret = hns3_nic_alloc_vector_data(priv);
3776 if (ret) {
3777 ret = -ENOMEM;
3778 goto out_alloc_vector_data;
3779 }
3780
3781 ret = hns3_nic_init_vector_data(priv);
3782 if (ret) {
3783 ret = -ENOMEM;
3784 goto out_init_vector_data;
3785 }
3786
3787 ret = hns3_init_all_ring(priv);
3788 if (ret) {
3789 ret = -ENOMEM;
3790 goto out_init_ring_data;
3791 }
3792
3793 ret = hns3_init_phy(netdev);
3794 if (ret)
3795 goto out_init_phy;
3796
3797 ret = register_netdev(netdev);
3798 if (ret) {
3799 dev_err(priv->dev, "probe register netdev fail!\n");
3800 goto out_reg_netdev_fail;
3801 }
3802
3803 ret = hns3_client_start(handle);
3804 if (ret) {
3805 dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
3806 goto out_client_start;
3807 }
3808
3809 hns3_dcbnl_setup(handle);
3810
3811 hns3_dbg_init(handle);
3812
3813
3814 netdev->max_mtu = HNS3_MAX_MTU;
3815
3816 set_bit(HNS3_NIC_STATE_INITED, &priv->state);
3817
3818 if (netif_msg_drv(handle))
3819 hns3_info_show(priv);
3820
3821 return ret;
3822
3823out_client_start:
3824 unregister_netdev(netdev);
3825out_reg_netdev_fail:
3826 hns3_uninit_phy(netdev);
3827out_init_phy:
3828 hns3_uninit_all_ring(priv);
3829out_init_ring_data:
3830 hns3_nic_uninit_vector_data(priv);
3831out_init_vector_data:
3832 hns3_nic_dealloc_vector_data(priv);
3833out_alloc_vector_data:
3834 priv->ring_data = NULL;
3835out_get_ring_cfg:
3836 priv->ae_handle = NULL;
3837 free_netdev(netdev);
3838 return ret;
3839}
3840
3841static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
3842{
3843 struct net_device *netdev = handle->kinfo.netdev;
3844 struct hns3_nic_priv *priv = netdev_priv(netdev);
3845 int ret;
3846
3847 hns3_remove_hw_addr(netdev);
3848
3849 if (netdev->reg_state != NETREG_UNINITIALIZED)
3850 unregister_netdev(netdev);
3851
3852 hns3_client_stop(handle);
3853
3854 if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
3855 netdev_warn(netdev, "already uninitialized\n");
3856 goto out_netdev_free;
3857 }
3858
3859 hns3_del_all_fd_rules(netdev, true);
3860
3861 hns3_force_clear_all_rx_ring(handle);
3862
3863 hns3_uninit_phy(netdev);
3864
3865 hns3_nic_uninit_vector_data(priv);
3866
3867 ret = hns3_nic_dealloc_vector_data(priv);
3868 if (ret)
3869 netdev_err(netdev, "dealloc vector error\n");
3870
3871 ret = hns3_uninit_all_ring(priv);
3872 if (ret)
3873 netdev_err(netdev, "uninit ring error\n");
3874
3875 hns3_put_ring_config(priv);
3876
3877 hns3_dbg_uninit(handle);
3878
3879out_netdev_free:
3880 free_netdev(netdev);
3881}
3882
3883static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
3884{
3885 struct net_device *netdev = handle->kinfo.netdev;
3886
3887 if (!netdev)
3888 return;
3889
3890 if (linkup) {
3891 netif_carrier_on(netdev);
3892 netif_tx_wake_all_queues(netdev);
3893 if (netif_msg_link(handle))
3894 netdev_info(netdev, "link up\n");
3895 } else {
3896 netif_carrier_off(netdev);
3897 netif_tx_stop_all_queues(netdev);
3898 if (netif_msg_link(handle))
3899 netdev_info(netdev, "link down\n");
3900 }
3901}
3902
3903static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
3904{
3905 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3906 struct net_device *ndev = kinfo->netdev;
3907
3908 if (tc > HNAE3_MAX_TC)
3909 return -EINVAL;
3910
3911 if (!ndev)
3912 return -ENODEV;
3913
3914 return hns3_nic_set_real_num_queue(ndev);
3915}
3916
3917static int hns3_recover_hw_addr(struct net_device *ndev)
3918{
3919 struct netdev_hw_addr_list *list;
3920 struct netdev_hw_addr *ha, *tmp;
3921 int ret = 0;
3922
3923 netif_addr_lock_bh(ndev);
3924
3925 list = &ndev->uc;
3926 list_for_each_entry_safe(ha, tmp, &list->list, list) {
3927 ret = hns3_nic_uc_sync(ndev, ha->addr);
3928 if (ret)
3929 goto out;
3930 }
3931
3932
3933 list = &ndev->mc;
3934 list_for_each_entry_safe(ha, tmp, &list->list, list) {
3935 ret = hns3_nic_mc_sync(ndev, ha->addr);
3936 if (ret)
3937 goto out;
3938 }
3939
3940out:
3941 netif_addr_unlock_bh(ndev);
3942 return ret;
3943}
3944
3945static void hns3_remove_hw_addr(struct net_device *netdev)
3946{
3947 struct netdev_hw_addr_list *list;
3948 struct netdev_hw_addr *ha, *tmp;
3949
3950 hns3_nic_uc_unsync(netdev, netdev->dev_addr);
3951
3952 netif_addr_lock_bh(netdev);
3953
3954 list = &netdev->uc;
3955 list_for_each_entry_safe(ha, tmp, &list->list, list)
3956 hns3_nic_uc_unsync(netdev, ha->addr);
3957
3958
3959 list = &netdev->mc;
3960 list_for_each_entry_safe(ha, tmp, &list->list, list)
3961 if (ha->refcount > 1)
3962 hns3_nic_mc_unsync(netdev, ha->addr);
3963
3964 netif_addr_unlock_bh(netdev);
3965}
3966
3967static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
3968{
3969 while (ring->next_to_clean != ring->next_to_use) {
3970 ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
3971 hns3_free_buffer_detach(ring, ring->next_to_clean);
3972 ring_ptr_move_fw(ring, next_to_clean);
3973 }
3974}
3975
3976static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
3977{
3978 struct hns3_desc_cb res_cbs;
3979 int ret;
3980
3981 while (ring->next_to_use != ring->next_to_clean) {
3982
3983
3984
3985
3986 if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
3987 ret = hns3_reserve_buffer_map(ring, &res_cbs);
3988 if (ret) {
3989 u64_stats_update_begin(&ring->syncp);
3990 ring->stats.sw_err_cnt++;
3991 u64_stats_update_end(&ring->syncp);
3992
3993
3994
3995 netdev_warn(ring->tqp->handle->kinfo.netdev,
3996 "reserve buffer map failed, ret = %d\n",
3997 ret);
3998 return ret;
3999 }
4000 hns3_replace_buffer(ring, ring->next_to_use,
4001 &res_cbs);
4002 }
4003 ring_ptr_move_fw(ring, next_to_use);
4004 }
4005
4006
4007 if (ring->skb) {
4008 dev_kfree_skb_any(ring->skb);
4009 ring->skb = NULL;
4010 ring->pending_buf = 0;
4011 }
4012
4013 return 0;
4014}
4015
4016static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
4017{
4018 while (ring->next_to_use != ring->next_to_clean) {
4019
4020
4021
4022
4023 if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
4024 hns3_unmap_buffer(ring,
4025 &ring->desc_cb[ring->next_to_use]);
4026 ring->desc_cb[ring->next_to_use].dma = 0;
4027 }
4028
4029 ring_ptr_move_fw(ring, next_to_use);
4030 }
4031}
4032
4033static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h)
4034{
4035 struct net_device *ndev = h->kinfo.netdev;
4036 struct hns3_nic_priv *priv = netdev_priv(ndev);
4037 struct hns3_enet_ring *ring;
4038 u32 i;
4039
4040 for (i = 0; i < h->kinfo.num_tqps; i++) {
4041 ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
4042 hns3_force_clear_rx_ring(ring);
4043 }
4044}
4045
4046static void hns3_clear_all_ring(struct hnae3_handle *h)
4047{
4048 struct net_device *ndev = h->kinfo.netdev;
4049 struct hns3_nic_priv *priv = netdev_priv(ndev);
4050 u32 i;
4051
4052 for (i = 0; i < h->kinfo.num_tqps; i++) {
4053 struct netdev_queue *dev_queue;
4054 struct hns3_enet_ring *ring;
4055
4056 ring = priv->ring_data[i].ring;
4057 hns3_clear_tx_ring(ring);
4058 dev_queue = netdev_get_tx_queue(ndev,
4059 priv->ring_data[i].queue_index);
4060 netdev_tx_reset_queue(dev_queue);
4061
4062 ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
4063
4064
4065
4066 hns3_clear_rx_ring(ring);
4067 }
4068}
4069
4070int hns3_nic_reset_all_ring(struct hnae3_handle *h)
4071{
4072 struct net_device *ndev = h->kinfo.netdev;
4073 struct hns3_nic_priv *priv = netdev_priv(ndev);
4074 struct hns3_enet_ring *rx_ring;
4075 int i, j;
4076 int ret;
4077
4078 for (i = 0; i < h->kinfo.num_tqps; i++) {
4079 ret = h->ae_algo->ops->reset_queue(h, i);
4080 if (ret)
4081 return ret;
4082
4083 hns3_init_ring_hw(priv->ring_data[i].ring);
4084
4085
4086
4087
4088 hns3_clear_tx_ring(priv->ring_data[i].ring);
4089 priv->ring_data[i].ring->next_to_clean = 0;
4090 priv->ring_data[i].ring->next_to_use = 0;
4091
4092 rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
4093 hns3_init_ring_hw(rx_ring);
4094 ret = hns3_clear_rx_ring(rx_ring);
4095 if (ret)
4096 return ret;
4097
4098
4099
4100
4101 for (j = 0; j < rx_ring->desc_num; j++)
4102 hns3_reuse_buffer(rx_ring, j);
4103
4104 rx_ring->next_to_clean = 0;
4105 rx_ring->next_to_use = 0;
4106 }
4107
4108 hns3_init_tx_ring_tc(priv);
4109
4110 return 0;
4111}
4112
4113static void hns3_store_coal(struct hns3_nic_priv *priv)
4114{
4115
4116
4117
4118
4119 memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal,
4120 sizeof(struct hns3_enet_coalesce));
4121 memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal,
4122 sizeof(struct hns3_enet_coalesce));
4123}
4124
4125static void hns3_restore_coal(struct hns3_nic_priv *priv)
4126{
4127 u16 vector_num = priv->vector_num;
4128 int i;
4129
4130 for (i = 0; i < vector_num; i++) {
4131 memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal,
4132 sizeof(struct hns3_enet_coalesce));
4133 memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal,
4134 sizeof(struct hns3_enet_coalesce));
4135 }
4136}
4137
4138static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
4139{
4140 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4141 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
4142 struct net_device *ndev = kinfo->netdev;
4143 struct hns3_nic_priv *priv = netdev_priv(ndev);
4144
4145 if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
4146 return 0;
4147
4148
4149
4150
4151
4152 if (hns3_dev_ongoing_func_reset(ae_dev)) {
4153 hns3_remove_hw_addr(ndev);
4154 hns3_del_all_fd_rules(ndev, false);
4155 }
4156
4157 if (!netif_running(ndev))
4158 return 0;
4159
4160 return hns3_nic_net_stop(ndev);
4161}
4162
4163static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
4164{
4165 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
4166 struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
4167 int ret = 0;
4168
4169 clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
4170
4171 if (netif_running(kinfo->netdev)) {
4172 ret = hns3_nic_net_open(kinfo->netdev);
4173 if (ret) {
4174 set_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
4175 netdev_err(kinfo->netdev,
4176 "hns net up fail, ret=%d!\n", ret);
4177 return ret;
4178 }
4179 }
4180
4181 return ret;
4182}
4183
4184static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
4185{
4186 struct net_device *netdev = handle->kinfo.netdev;
4187 struct hns3_nic_priv *priv = netdev_priv(netdev);
4188 int ret;
4189
4190
4191 netif_carrier_off(netdev);
4192
4193 ret = hns3_get_ring_config(priv);
4194 if (ret)
4195 return ret;
4196
4197 ret = hns3_nic_alloc_vector_data(priv);
4198 if (ret)
4199 goto err_put_ring;
4200
4201 hns3_restore_coal(priv);
4202
4203 ret = hns3_nic_init_vector_data(priv);
4204 if (ret)
4205 goto err_dealloc_vector;
4206
4207 ret = hns3_init_all_ring(priv);
4208 if (ret)
4209 goto err_uninit_vector;
4210
4211 ret = hns3_client_start(handle);
4212 if (ret) {
4213 dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
4214 goto err_uninit_ring;
4215 }
4216
4217 set_bit(HNS3_NIC_STATE_INITED, &priv->state);
4218
4219 return ret;
4220
4221err_uninit_ring:
4222 hns3_uninit_all_ring(priv);
4223err_uninit_vector:
4224 hns3_nic_uninit_vector_data(priv);
4225err_dealloc_vector:
4226 hns3_nic_dealloc_vector_data(priv);
4227err_put_ring:
4228 hns3_put_ring_config(priv);
4229
4230 return ret;
4231}
4232
4233static int hns3_reset_notify_restore_enet(struct hnae3_handle *handle)
4234{
4235 struct net_device *netdev = handle->kinfo.netdev;
4236 bool vlan_filter_enable;
4237 int ret;
4238
4239 ret = hns3_init_mac_addr(netdev, false);
4240 if (ret)
4241 return ret;
4242
4243 ret = hns3_recover_hw_addr(netdev);
4244 if (ret)
4245 return ret;
4246
4247 ret = hns3_update_promisc_mode(netdev, handle->netdev_flags);
4248 if (ret)
4249 return ret;
4250
4251 vlan_filter_enable = netdev->flags & IFF_PROMISC ? false : true;
4252 hns3_enable_vlan_filter(netdev, vlan_filter_enable);
4253
4254
4255 if (!(handle->flags & HNAE3_SUPPORT_VF)) {
4256 ret = hns3_restore_vlan(netdev);
4257 if (ret)
4258 return ret;
4259 }
4260
4261 return hns3_restore_fd_rules(netdev);
4262}
4263
4264static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
4265{
4266 struct net_device *netdev = handle->kinfo.netdev;
4267 struct hns3_nic_priv *priv = netdev_priv(netdev);
4268 int ret;
4269
4270 if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
4271 netdev_warn(netdev, "already uninitialized\n");
4272 return 0;
4273 }
4274
4275 hns3_force_clear_all_rx_ring(handle);
4276
4277 hns3_nic_uninit_vector_data(priv);
4278
4279 hns3_store_coal(priv);
4280
4281 ret = hns3_nic_dealloc_vector_data(priv);
4282 if (ret)
4283 netdev_err(netdev, "dealloc vector error\n");
4284
4285 ret = hns3_uninit_all_ring(priv);
4286 if (ret)
4287 netdev_err(netdev, "uninit ring error\n");
4288
4289 hns3_put_ring_config(priv);
4290
4291 return ret;
4292}
4293
4294static int hns3_reset_notify(struct hnae3_handle *handle,
4295 enum hnae3_reset_notify_type type)
4296{
4297 int ret = 0;
4298
4299 switch (type) {
4300 case HNAE3_UP_CLIENT:
4301 ret = hns3_reset_notify_up_enet(handle);
4302 break;
4303 case HNAE3_DOWN_CLIENT:
4304 ret = hns3_reset_notify_down_enet(handle);
4305 break;
4306 case HNAE3_INIT_CLIENT:
4307 ret = hns3_reset_notify_init_enet(handle);
4308 break;
4309 case HNAE3_UNINIT_CLIENT:
4310 ret = hns3_reset_notify_uninit_enet(handle);
4311 break;
4312 case HNAE3_RESTORE_CLIENT:
4313 ret = hns3_reset_notify_restore_enet(handle);
4314 break;
4315 default:
4316 break;
4317 }
4318
4319 return ret;
4320}
4321
4322int hns3_set_channels(struct net_device *netdev,
4323 struct ethtool_channels *ch)
4324{
4325 struct hnae3_handle *h = hns3_get_handle(netdev);
4326 struct hnae3_knic_private_info *kinfo = &h->kinfo;
4327 bool rxfh_configured = netif_is_rxfh_configured(netdev);
4328 u32 new_tqp_num = ch->combined_count;
4329 u16 org_tqp_num;
4330 int ret;
4331
4332 if (ch->rx_count || ch->tx_count)
4333 return -EINVAL;
4334
4335 if (new_tqp_num > hns3_get_max_available_channels(h) ||
4336 new_tqp_num < 1) {
4337 dev_err(&netdev->dev,
4338 "Change tqps fail, the tqp range is from 1 to %d",
4339 hns3_get_max_available_channels(h));
4340 return -EINVAL;
4341 }
4342
4343 if (kinfo->rss_size == new_tqp_num)
4344 return 0;
4345
4346 ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
4347 if (ret)
4348 return ret;
4349
4350 ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT);
4351 if (ret)
4352 return ret;
4353
4354 org_tqp_num = h->kinfo.num_tqps;
4355 ret = h->ae_algo->ops->set_channels(h, new_tqp_num, rxfh_configured);
4356 if (ret) {
4357 ret = h->ae_algo->ops->set_channels(h, org_tqp_num,
4358 rxfh_configured);
4359 if (ret) {
4360
4361 dev_err(&netdev->dev,
4362 "Revert to old tqp num fail, ret=%d", ret);
4363 return ret;
4364 }
4365 dev_info(&netdev->dev,
4366 "Change tqp num fail, Revert to old tqp num");
4367 }
4368 ret = hns3_reset_notify(h, HNAE3_INIT_CLIENT);
4369 if (ret)
4370 return ret;
4371
4372 return hns3_reset_notify(h, HNAE3_UP_CLIENT);
4373}
4374
4375static const struct hnae3_client_ops client_ops = {
4376 .init_instance = hns3_client_init,
4377 .uninit_instance = hns3_client_uninit,
4378 .link_status_change = hns3_link_status_change,
4379 .setup_tc = hns3_client_setup_tc,
4380 .reset_notify = hns3_reset_notify,
4381};
4382
4383
4384
4385
4386
4387static int __init hns3_init_module(void)
4388{
4389 int ret;
4390
4391 pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
4392 pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
4393
4394 client.type = HNAE3_CLIENT_KNIC;
4395 snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
4396 hns3_driver_name);
4397
4398 client.ops = &client_ops;
4399
4400 INIT_LIST_HEAD(&client.node);
4401
4402 hns3_dbg_register_debugfs(hns3_driver_name);
4403
4404 ret = hnae3_register_client(&client);
4405 if (ret)
4406 goto err_reg_client;
4407
4408 ret = pci_register_driver(&hns3_driver);
4409 if (ret)
4410 goto err_reg_driver;
4411
4412 return ret;
4413
4414err_reg_driver:
4415 hnae3_unregister_client(&client);
4416err_reg_client:
4417 hns3_dbg_unregister_debugfs();
4418 return ret;
4419}
4420module_init(hns3_init_module);
4421
4422
4423
4424
4425
4426static void __exit hns3_exit_module(void)
4427{
4428 pci_unregister_driver(&hns3_driver);
4429 hnae3_unregister_client(&client);
4430 hns3_dbg_unregister_debugfs();
4431}
4432module_exit(hns3_exit_module);
4433
4434MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
4435MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
4436MODULE_LICENSE("GPL");
4437MODULE_ALIAS("pci:hns-nic");
4438MODULE_VERSION(HNS3_MOD_VERSION);
4439