// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 */

#include <linux/clk.h>
#include <linux/cpumask.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>

#include "hnae.h"
#include "hns_enet.h"
#include "hns_dsaf_mac.h"

#define NIC_MAX_Q_PER_VF 16
#define HNS_NIC_TX_TIMEOUT (5 * HZ)

#define SERVICE_TIMER_HZ (1 * HZ)

#define RCB_IRQ_NOT_INITED 0
#define RCB_IRQ_INITED 1
#define HNS_BUFFER_SIZE_2048 2048

#define BD_MAX_SEND_SIZE 8191
#define SKB_TMP_LEN(SKB) \
	(((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))

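/* Fill one V2-format TX buffer descriptor. @size is the length recorded in
 * the control block for later unmapping, while @send_sz is the length the
 * hardware actually transmits for this BD (they differ only for TSO BDs).
 */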
static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
			    int send_sz, dma_addr_t dma, int frag_end,
			    int buf_num, enum hns_desc_type type, int mtu)
{
	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct iphdr *iphdr;
	struct ipv6hdr *ipv6hdr;
	struct sk_buff *skb;
	__be16 protocol;
	u8 bn_pid = 0;
	u8 rrcfv = 0;
	u8 ip_offset = 0;
	u8 tvsvsn = 0;
	u16 mss = 0;
	u8 l4_len = 0;
	u16 paylen = 0;

	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	desc->addr = cpu_to_le64(dma);
	desc->tx.send_size = cpu_to_le16((u16)send_sz);

	/* config bd buffer end */
	hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
	hnae_set_field(bn_pid, HNSV2_TXD_BUFNUM_M, 0, buf_num - 1);

	/* fill port_id in the tx bd for sending management packets */
	hnae_set_field(bn_pid, HNSV2_TXD_PORTID_M,
		       HNSV2_TXD_PORTID_S, ring->q->handle->dport_id);

	if (type == DESC_TYPE_SKB) {
		skb = (struct sk_buff *)priv;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_mac_len(skb);
			protocol = skb->protocol;
			ip_offset = ETH_HLEN;

			if (protocol == htons(ETH_P_8021Q)) {
				ip_offset += VLAN_HLEN;
				protocol = vlan_get_protocol(skb);
				skb->protocol = protocol;
			}

			if (skb->protocol == htons(ETH_P_IP)) {
				iphdr = ip_hdr(skb);
				hnae_set_bit(rrcfv, HNSV2_TXD_L3CS_B, 1);
				hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);

				/* check for TCP segmentation offload */
				if (iphdr->protocol == IPPROTO_TCP &&
				    skb_is_gso(skb)) {
					hnae_set_bit(tvsvsn,
						     HNSV2_TXD_TSE_B, 1);
					l4_len = tcp_hdrlen(skb);
					mss = skb_shinfo(skb)->gso_size;
					paylen = skb->len - SKB_TMP_LEN(skb);
				}
			} else if (skb->protocol == htons(ETH_P_IPV6)) {
				hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1);
				ipv6hdr = ipv6_hdr(skb);
				hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);

				/* check for TCP segmentation offload */
				if (ipv6hdr->nexthdr == IPPROTO_TCP &&
				    skb_is_gso(skb) && skb_is_gso_v6(skb)) {
					hnae_set_bit(tvsvsn,
						     HNSV2_TXD_TSE_B, 1);
					l4_len = tcp_hdrlen(skb);
					mss = skb_shinfo(skb)->gso_size;
					paylen = skb->len - SKB_TMP_LEN(skb);
				}
			}
			desc->tx.ip_offset = ip_offset;
			desc->tx.tse_vlan_snap_v6_sctp_nth = tvsvsn;
			desc->tx.mss = cpu_to_le16(mss);
			desc->tx.l4_len = l4_len;
			desc->tx.paylen = cpu_to_le16(paylen);
		}
	}

	hnae_set_bit(rrcfv, HNSV2_TXD_FE_B, frag_end);

	desc->tx.bn_pid = bn_pid;
	desc->tx.ra_ri_cs_fe_vld = rrcfv;

	ring_ptr_move_fw(ring, next_to_use);
}

static void fill_v2_desc(struct hnae_ring *ring, void *priv,
			 int size, dma_addr_t dma, int frag_end,
			 int buf_num, enum hns_desc_type type, int mtu)
{
	fill_v2_desc_hw(ring, priv, size, size, dma, frag_end,
			buf_num, type, mtu);
}

static const struct acpi_device_id hns_enet_acpi_match[] = {
	{ "HISI00C1", 0 },
	{ "HISI00C2", 0 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, hns_enet_acpi_match);

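/* Fill one V1-format TX buffer descriptor; V1 hardware has no TSO support,
 * so only the checksum-offload flags and the IP header offset are
 * programmed.
 */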
static void fill_desc(struct hnae_ring *ring, void *priv,
		      int size, dma_addr_t dma, int frag_end,
		      int buf_num, enum hns_desc_type type, int mtu)
{
	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct sk_buff *skb;
	__be16 protocol;
	u32 ip_offset;
	u32 asid_bufnum_pid = 0;
	u32 flag_ipoffset = 0;

	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	desc->addr = cpu_to_le64(dma);
	desc->tx.send_size = cpu_to_le16((u16)size);

	/* config bd buffer end */
	flag_ipoffset |= 1 << HNS_TXD_VLD_B;

	asid_bufnum_pid |= buf_num << HNS_TXD_BUFNUM_S;

	if (type == DESC_TYPE_SKB) {
		skb = (struct sk_buff *)priv;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			protocol = skb->protocol;
			ip_offset = ETH_HLEN;

			/* if it is a SW VLAN, check the next protocol */
			if (protocol == htons(ETH_P_8021Q)) {
				ip_offset += VLAN_HLEN;
				protocol = vlan_get_protocol(skb);
				skb->protocol = protocol;
			}

			if (skb->protocol == htons(ETH_P_IP)) {
				flag_ipoffset |= 1 << HNS_TXD_L3CS_B;
				/* check for tcp socket type */
				flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
			} else if (skb->protocol == htons(ETH_P_IPV6)) {
				/* ipv6 has no L3 cs, check for L4 header */
				flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
			}

			flag_ipoffset |= ip_offset << HNS_TXD_IPOFFSET_S;
		}
	}

	flag_ipoffset |= frag_end << HNS_TXD_FE_B;

	desc->tx.asid_bufnum_pid = cpu_to_le16(asid_bufnum_pid);
	desc->tx.flag_ipoffset = cpu_to_le32(flag_ipoffset);

	ring_ptr_move_fw(ring, next_to_use);
}

static void unfill_desc(struct hnae_ring *ring)
{
	ring_ptr_move_bw(ring, next_to_use);
}

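/* Ensure the ring has room for the skb: a packet with more fragments than
 * the ring supports per packet is linearized (copied) into a single buffer;
 * returns -EBUSY when descriptors are exhausted so the queue can be stopped.
 */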
static int hns_nic_maybe_stop_tx(
	struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
{
	struct sk_buff *skb = *out_skb;
	struct sk_buff *new_skb = NULL;
	int buf_num;

	/* no. of segments (plus a header) */
	buf_num = skb_shinfo(skb)->nr_frags + 1;

	if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
		if (ring_space(ring) < 1)
			return -EBUSY;

		new_skb = skb_copy(skb, GFP_ATOMIC);
		if (!new_skb)
			return -ENOMEM;

		dev_kfree_skb_any(skb);
		*out_skb = new_skb;
		buf_num = 1;
	} else if (buf_num > ring_space(ring)) {
		return -EBUSY;
	}

	*bnum = buf_num;
	return 0;
}

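/* TSO variant: each fragment may need several BDs because one BD carries at
 * most BD_MAX_SEND_SIZE bytes; fall back to a linear copy when the BD count
 * exceeds what the ring allows for a single packet.
 */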
static int hns_nic_maybe_stop_tso(
	struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
{
	int i;
	int size;
	int buf_num;
	int frag_num;
	struct sk_buff *skb = *out_skb;
	struct sk_buff *new_skb = NULL;
	struct skb_frag_struct *frag;

	size = skb_headlen(skb);
	buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;

	frag_num = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < frag_num; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		size = skb_frag_size(frag);
		buf_num += (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
	}

	if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
		buf_num = (skb->len + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
		if (ring_space(ring) < buf_num)
			return -EBUSY;

		new_skb = skb_copy(skb, GFP_ATOMIC);
		if (!new_skb)
			return -ENOMEM;
		dev_kfree_skb_any(skb);
		*out_skb = new_skb;

	} else if (ring_space(ring) < buf_num) {
		return -EBUSY;
	}

	*bnum = buf_num;
	return 0;
}

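/* Split one mapped buffer into BD_MAX_SEND_SIZE-sized BDs. Only the first
 * BD keeps the full length in its control block (for later unmapping) and
 * the DESC_TYPE_SKB type; the tail BDs are typed as pages with length 0.
 */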
static void fill_tso_desc(struct hnae_ring *ring, void *priv,
			  int size, dma_addr_t dma, int frag_end,
			  int buf_num, enum hns_desc_type type, int mtu)
{
	int frag_buf_num;
	int sizeoflast;
	int k;

	frag_buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
	sizeoflast = size % BD_MAX_SEND_SIZE;
	sizeoflast = sizeoflast ? sizeoflast : BD_MAX_SEND_SIZE;

	/* when the frag size is bigger than hardware, split this frag */
	for (k = 0; k < frag_buf_num; k++)
		fill_v2_desc_hw(ring, priv, k == 0 ? size : 0,
				(k == frag_buf_num - 1) ?
					sizeoflast : BD_MAX_SEND_SIZE,
				dma + BD_MAX_SEND_SIZE * k,
				frag_end && (k == frag_buf_num - 1) ? 1 : 0,
				buf_num,
				(type == DESC_TYPE_SKB && !k) ?
					DESC_TYPE_SKB : DESC_TYPE_PAGE,
				mtu);
}

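/* Map and queue one skb on the given TX ring: the head buffer first, then
 * each page fragment; on a mapping failure every BD filled so far is
 * unwound and unmapped before the skb is dropped.
 */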
netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
				struct sk_buff *skb,
				struct hns_nic_ring_data *ring_data)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_ring *ring = ring_data->ring;
	struct device *dev = ring_to_dev(ring);
	struct netdev_queue *dev_queue;
	struct skb_frag_struct *frag;
	int buf_num;
	int seg_num;
	dma_addr_t dma;
	int size, next_to_use;
	int i;

	switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
	case -EBUSY:
		ring->stats.tx_busy++;
		goto out_net_tx_busy;
	case -ENOMEM:
		ring->stats.sw_err_cnt++;
		netdev_err(ndev, "no memory to xmit!\n");
		goto out_err_tx_ok;
	default:
		break;
	}

	/* no. of segments (plus a header) */
	seg_num = skb_shinfo(skb)->nr_frags + 1;
	next_to_use = ring->next_to_use;

	/* fill the first part */
	size = skb_headlen(skb);
	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		netdev_err(ndev, "TX head DMA map failed\n");
		ring->stats.sw_err_cnt++;
		goto out_err_tx_ok;
	}
	priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
			    buf_num, DESC_TYPE_SKB, ndev->mtu);

	/* fill the fragments */
	for (i = 1; i < seg_num; i++) {
		frag = &skb_shinfo(skb)->frags[i - 1];
		size = skb_frag_size(frag);
		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma)) {
			netdev_err(ndev, "TX frag(%d) DMA map failed\n", i);
			ring->stats.sw_err_cnt++;
			goto out_map_frag_fail;
		}
		priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
				    seg_num - 1 == i ? 1 : 0, buf_num,
				    DESC_TYPE_PAGE, ndev->mtu);
	}

	/* complete translate all packets */
	dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);
	netdev_tx_sent_queue(dev_queue, skb->len);

	netif_trans_update(ndev);
	ndev->stats.tx_bytes += skb->len;
	ndev->stats.tx_packets++;

	wmb(); /* commit all data before submit */
	assert(skb->queue_mapping < priv->ae_handle->q_num);
	hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);

	return NETDEV_TX_OK;

out_map_frag_fail:
	/* unwind and unmap everything that was filled so far */
	while (ring->next_to_use != next_to_use) {
		unfill_desc(ring);
		if (ring->next_to_use != next_to_use)
			dma_unmap_page(dev,
				       ring->desc_cb[ring->next_to_use].dma,
				       ring->desc_cb[ring->next_to_use].length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(dev,
					 ring->desc_cb[next_to_use].dma,
					 ring->desc_cb[next_to_use].length,
					 DMA_TO_DEVICE);
	}

out_err_tx_ok:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;

out_net_tx_busy:
	netif_stop_subqueue(ndev, skb->queue_mapping);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();
	return NETDEV_TX_BUSY;
}

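/* Attach one RX descriptor's page to the skb and decide whether the page
 * can be recycled into the ring: in two-buffer mode the offset is flipped
 * between the two halves, otherwise it advances until the page is full.
 */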
static void hns_nic_reuse_page(struct sk_buff *skb, int i,
			       struct hnae_ring *ring, int pull_len,
			       struct hnae_desc_cb *desc_cb)
{
	struct hnae_desc *desc;
	u32 truesize;
	int size;
	int last_offset;
	bool twobufs;

	twobufs = ((PAGE_SIZE < 8192) &&
		   hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048);

	desc = &ring->desc[ring->next_to_clean];
	size = le16_to_cpu(desc->rx.size);

	if (twobufs) {
		truesize = hnae_buf_size(ring);
	} else {
		truesize = ALIGN(size, L1_CACHE_BYTES);
		last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
	}

	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
			size - pull_len, truesize);

	/* avoid re-using remote pages, flag default unreuse */
	if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
		return;

	if (twobufs) {
		/* if we are the only owner of the page we can reuse it */
		if (likely(page_count(desc_cb->priv) == 1)) {
			/* flip page offset to the other buffer */
			desc_cb->page_offset ^= truesize;

			desc_cb->reuse_flag = 1;
			/* bump ref count on page before it is given */
			get_page(desc_cb->priv);
		}
		return;
	}

	/* move offset up to the next cache line */
	desc_cb->page_offset += truesize;

	if (desc_cb->page_offset <= last_offset) {
		desc_cb->reuse_flag = 1;
		/* bump ref count on page before it is given */
		get_page(desc_cb->priv);
	}
}

static void get_v2rx_desc_bnum(u32 bnum_flag, int *out_bnum)
{
	*out_bnum = hnae_get_field(bnum_flag,
				   HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S) + 1;
}

static void get_rx_desc_bnum(u32 bnum_flag, int *out_bnum)
{
	*out_bnum = hnae_get_field(bnum_flag,
				   HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S);
}

static void hns_nic_rx_checksum(struct hns_nic_ring_data *ring_data,
				struct sk_buff *skb, u32 flag)
{
	struct net_device *netdev = ring_data->napi.dev;
	u32 l3id;
	u32 l4id;

	/* check if RX checksum offload is enabled */
	if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
		return;

	/* In hardware, we only support checksum for the following protocols:
	 * 1) IPv4,
	 * 2) TCP(over IPv4 or IPv6),
	 * 3) UDP(over IPv4 or IPv6),
	 * 4) SCTP(over IPv4 or IPv6)
	 * but we support many L3(IPv4, IPv6, MPLS, PPPoE etc) and L4(TCP,
	 * UDP, GRE, SCTP, IGMP, ICMP etc.) protocols.
	 *
	 * Hardware limitation:
	 * Our present hardware RX Descriptor lacks L3/L4 checksum "Status &
	 * Error" bit (which usually can be used to indicate whether checksum
	 * was calculated by the hardware and if there was any error
	 * encountered during checksum calculation).
	 *
	 * Software workaround:
	 * We do get info within the RX descriptor about the kind of L3/L4
	 * protocol coming in the packet and the error status. These errors
	 * might not just be checksum errors but could be related to version,
	 * length of IPv4, UDP, TCP etc.
	 * Because there is no way of knowing if it is an L3/L4 error due to
	 * bad checksum or any other L3/L4 error, we will not (cannot) convey
	 * checksum status for such cases to the upper stack and will not
	 * maintain the RX L3/L4 checksum counters as well.
	 */

	l3id = hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S);
	l4id = hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S);

	/* check L3 protocol for which checksum is supported */
	if ((l3id != HNS_RX_FLAG_L3ID_IPV4) && (l3id != HNS_RX_FLAG_L3ID_IPV6))
		return;

	/* check for any(not just checksum)flagged L3 protocol errors */
	if (unlikely(hnae_get_bit(flag, HNS_RXD_L3E_B)))
		return;

	/* we do not support checksum of fragmented packets */
	if (unlikely(hnae_get_bit(flag, HNS_RXD_FRAG_B)))
		return;

	/* check L4 protocol for which checksum is supported */
	if ((l4id != HNS_RX_FLAG_L4ID_TCP) &&
	    (l4id != HNS_RX_FLAG_L4ID_UDP) &&
	    (l4id != HNS_RX_FLAG_L4ID_SCTP))
		return;

	/* check for any(not just checksum)flagged L4 protocol errors */
	if (unlikely(hnae_get_bit(flag, HNS_RXD_L4E_B)))
		return;

	/* now, this has to be a packet with valid RX checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

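/* Pull one packet off the RX ring into a fresh skb: short frames are copied
 * entirely into the skb head, longer ones keep the header in the head and
 * attach the remaining descriptors' pages as frags.
 */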
static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
			       struct sk_buff **out_skb, int *out_bnum)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	struct hnae_desc *desc;
	struct hnae_desc_cb *desc_cb;
	unsigned char *va;
	int bnum, length, i;
	int pull_len;
	u32 bnum_flag;

	desc = &ring->desc[ring->next_to_clean];
	desc_cb = &ring->desc_cb[ring->next_to_clean];

	prefetch(desc);

	va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;

	/* prefetch first cache line of first page */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch(va + L1_CACHE_BYTES);
#endif

	skb = *out_skb = napi_alloc_skb(&ring_data->napi,
					HNS_RX_HEAD_SIZE);
	if (unlikely(!skb)) {
		netdev_err(ndev, "alloc rx skb fail\n");
		ring->stats.sw_err_cnt++;
		return -ENOMEM;
	}

	prefetchw(skb->data);
	length = le16_to_cpu(desc->rx.pkt_len);
	bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
	priv->ops.get_rxd_bnum(bnum_flag, &bnum);
	*out_bnum = bnum;

	if (length <= HNS_RX_HEAD_SIZE) {
		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));

		/* we can reuse buffer as-is, just make sure it is local */
		if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
			desc_cb->reuse_flag = 1;
		else /* this page cannot be reused so discard it */
			put_page(desc_cb->priv);

		ring_ptr_move_fw(ring, next_to_clean);

		if (unlikely(bnum != 1)) { /* check err */
			*out_bnum = 1;
			goto out_bnum_err;
		}
	} else {
		ring->stats.seg_pkt_cnt++;

		pull_len = eth_get_headlen(ndev, va, HNS_RX_HEAD_SIZE);
		memcpy(__skb_put(skb, pull_len), va,
		       ALIGN(pull_len, sizeof(long)));

		hns_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
		ring_ptr_move_fw(ring, next_to_clean);

		if (unlikely(bnum >= (int)MAX_SKB_FRAGS)) { /* check err */
			*out_bnum = 1;
			goto out_bnum_err;
		}
		for (i = 1; i < bnum; i++) {
			desc = &ring->desc[ring->next_to_clean];
			desc_cb = &ring->desc_cb[ring->next_to_clean];

			hns_nic_reuse_page(skb, i, ring, 0, desc_cb);
			ring_ptr_move_fw(ring, next_to_clean);
		}
	}

	/* check except process, free skb and jump the desc */
	if (unlikely((!bnum) || (bnum > ring->max_desc_num_per_pkt))) {
out_bnum_err:
		*out_bnum = *out_bnum ? *out_bnum : 1; /* ntc moved, cannot be 0 */
		netdev_err(ndev, "invalid bnum(%d,%d,%d,%d),%016llx,%016llx\n",
			   bnum, ring->max_desc_num_per_pkt,
			   length, (int)MAX_SKB_FRAGS,
			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		ring->stats.err_bd_num++;
		dev_kfree_skb_any(skb);
		return -EDOM;
	}

	bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);

	if (unlikely(!hnae_get_bit(bnum_flag, HNS_RXD_VLD_B))) {
		netdev_err(ndev, "no valid bd,%016llx,%016llx\n",
			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		ring->stats.non_vld_descs++;
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (unlikely((!desc->rx.pkt_len) ||
		     hnae_get_bit(bnum_flag, HNS_RXD_DROP_B))) {
		ring->stats.err_pkt_len++;
		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L2E_B))) {
		ring->stats.l2_err++;
		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	ring->stats.rx_pkts++;
	ring->stats.rx_bytes += skb->len;

	/* indicate to upper stack if our hardware has already calculated
	 * the RX checksum
	 */
	hns_nic_rx_checksum(ring_data, skb, bnum_flag);

	return 0;
}

static void
hns_nic_alloc_rx_buffers(struct hns_nic_ring_data *ring_data, int cleand_count)
{
	int i, ret;
	struct hnae_desc_cb res_cbs;
	struct hnae_desc_cb *desc_cb;
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;

	for (i = 0; i < cleand_count; i++) {
		desc_cb = &ring->desc_cb[ring->next_to_use];
		if (desc_cb->reuse_flag) {
			ring->stats.reuse_pg_cnt++;
			hnae_reuse_buffer(ring, ring->next_to_use);
		} else {
			ret = hnae_reserve_buffer_map(ring, &res_cbs);
			if (ret) {
				ring->stats.sw_err_cnt++;
				netdev_err(ndev, "hnae reserve buffer map failed.\n");
				break;
			}
			hnae_replace_buffer(ring, ring->next_to_use, &res_cbs);
		}

		ring_ptr_move_fw(ring, next_to_use);
	}

	wmb(); /* make all data has been written before submit */
	writel_relaxed(i, ring->io_base + RCB_REG_HEAD);
}

static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
			      struct sk_buff *skb)
{
	struct net_device *ndev = ring_data->napi.dev;

	skb->protocol = eth_type_trans(skb, ndev);
	(void)napi_gro_receive(&ring_data->napi, skb);
}

static int hns_desc_unused(struct hnae_ring *ring)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;

	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
}

#define HNS_LOWEST_LATENCY_RATE		27	/* 27 MB/s */
#define HNS_LOW_LATENCY_RATE		80	/* 80 MB/s */

#define HNS_COAL_BDNUM			3

static u32 hns_coal_rx_bdnum(struct hnae_ring *ring)
{
	bool coal_enable = ring->q->handle->coal_adapt_en;

	if (coal_enable &&
	    ring->coal_last_rx_bytes > HNS_LOWEST_LATENCY_RATE)
		return HNS_COAL_BDNUM;
	else
		return 0;
}

static void hns_update_rx_rate(struct hnae_ring *ring)
{
	bool coal_enable = ring->q->handle->coal_adapt_en;
	u32 time_passed_ms;
	u64 total_bytes;

	if (!coal_enable ||
	    time_before(jiffies, ring->coal_last_jiffies + (HZ >> 4)))
		return;

	/* ring->stats.rx_bytes overflowed */
	if (ring->coal_last_rx_bytes > ring->stats.rx_bytes) {
		ring->coal_last_rx_bytes = ring->stats.rx_bytes;
		ring->coal_last_jiffies = jiffies;
		return;
	}

	total_bytes = ring->stats.rx_bytes - ring->coal_last_rx_bytes;
	time_passed_ms = jiffies_to_msecs(jiffies - ring->coal_last_jiffies);
	do_div(total_bytes, time_passed_ms);
	ring->coal_rx_rate = total_bytes >> 10;

	ring->coal_last_rx_bytes = ring->stats.rx_bytes;
	ring->coal_last_jiffies = jiffies;
}

/**
 * smooth_alg - smoothing algorithm for adjusting the coalesce parameter
 * @new_param: new value
 * @old_param: old value
 */
static u32 smooth_alg(u32 new_param, u32 old_param)
{
	u32 gap = (new_param > old_param) ? new_param - old_param
					  : old_param - new_param;

	if (gap > 8)
		gap >>= 3;

	if (new_param > old_param)
		return old_param + gap;
	else
		return old_param - gap;
}

/**
 * hns_nic_adpt_coalesce - self-adapt the coalesce parameter to the rx rate
 * @ring_data: pointer to hns_nic_ring_data structure
 */
static void hns_nic_adpt_coalesce(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	struct hnae_handle *handle = ring->q->handle;
	u32 new_coal_param, old_coal_param = ring->coal_param;

	if (ring->coal_rx_rate < HNS_LOWEST_LATENCY_RATE)
		new_coal_param = HNAE_LOWEST_LATENCY_COAL_PARAM;
	else if (ring->coal_rx_rate < HNS_LOW_LATENCY_RATE)
		new_coal_param = HNAE_LOW_LATENCY_COAL_PARAM;
	else
		new_coal_param = HNAE_BULK_LATENCY_COAL_PARAM;

	if (new_coal_param == old_coal_param &&
	    new_coal_param == handle->coal_param)
		return;

	new_coal_param = smooth_alg(new_coal_param, old_coal_param);
	ring->coal_param = new_coal_param;

	/* Because all the rings of one handle share a single hardware
	 * coalesce setting, only write it when this ring raises the value,
	 * when this ring was the last one to set it, or when enough time
	 * has passed since the last update; otherwise the rings would
	 * fight over the shared value.
	 */
	if (new_coal_param == handle->coal_param) {
		handle->coal_last_jiffies = jiffies;
		handle->coal_ring_idx = ring_data->queue_index;
	} else if (new_coal_param > handle->coal_param ||
		   handle->coal_ring_idx == ring_data->queue_index ||
		   time_after(jiffies, handle->coal_last_jiffies + (HZ >> 4))) {
		handle->dev->ops->set_coalesce_usecs(handle,
						     new_coal_param);
		handle->dev->ops->set_coalesce_frames(handle,
						      1, new_coal_param);
		handle->coal_param = new_coal_param;
		handle->coal_ring_idx = ring_data->queue_index;
		handle->coal_last_jiffies = jiffies;
	}
}

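/* NAPI RX poll: consume up to @budget packets, refilling RX buffers in
 * batches of RCB_NOF_ALLOC_RX_BUFF_ONCE; @v is the per-packet delivery
 * callback (normally hns_nic_rx_up_pro).
 */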
static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
			       int budget, void *v)
{
	struct hnae_ring *ring = ring_data->ring;
	struct sk_buff *skb;
	int num, bnum;
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
	int recv_pkts, recv_bds, clean_count, err;
	int unused_count = hns_desc_unused(ring);

	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
	rmb(); /* make sure num taken effect before the other data is touched */

	recv_pkts = 0, recv_bds = 0, clean_count = 0;
	num -= unused_count;

	while (recv_pkts < budget && recv_bds < num) {
		/* reuse or realloc buffers */
		if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
			hns_nic_alloc_rx_buffers(ring_data,
						 clean_count + unused_count);
			clean_count = 0;
			unused_count = hns_desc_unused(ring);
		}

		/* poll one pkt */
		err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum);
		if (unlikely(!skb)) /* this fault cannot be repaired */
			goto out;

		recv_bds += bnum;
		clean_count += bnum;
		if (unlikely(err)) {  /* do jump the err */
			recv_pkts++;
			continue;
		}

		/* do update ip stack process */
		((void (*)(struct hns_nic_ring_data *, struct sk_buff *))v)(
							ring_data, skb);
		recv_pkts++;
	}

out:
	/* make all data have been written before submit */
	if (clean_count + unused_count > 0)
		hns_nic_alloc_rx_buffers(ring_data,
					 clean_count + unused_count);

	return recv_pkts;
}

static bool hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int num = 0;
	bool rx_stopped;

	hns_update_rx_rate(ring);

	/* for hardware bug fixed */
	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);

	if (num <= hns_coal_rx_bdnum(ring)) {
		if (ring->q->handle->coal_adapt_en)
			hns_nic_adpt_coalesce(ring_data);

		rx_stopped = true;
	} else {
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring_data->ring, 1);

		rx_stopped = false;
	}

	return rx_stopped;
}

static bool hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int num;

	hns_update_rx_rate(ring);
	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);

	if (num <= hns_coal_rx_bdnum(ring)) {
		if (ring->q->handle->coal_adapt_en)
			hns_nic_adpt_coalesce(ring_data);

		return true;
	}

	return false;
}

static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
					    int *bytes, int *pkts)
{
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];

	(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
	(*bytes) += desc_cb->length;
	/* desc_cb will be cleaned, with hnae_free_buffer_detach */
	hnae_free_buffer_detach(ring, ring->next_to_clean);

	ring_ptr_move_fw(ring, next_to_clean);
}

static int is_valid_clean_head(struct hnae_ring *ring, int h)
{
	int u = ring->next_to_use;
	int c = ring->next_to_clean;

	if (unlikely(h > ring->desc_num))
		return 0;

	assert(u > 0 && u < ring->desc_num);
	assert(c > 0 && c < ring->desc_num);
	assert(u != c && h != c); /* must be checked before call this func */

	return u > c ? (h > c && h <= u) : (h > c || h <= u);
}

/* netif_tx_lock will turn down the performance, set only when necessary */
#ifdef CONFIG_NET_POLL_CONTROLLER
#define NETIF_TX_LOCK(ring) spin_lock(&(ring)->lock)
#define NETIF_TX_UNLOCK(ring) spin_unlock(&(ring)->lock)
#else
#define NETIF_TX_LOCK(ring)
#define NETIF_TX_UNLOCK(ring)
#endif

/* reclaim all desc in one budget
 * return error or number of desc left
 */
static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
			       int budget, void *v)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct netdev_queue *dev_queue;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	int head;
	int bytes, pkts;

	NETIF_TX_LOCK(ring);

	head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
	rmb(); /* make sure head is ready before touching any data */

	if (is_ring_empty(ring) || head == ring->next_to_clean) {
		NETIF_TX_UNLOCK(ring);
		return 0; /* no data to poll */
	}

	if (!is_valid_clean_head(ring, head)) {
		netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
			   ring->next_to_use, ring->next_to_clean);
		ring->stats.io_err_cnt++;
		NETIF_TX_UNLOCK(ring);
		return -EIO;
	}

	bytes = 0;
	pkts = 0;
	while (head != ring->next_to_clean) {
		hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
		/* issue prefetch for next Tx descriptor */
		prefetch(&ring->desc_cb[ring->next_to_clean]);
	}

	ring->stats.tx_pkts += pkts;
	ring->stats.tx_bytes += bytes;

	NETIF_TX_UNLOCK(ring);

	dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
	netdev_tx_completed_queue(dev_queue, pkts, bytes);

	if (unlikely(priv->link && !netif_carrier_ok(ndev)))
		netif_carrier_on(ndev);

	if (unlikely(pkts && netif_carrier_ok(ndev) &&
		     (ring_space(ring) >= ring->max_desc_num_per_pkt * 2))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_tx_queue_stopped(dev_queue) &&
		    !test_bit(NIC_STATE_DOWN, &priv->state)) {
			netif_tx_wake_queue(dev_queue);
			ring->stats.restart_queue++;
		}
	}
	return 0;
}

static bool hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int head;

	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);

	head = readl_relaxed(ring->io_base + RCB_REG_HEAD);

	if (head != ring->next_to_clean) {
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring_data->ring, 1);

		return false;
	}

	return true;
}

static bool hns_nic_tx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);

	return head == ring->next_to_clean;
}

static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct netdev_queue *dev_queue;
	int head;
	int bytes, pkts;

	NETIF_TX_LOCK(ring);

	head = ring->next_to_use; /* ntu: software-set ring position */
	bytes = 0;
	pkts = 0;
	while (head != ring->next_to_clean)
		hns_nic_reclaim_one_desc(ring, &bytes, &pkts);

	NETIF_TX_UNLOCK(ring);

	dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
	netdev_tx_reset_queue(dev_queue);
}

static int hns_nic_common_poll(struct napi_struct *napi, int budget)
{
	int clean_complete = 0;
	struct hns_nic_ring_data *ring_data =
		container_of(napi, struct hns_nic_ring_data, napi);
	struct hnae_ring *ring = ring_data->ring;

try_again:
	clean_complete += ring_data->poll_one(
				ring_data, budget - clean_complete,
				ring_data->ex_process);

	if (clean_complete < budget) {
		if (ring_data->fini_process(ring_data)) {
			napi_complete(napi);
			ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
		} else {
			goto try_again;
		}
	}

	return clean_complete;
}

static irqreturn_t hns_irq_handle(int irq, void *dev)
{
	struct hns_nic_ring_data *ring_data = (struct hns_nic_ring_data *)dev;

	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
		ring_data->ring, 1);
	napi_schedule(&ring_data->napi);

	return IRQ_HANDLED;
}

/**
 * hns_nic_adjust_link - adjust the link status by the PHY state or new params
 * @ndev: net device
 */
static void hns_nic_adjust_link(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int state = 1;

	/* If there is no phy, there is nothing to adjust */
	if (ndev->phydev) {
		/* When the phy link is down, do nothing */
		if (ndev->phydev->link == 0)
			return;

		if (h->dev->ops->need_adjust_link(h, ndev->phydev->speed,
						  ndev->phydev->duplex)) {
			/* bring the carrier down while the MAC is being
			 * reconfigured so the peer resynchronizes against
			 * the new speed/duplex settings
			 */
			netif_carrier_off(ndev);
			msleep(200);
			h->dev->ops->adjust_link(h, ndev->phydev->speed,
						 ndev->phydev->duplex);
			netif_carrier_on(ndev);
		}
	}

	state = state && h->dev->ops->get_status(h);

	if (state != priv->link) {
		if (state) {
			netif_carrier_on(ndev);
			netif_tx_wake_all_queues(ndev);
			netdev_info(ndev, "link up\n");
		} else {
			netif_carrier_off(ndev);
			netdev_info(ndev, "link down\n");
		}
		priv->link = state;
	}
}

/**
 * hns_nic_init_phy - init phy
 * @ndev: net device
 * @h: ae handle
 * Return 0 on success, negative on failure
 */
int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
	struct phy_device *phy_dev = h->phy_dev;
	int ret;

	if (!h->phy_dev)
		return 0;

	ethtool_convert_legacy_u32_to_link_mode(supported, h->if_support);
	linkmode_and(phy_dev->supported, phy_dev->supported, supported);
	linkmode_copy(phy_dev->advertising, phy_dev->supported);

	if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
		phy_dev->autoneg = false;

	if (h->phy_if != PHY_INTERFACE_MODE_XGMII) {
		phy_dev->dev_flags = 0;

		ret = phy_connect_direct(ndev, phy_dev, hns_nic_adjust_link,
					 h->phy_if);
	} else {
		ret = phy_attach_direct(ndev, phy_dev, 0, h->phy_if);
	}
	if (unlikely(ret))
		return -ENODEV;

	return 0;
}

static int hns_nic_ring_open(struct net_device *netdev, int idx)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	napi_enable(&priv->ring_data[idx].napi);

	enable_irq(priv->ring_data[idx].ring->irq);
	h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 0);

	return 0;
}

static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct sockaddr *mac_addr = p;
	int ret;

	if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = h->dev->ops->set_mac_addr(h, mac_addr->sa_data);
	if (ret) {
		netdev_err(ndev, "set_mac_address fail, ret=%d!\n", ret);
		return ret;
	}

	memcpy(ndev->dev_addr, mac_addr->sa_data, ndev->addr_len);

	return 0;
}

static void hns_nic_update_stats(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	h->dev->ops->update_stats(h, &netdev->stats);
}

/* set mac addr if it is configured, or leave it to the AE driver */
static void hns_init_mac_addr(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (!device_get_mac_address(priv->dev, ndev->dev_addr, ETH_ALEN)) {
		eth_hw_addr_random(ndev);
		dev_warn(priv->dev, "No valid mac, use random mac %pM",
			 ndev->dev_addr);
	}
}

static void hns_nic_ring_close(struct net_device *netdev, int idx)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 1);
	disable_irq(priv->ring_data[idx].ring->irq);

	napi_disable(&priv->ring_data[idx].napi);
}

static int hns_nic_init_affinity_mask(int q_num, int ring_idx,
				      struct hnae_ring *ring, cpumask_t *mask)
{
	int cpu;

	/* Different irq balance between 16-core and 32-core systems.
	 * The cpu mask is set by the ring index according to the ring flag,
	 * which indicates whether the ring is a tx or an rx ring.
	 */
	if (q_num == num_possible_cpus()) {
		if (is_tx_ring(ring))
			cpu = ring_idx;
		else
			cpu = ring_idx - q_num;
	} else {
		if (is_tx_ring(ring))
			cpu = ring_idx * 2;
		else
			cpu = (ring_idx - q_num) * 2 + 1;
	}

	cpumask_clear(mask);
	cpumask_set_cpu(cpu, mask);

	return cpu;
}

static void hns_nic_free_irq(int q_num, struct hns_nic_priv *priv)
{
	int i;

	for (i = 0; i < q_num * 2; i++) {
		if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
			irq_set_affinity_hint(priv->ring_data[i].ring->irq,
					      NULL);
			free_irq(priv->ring_data[i].ring->irq,
				 &priv->ring_data[i]);
			priv->ring_data[i].ring->irq_init_flag =
				RCB_IRQ_NOT_INITED;
		}
	}
}

static int hns_nic_init_irq(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hns_nic_ring_data *rd;
	int i;
	int ret;
	int cpu;

	for (i = 0; i < h->q_num * 2; i++) {
		rd = &priv->ring_data[i];

		if (rd->ring->irq_init_flag == RCB_IRQ_INITED)
			break;

		snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN,
			 "%s-%s%d", priv->netdev->name,
			 (is_tx_ring(rd->ring) ? "tx" : "rx"), rd->queue_index);

		rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';

		ret = request_irq(rd->ring->irq,
				  hns_irq_handle, 0, rd->ring->ring_name, rd);
		if (ret) {
			netdev_err(priv->netdev, "request irq(%d) fail\n",
				   rd->ring->irq);
			goto out_free_irq;
		}
		disable_irq(rd->ring->irq);

		cpu = hns_nic_init_affinity_mask(h->q_num, i,
						 rd->ring, &rd->mask);

		if (cpu_online(cpu))
			irq_set_affinity_hint(rd->ring->irq,
					      &rd->mask);

		rd->ring->irq_init_flag = RCB_IRQ_INITED;
	}

	return 0;

out_free_irq:
	hns_nic_free_irq(h->q_num, priv);
	return ret;
}

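/* Bring the data path up: request and pin the per-ring IRQs, enable NAPI on
 * each ring, program the MAC address and start the hardware; on any failure
 * the already-opened rings are closed again in reverse order.
 */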
static int hns_nic_net_up(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int i, j;
	int ret;

	if (!test_bit(NIC_STATE_DOWN, &priv->state))
		return 0;

	ret = hns_nic_init_irq(priv);
	if (ret != 0) {
		netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);
		return ret;
	}

	for (i = 0; i < h->q_num * 2; i++) {
		ret = hns_nic_ring_open(ndev, i);
		if (ret)
			goto out_has_some_queues;
	}

	ret = h->dev->ops->set_mac_addr(h, ndev->dev_addr);
	if (ret)
		goto out_set_mac_addr_err;

	ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
	if (ret)
		goto out_start_err;

	if (ndev->phydev)
		phy_start(ndev->phydev);

	clear_bit(NIC_STATE_DOWN, &priv->state);
	(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);

	return 0;

out_start_err:
	netif_stop_queue(ndev);
out_set_mac_addr_err:
out_has_some_queues:
	for (j = i - 1; j >= 0; j--)
		hns_nic_ring_close(ndev, j);

	hns_nic_free_irq(h->q_num, priv);
	set_bit(NIC_STATE_DOWN, &priv->state);

	return ret;
}

static void hns_nic_net_down(struct net_device *ndev)
{
	int i;
	struct hnae_ae_ops *ops;
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (test_and_set_bit(NIC_STATE_DOWN, &priv->state))
		return;

	(void)del_timer_sync(&priv->service_timer);
	netif_tx_stop_all_queues(ndev);
	netif_carrier_off(ndev);
	netif_tx_disable(ndev);
	priv->link = 0;

	if (ndev->phydev)
		phy_stop(ndev->phydev);

	ops = priv->ae_handle->dev->ops;

	if (ops->stop)
		ops->stop(priv->ae_handle);

	netif_tx_stop_all_queues(ndev);

	for (i = priv->ae_handle->q_num - 1; i >= 0; i--) {
		hns_nic_ring_close(ndev, i);
		hns_nic_ring_close(ndev, i + priv->ae_handle->q_num);

		/* clean tx buffers */
		hns_nic_tx_clr_all_bufs(priv->ring_data + i);
	}
}

void hns_nic_net_reset(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *handle = priv->ae_handle;

	while (test_and_set_bit(NIC_STATE_RESETTING, &priv->state))
		usleep_range(1000, 2000);

	(void)hnae_reinit_handle(handle);

	clear_bit(NIC_STATE_RESETTING, &priv->state);
}

void hns_nic_net_reinit(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	enum hnae_port_type type = priv->ae_handle->port_type;

	netif_trans_update(priv->netdev);
	while (test_and_set_bit(NIC_STATE_REINITING, &priv->state))
		usleep_range(1000, 2000);

	hns_nic_net_down(netdev);

	/* Only do hns_nic_net_reset in debug mode
	 * because of hardware limitation.
	 */
	if (type == HNAE_PORT_DEBUG)
		hns_nic_net_reset(netdev);

	(void)hns_nic_net_up(netdev);
	clear_bit(NIC_STATE_REINITING, &priv->state);
}

static int hns_nic_net_open(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int ret;

	if (test_bit(NIC_STATE_TESTING, &priv->state))
		return -EBUSY;

	priv->link = 0;
	netif_carrier_off(ndev);

	ret = netif_set_real_num_tx_queues(ndev, h->q_num);
	if (ret < 0) {
		netdev_err(ndev, "netif_set_real_num_tx_queues fail, ret=%d!\n",
			   ret);
		return ret;
	}

	ret = netif_set_real_num_rx_queues(ndev, h->q_num);
	if (ret < 0) {
		netdev_err(ndev,
			   "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
		return ret;
	}

	ret = hns_nic_net_up(ndev);
	if (ret) {
		netdev_err(ndev,
			   "hns net up fail, ret=%d!\n", ret);
		return ret;
	}

	return 0;
}

static int hns_nic_net_stop(struct net_device *ndev)
{
	hns_nic_net_down(ndev);

	return 0;
}

static void hns_tx_timeout_reset(struct hns_nic_priv *priv);

#define HNS_TX_TIMEO_LIMIT (40 * HZ)
static void hns_nic_net_timeout(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (ndev->watchdog_timeo < HNS_TX_TIMEO_LIMIT) {
		ndev->watchdog_timeo *= 2;
		netdev_info(ndev, "watchdog_timeo changed to %d.\n",
			    ndev->watchdog_timeo);
	} else {
		ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
		hns_tx_timeout_reset(priv);
	}
}

static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
			    int cmd)
{
	struct phy_device *phy_dev = netdev->phydev;

	if (!netif_running(netdev))
		return -EINVAL;

	if (!phy_dev)
		return -ENOTSUPP;

	return phy_mii_ioctl(phy_dev, ifr, cmd);
}

static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
				    struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);

	assert(skb->queue_mapping < priv->ae_handle->q_num);

	return hns_nic_net_xmit_hw(ndev, skb,
				   &tx_ring_data(priv, skb->queue_mapping));
}

static void hns_nic_drop_rx_fetch(struct hns_nic_ring_data *ring_data,
				  struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

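/* Helpers for draining descriptors the chip prefetched with a stale buffer
 * layout: a 64-byte dummy frame is sent to ourselves over SerDes loopback
 * and dropped on receive (see hns_nic_clear_all_rx_fetch() below).
 */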
#define HNS_LB_TX_RING	0
static struct sk_buff *hns_assemble_skb(struct net_device *ndev)
{
	struct sk_buff *skb;
	struct ethhdr *ethhdr;
	int frame_len;

	/* allocate test skb */
	skb = alloc_skb(64, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_put(skb, 64);
	skb->dev = ndev;
	memset(skb->data, 0xFF, skb->len);

	/* must be tcp/ip package */
	ethhdr = (struct ethhdr *)skb->data;
	ethhdr->h_proto = htons(ETH_P_IP);

	/* fill the second half of the frame with a 0xAA pattern */
	frame_len = skb->len & (~1ul);
	memset(&skb->data[frame_len / 2], 0xAA,
	       frame_len / 2 - 1);

	skb->queue_mapping = HNS_LB_TX_RING;

	return skb;
}

static int hns_enable_serdes_lb(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct hnae_ae_ops *ops = h->dev->ops;
	int speed, duplex;
	int ret;

	ret = ops->set_loopback(h, MAC_INTERNALLOOP_SERDES, 1);
	if (ret)
		return ret;

	ret = ops->start ? ops->start(h) : 0;
	if (ret)
		return ret;

	/* link adjust duplex */
	if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
		speed = 1000;
	else
		speed = 10000;
	duplex = 1;

	ops->adjust_link(h, speed, duplex);

	/* wait h/w ready */
	mdelay(300);

	return 0;
}

static void hns_disable_serdes_lb(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct hnae_ae_ops *ops = h->dev->ops;

	ops->stop(h);
	ops->set_loopback(h, MAC_INTERNALLOOP_SERDES, 0);
}

/**
 * hns_nic_clear_all_rx_fetch - clear the descriptors the chip has fetched
 * @ndev: net device
 *
 * The flow is as follows:
 *    1. if an rx ring has a descriptor between head and tail whose page
 *       offset is not 0, the chip fetched descriptors for the old
 *       large-buffer layout and they must be drained;
 *    2. point the RSS indirection table at that ring only;
 *    3. send 64-byte dummy frames to ourselves over SerDes loopback and
 *       drop them on receive until the stale descriptors are consumed;
 *    4. restore the original RSS indirection table.
 */
static int hns_nic_clear_all_rx_fetch(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct hnae_ae_ops *ops = h->dev->ops;
	struct hns_nic_ring_data *rd;
	struct hnae_ring *ring;
	struct sk_buff *skb;
	u32 *org_indir;
	u32 *cur_indir;
	int indir_size;
	int head, tail;
	int fetch_num;
	int i, j;
	bool found;
	int retry_times;
	int ret = 0;

	/* alloc indir memory */
	indir_size = ops->get_rss_indir_size(h) * sizeof(*org_indir);
	org_indir = kzalloc(indir_size, GFP_KERNEL);
	if (!org_indir)
		return -ENOMEM;

	/* store the original indirection */
	ops->get_rss(h, org_indir, NULL, NULL);

	cur_indir = kzalloc(indir_size, GFP_KERNEL);
	if (!cur_indir) {
		ret = -ENOMEM;
		goto cur_indir_alloc_err;
	}

	/* set loopback */
	if (hns_enable_serdes_lb(ndev)) {
		ret = -EINVAL;
		goto enable_serdes_lb_err;
	}

	/* foreach every rx ring, look for a fetched desc */
	for (i = 0; i < h->q_num; i++) {
		ring = &h->qs[i]->rx_ring;
		head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
		tail = readl_relaxed(ring->io_base + RCB_REG_TAIL);
		found = false;
		fetch_num = ring_dist(ring, head, tail);

		while (head != tail) {
			if (ring->desc_cb[head].page_offset != 0) {
				found = true;
				break;
			}

			head++;
			if (head == ring->desc_num)
				head = 0;
		}

		if (found) {
			/* steer all traffic to this ring only */
			for (j = 0; j < indir_size / sizeof(*org_indir); j++)
				cur_indir[j] = i;
			ops->set_rss(h, cur_indir, NULL, 0);

			for (j = 0; j < fetch_num; j++) {
				/* alloc one skb and init */
				skb = hns_assemble_skb(ndev);
				if (!skb)
					goto out;
				rd = &tx_ring_data(priv, skb->queue_mapping);
				hns_nic_net_xmit_hw(ndev, skb, rd);

				retry_times = 0;
				while (retry_times++ < 10) {
					mdelay(10);
					/* clean rx */
					rd = &rx_ring_data(priv, i);
					if (rd->poll_one(rd, fetch_num,
							 hns_nic_drop_rx_fetch))
						break;
				}

				retry_times = 0;
				while (retry_times++ < 10) {
					mdelay(10);
					/* clean the tx ring that sent it */
					rd = &tx_ring_data(priv,
							   HNS_LB_TX_RING);
					if (rd->poll_one(rd, fetch_num, NULL))
						break;
				}
			}
		}
	}

out:
	/* restore everything */
	ops->set_rss(h, org_indir, NULL, 0);
	hns_disable_serdes_lb(ndev);
enable_serdes_lb_err:
	kfree(cur_indir);
cur_indir_alloc_err:
	kfree(org_indir);

	return ret;
}

static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	bool if_running = netif_running(ndev);
	int ret;

	/* MTU < 68 is an error and causes problems on some kernels */
	if (new_mtu < 68)
		return -EINVAL;

	/* MTU no change */
	if (new_mtu == ndev->mtu)
		return 0;

	if (!h->dev->ops->set_mtu)
		return -ENOTSUPP;

	if (if_running) {
		(void)hns_nic_net_stop(ndev);
		msleep(100);
	}

	if (priv->enet_ver != AE_VERSION_1 &&
	    ndev->mtu <= BD_SIZE_2048_MAX_MTU &&
	    new_mtu > BD_SIZE_2048_MAX_MTU) {
		/* update desc */
		hnae_reinit_all_ring_desc(h);

		/* clear the packets which the chip has fetched */
		ret = hns_nic_clear_all_rx_fetch(ndev);

		/* the page offset must be consistent with the desc */
		hnae_reinit_all_ring_page_off(h);

		if (ret) {
			netdev_err(ndev, "clear the fetched desc fail\n");
			goto out;
		}
	}

	ret = h->dev->ops->set_mtu(h, new_mtu);
	if (ret) {
		netdev_err(ndev, "set mtu fail, return value %d\n",
			   ret);
		goto out;
	}

	/* finally, set new mtu to netdevice */
	ndev->mtu = new_mtu;

out:
	if (if_running) {
		if (hns_nic_net_open(ndev)) {
			netdev_err(ndev, "hns net open fail\n");
			ret = -EINVAL;
		}
	}

	return ret;
}

static int hns_nic_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	switch (priv->enet_ver) {
	case AE_VERSION_1:
		if (features & (NETIF_F_TSO | NETIF_F_TSO6))
			netdev_info(netdev, "enet v1 does not support tso!\n");
		break;
	default:
		if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
			priv->ops.fill_desc = fill_tso_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
			/* the chip only supports 7*4096 */
			netif_set_gso_max_size(netdev, 7 * 4096);
		} else {
			priv->ops.fill_desc = fill_v2_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
		}
		break;
	}
	netdev->features = features;
	return 0;
}

static netdev_features_t hns_nic_fix_features(
		struct net_device *netdev, netdev_features_t features)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	switch (priv->enet_ver) {
	case AE_VERSION_1:
		features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
			      NETIF_F_HW_VLAN_CTAG_FILTER);
		break;
	default:
		break;
	}
	return features;
}

static int hns_nic_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	if (h->dev->ops->add_uc_addr)
		return h->dev->ops->add_uc_addr(h, addr);

	return 0;
}

static int hns_nic_uc_unsync(struct net_device *netdev,
			     const unsigned char *addr)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	if (h->dev->ops->rm_uc_addr)
		return h->dev->ops->rm_uc_addr(h, addr);

	return 0;
}

/**
 * hns_set_multicast_list - set the multicast MAC address list
 * @ndev: net device
 *
 * Clears the hardware multicast table, then reprograms it from the
 * netdev's current multicast list.
 */
static void hns_set_multicast_list(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct netdev_hw_addr *ha = NULL;

	if (!h) {
		netdev_err(ndev, "hnae handle is null\n");
		return;
	}

	if (h->dev->ops->clr_mc_addr)
		if (h->dev->ops->clr_mc_addr(h))
			netdev_err(ndev, "clear multicast address fail\n");

	if (h->dev->ops->set_mc_addr) {
		netdev_for_each_mc_addr(ha, ndev)
			if (h->dev->ops->set_mc_addr(h, ha->addr))
				netdev_err(ndev, "set multicast fail\n");
	}
}

static void hns_nic_set_rx_mode(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;

	if (h->dev->ops->set_promisc_mode) {
		if (ndev->flags & IFF_PROMISC)
			h->dev->ops->set_promisc_mode(h, 1);
		else
			h->dev->ops->set_promisc_mode(h, 0);
	}

	hns_set_multicast_list(ndev);

	if (__dev_uc_sync(ndev, hns_nic_uc_sync, hns_nic_uc_unsync))
		netdev_err(ndev, "sync uc address fail\n");
}

static void hns_nic_get_stats64(struct net_device *ndev,
				struct rtnl_link_stats64 *stats)
{
	int idx = 0;
	u64 tx_bytes = 0;
	u64 rx_bytes = 0;
	u64 tx_pkts = 0;
	u64 rx_pkts = 0;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;

	for (idx = 0; idx < h->q_num; idx++) {
		tx_bytes += h->qs[idx]->tx_ring.stats.tx_bytes;
		tx_pkts += h->qs[idx]->tx_ring.stats.tx_pkts;
		rx_bytes += h->qs[idx]->rx_ring.stats.rx_bytes;
		rx_pkts += h->qs[idx]->rx_ring.stats.rx_pkts;
	}

	stats->tx_bytes = tx_bytes;
	stats->tx_packets = tx_pkts;
	stats->rx_bytes = rx_bytes;
	stats->rx_packets = rx_pkts;

	stats->rx_errors = ndev->stats.rx_errors;
	stats->multicast = ndev->stats.multicast;
	stats->rx_length_errors = ndev->stats.rx_length_errors;
	stats->rx_crc_errors = ndev->stats.rx_crc_errors;
	stats->rx_missed_errors = ndev->stats.rx_missed_errors;

	stats->tx_errors = ndev->stats.tx_errors;
	stats->rx_dropped = ndev->stats.rx_dropped;
	stats->tx_dropped = ndev->stats.tx_dropped;
	stats->collisions = ndev->stats.collisions;
	stats->rx_over_errors = ndev->stats.rx_over_errors;
	stats->rx_frame_errors = ndev->stats.rx_frame_errors;
	stats->rx_fifo_errors = ndev->stats.rx_fifo_errors;
	stats->tx_aborted_errors = ndev->stats.tx_aborted_errors;
	stats->tx_carrier_errors = ndev->stats.tx_carrier_errors;
	stats->tx_fifo_errors = ndev->stats.tx_fifo_errors;
	stats->tx_heartbeat_errors = ndev->stats.tx_heartbeat_errors;
	stats->tx_window_errors = ndev->stats.tx_window_errors;
	stats->rx_compressed = ndev->stats.rx_compressed;
	stats->tx_compressed = ndev->stats.tx_compressed;
}

static u16
hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
		     struct net_device *sb_dev)
{
	struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
	struct hns_nic_priv *priv = netdev_priv(ndev);

	/* fix hardware broadcast/multicast packets queue loopback */
	if (!AE_IS_VER1(priv->enet_ver) &&
	    is_multicast_ether_addr(eth_hdr->h_dest))
		return 0;
	else
		return netdev_pick_tx(ndev, skb, NULL);
}

static const struct net_device_ops hns_nic_netdev_ops = {
	.ndo_open = hns_nic_net_open,
	.ndo_stop = hns_nic_net_stop,
	.ndo_start_xmit = hns_nic_net_xmit,
	.ndo_tx_timeout = hns_nic_net_timeout,
	.ndo_set_mac_address = hns_nic_net_set_mac_address,
	.ndo_change_mtu = hns_nic_change_mtu,
	.ndo_do_ioctl = hns_nic_do_ioctl,
	.ndo_set_features = hns_nic_set_features,
	.ndo_fix_features = hns_nic_fix_features,
	.ndo_get_stats64 = hns_nic_get_stats64,
	.ndo_set_rx_mode = hns_nic_set_rx_mode,
	.ndo_select_queue = hns_nic_select_queue,
};

static void hns_nic_update_link_status(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	if (h->phy_dev) {
		if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
			return;

		(void)genphy_read_status(h->phy_dev);
	}
	hns_nic_adjust_link(netdev);
}

/* for dumping key regs */
static void hns_nic_dump(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hnae_ae_ops *ops = h->dev->ops;
	u32 *data, reg_num, i;

	if (ops->get_regs_len && ops->get_regs) {
		reg_num = ops->get_regs_len(priv->ae_handle);
		reg_num = (reg_num + 3ul) & ~3ul;
		data = kcalloc(reg_num, sizeof(u32), GFP_KERNEL);
		if (data) {
			ops->get_regs(priv->ae_handle, data);
			for (i = 0; i < reg_num; i += 4)
				pr_info("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
					i, data[i], data[i + 1],
					data[i + 2], data[i + 3]);
			kfree(data);
		}
	}

	for (i = 0; i < h->q_num; i++) {
		pr_info("tx_queue%d_next_to_clean:%d\n",
			i, h->qs[i]->tx_ring.next_to_clean);
		pr_info("tx_queue%d_next_to_use:%d\n",
			i, h->qs[i]->tx_ring.next_to_use);
		pr_info("rx_queue%d_next_to_clean:%d\n",
			i, h->qs[i]->rx_ring.next_to_clean);
		pr_info("rx_queue%d_next_to_use:%d\n",
			i, h->qs[i]->rx_ring.next_to_use);
	}
}

/* for resetting subtask */
static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
{
	enum hnae_port_type type = priv->ae_handle->port_type;

	if (!test_bit(NIC_STATE2_RESET_REQUESTED, &priv->state))
		return;
	clear_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);

	/* If we're already down, removing or resetting, just bail */
	if (test_bit(NIC_STATE_DOWN, &priv->state) ||
	    test_bit(NIC_STATE_REMOVING, &priv->state) ||
	    test_bit(NIC_STATE_RESETTING, &priv->state))
		return;

	hns_nic_dump(priv);
	netdev_info(priv->netdev, "try to reset %s port!\n",
		    (type == HNAE_PORT_DEBUG ? "debug" : "service"));

	rtnl_lock();
	/* put off any impending NetWatchDogTimeout */
	netif_trans_update(priv->netdev);
	hns_nic_net_reinit(priv->netdev);

	rtnl_unlock();
}

static void hns_nic_service_event_complete(struct hns_nic_priv *priv)
{
	WARN_ON(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state));
	/* make sure to commit the things */
	smp_mb__before_atomic();
	clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
}

static void hns_nic_service_task(struct work_struct *work)
{
	struct hns_nic_priv *priv
		= container_of(work, struct hns_nic_priv, service_task);
	struct hnae_handle *h = priv->ae_handle;

	hns_nic_reset_subtask(priv);
	hns_nic_update_link_status(priv->netdev);
	h->dev->ops->update_led_status(h);
	hns_nic_update_stats(priv->netdev);

	hns_nic_service_event_complete(priv);
}

static void hns_nic_task_schedule(struct hns_nic_priv *priv)
{
	if (!test_bit(NIC_STATE_DOWN, &priv->state) &&
	    !test_bit(NIC_STATE_REMOVING, &priv->state) &&
	    !test_and_set_bit(NIC_STATE_SERVICE_SCHED, &priv->state))
		(void)schedule_work(&priv->service_task);
}

static void hns_nic_service_timer(struct timer_list *t)
{
	struct hns_nic_priv *priv = from_timer(priv, t, service_timer);

	(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);

	hns_nic_task_schedule(priv);
}

/**
 * hns_tx_timeout_reset - initiate a reset due to a Tx timeout
 * @priv: driver private struct
 */
static void hns_tx_timeout_reset(struct hns_nic_priv *priv)
{
	/* Do the reset outside of interrupt context */
	if (!test_bit(NIC_STATE_DOWN, &priv->state)) {
		set_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
		netdev_warn(priv->netdev,
			    "initiating reset due to tx timeout(%llu,0x%lx)\n",
			    priv->tx_timeout_count, priv->state);
		priv->tx_timeout_count++;
		hns_nic_task_schedule(priv);
	}
}

static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hns_nic_ring_data *rd;
	bool is_ver1 = AE_IS_VER1(priv->enet_ver);
	int i;

	if (h->q_num > NIC_MAX_Q_PER_VF) {
		netdev_err(priv->netdev, "too many queues (%d)\n", h->q_num);
		return -EINVAL;
	}

	priv->ring_data = kzalloc(array3_size(h->q_num,
					      sizeof(*priv->ring_data), 2),
				  GFP_KERNEL);
	if (!priv->ring_data)
		return -ENOMEM;

	for (i = 0; i < h->q_num; i++) {
		rd = &priv->ring_data[i];
		rd->queue_index = i;
		rd->ring = &h->qs[i]->tx_ring;
		rd->poll_one = hns_nic_tx_poll_one;
		rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro :
			hns_nic_tx_fini_pro_v2;

		netif_napi_add(priv->netdev, &rd->napi,
			       hns_nic_common_poll, NAPI_POLL_WEIGHT);
		rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}
	for (i = h->q_num; i < h->q_num * 2; i++) {
		rd = &priv->ring_data[i];
		rd->queue_index = i - h->q_num;
		rd->ring = &h->qs[i - h->q_num]->rx_ring;
		rd->poll_one = hns_nic_rx_poll_one;
		rd->ex_process = hns_nic_rx_up_pro;
		rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro :
			hns_nic_rx_fini_pro_v2;

		netif_napi_add(priv->netdev, &rd->napi,
			       hns_nic_common_poll, NAPI_POLL_WEIGHT);
		rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}

	return 0;
}

static void hns_nic_uninit_ring_data(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	int i;

	for (i = 0; i < h->q_num * 2; i++) {
		netif_napi_del(&priv->ring_data[i].napi);
		if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
			(void)irq_set_affinity_hint(
				priv->ring_data[i].ring->irq,
				NULL);
			free_irq(priv->ring_data[i].ring->irq,
				 &priv->ring_data[i]);
		}

		priv->ring_data[i].ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}
	kfree(priv->ring_data);
}

static void hns_nic_set_priv_ops(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	if (AE_IS_VER1(priv->enet_ver)) {
		priv->ops.fill_desc = fill_desc;
		priv->ops.get_rxd_bnum = get_rx_desc_bnum;
		priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
	} else {
		priv->ops.get_rxd_bnum = get_v2rx_desc_bnum;
		if ((netdev->features & NETIF_F_TSO) ||
		    (netdev->features & NETIF_F_TSO6)) {
			priv->ops.fill_desc = fill_tso_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
			/* the chip only supports 7*4096 */
			netif_set_gso_max_size(netdev, 7 * 4096);
		} else {
			priv->ops.fill_desc = fill_v2_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
		}
		/* enable tso when init
		 * control tso on/off through TSE bit in bd
		 */
		h->dev->ops->set_tso_stats(h, 1);
	}
}

static int hns_nic_try_get_ae(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h;
	int ret;

	h = hnae_get_handle(&priv->netdev->dev,
			    priv->fwnode, priv->port_id, NULL);
	if (IS_ERR_OR_NULL(h)) {
		ret = -ENODEV;
		dev_dbg(priv->dev, "has not handle, register notifier!\n");
		goto out;
	}
	priv->ae_handle = h;

	ret = hns_nic_init_phy(ndev, h);
	if (ret) {
		dev_err(priv->dev, "probe phy device fail!\n");
		goto out_init_phy;
	}

	ret = hns_nic_init_ring_data(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_ring_data;
	}

	hns_nic_set_priv_ops(ndev);

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->dev, "probe register netdev fail!\n");
		goto out_reg_ndev_fail;
	}
	return 0;

out_reg_ndev_fail:
	hns_nic_uninit_ring_data(priv);
	priv->ring_data = NULL;
out_init_phy:
out_init_ring_data:
	hnae_put_handle(priv->ae_handle);
	priv->ae_handle = NULL;
out:
	return ret;
}

static int hns_nic_notifier_action(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct hns_nic_priv *priv =
		container_of(nb, struct hns_nic_priv, notifier_block);

	assert(action == HNAE_AE_REGISTER);

	if (!hns_nic_try_get_ae(priv->netdev)) {
		hnae_unregister_notifier(&priv->notifier_block);
		priv->notifier_block.notifier_call = NULL;
	}
	return 0;
}

static int hns_nic_dev_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct net_device *ndev;
	struct hns_nic_priv *priv;
	u32 port_id;
	int ret;

	ndev = alloc_etherdev_mq(sizeof(struct hns_nic_priv), NIC_MAX_Q_PER_VF);
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);

	priv = netdev_priv(ndev);
	priv->dev = dev;
	priv->netdev = ndev;

	if (dev_of_node(dev)) {
		struct device_node *ae_node;

		if (of_device_is_compatible(dev->of_node,
					    "hisilicon,hns-nic-v1"))
			priv->enet_ver = AE_VERSION_1;
		else
			priv->enet_ver = AE_VERSION_2;

		ae_node = of_parse_phandle(dev->of_node, "ae-handle", 0);
		if (!ae_node) {
			ret = -ENODEV;
			dev_err(dev, "not find ae-handle\n");
			goto out_read_prop_fail;
		}
		priv->fwnode = &ae_node->fwnode;
	} else if (is_acpi_node(dev->fwnode)) {
		struct fwnode_reference_args args;

		if (acpi_dev_found(hns_enet_acpi_match[0].id))
			priv->enet_ver = AE_VERSION_1;
		else if (acpi_dev_found(hns_enet_acpi_match[1].id))
			priv->enet_ver = AE_VERSION_2;
		else
			return -ENXIO;

		/* try to find port-idx-in-ae first */
		ret = acpi_node_get_property_reference(dev->fwnode,
						       "ae-handle", 0, &args);
		if (ret) {
			dev_err(dev, "not find ae-handle\n");
			goto out_read_prop_fail;
		}
		if (!is_acpi_device_node(args.fwnode)) {
			ret = -EINVAL;
			goto out_read_prop_fail;
		}
		priv->fwnode = args.fwnode;
	} else {
		dev_err(dev, "cannot read cfg data from OF or acpi\n");
		return -ENXIO;
	}

	ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id);
	if (ret) {
		/* only for old code compatible */
		ret = device_property_read_u32(dev, "port-id", &port_id);
		if (ret)
			goto out_read_prop_fail;
		/* for old dts, we need to calculate the port offset */
		port_id = port_id < HNS_SRV_OFFSET ? port_id + HNS_DEBUG_OFFSET
			: port_id - HNS_SRV_OFFSET;
	}
	priv->port_id = port_id;

	hns_init_mac_addr(ndev);

	ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
	ndev->priv_flags |= IFF_UNICAST_FLT;
	ndev->netdev_ops = &hns_nic_netdev_ops;
	hns_ethtool_set_ops(ndev);

	ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
		NETIF_F_GRO;
	ndev->vlan_features |=
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
	ndev->vlan_features |= NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;

	/* MTU range: 68 - 9578 (v1) or 9706 (v2) */
	ndev->min_mtu = MAC_MIN_MTU;
	switch (priv->enet_ver) {
	case AE_VERSION_2:
		ndev->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_NTUPLE;
		ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
			NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6;
		ndev->vlan_features |= NETIF_F_TSO | NETIF_F_TSO6;
		ndev->max_mtu = MAC_MAX_MTU_V2 -
				(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
		break;
	default:
		ndev->max_mtu = MAC_MAX_MTU -
				(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
		break;
	}

	SET_NETDEV_DEV(ndev, dev);

	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		dev_dbg(dev, "set mask to 64bit\n");
	else
		dev_err(dev, "set mask to 64bit fail!\n");

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(ndev);

	timer_setup(&priv->service_timer, hns_nic_service_timer, 0);
	INIT_WORK(&priv->service_task, hns_nic_service_task);

	set_bit(NIC_STATE_SERVICE_INITED, &priv->state);
	clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
	set_bit(NIC_STATE_DOWN, &priv->state);

	if (hns_nic_try_get_ae(priv->netdev)) {
		priv->notifier_block.notifier_call = hns_nic_notifier_action;
		ret = hnae_register_notifier(&priv->notifier_block);
		if (ret) {
			dev_err(dev, "register notifier fail!\n");
			goto out_notify_fail;
		}
		dev_dbg(dev, "has not handle, register notifier!\n");
	}

	return 0;

out_notify_fail:
	(void)cancel_work_sync(&priv->service_task);
out_read_prop_fail:
	/* safe for ACPI FW */
	of_node_put(to_of_node(priv->fwnode));
	free_netdev(ndev);
	return ret;
}

static int hns_nic_dev_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (ndev->reg_state != NETREG_UNINITIALIZED)
		unregister_netdev(ndev);

	if (priv->ring_data)
		hns_nic_uninit_ring_data(priv);
	priv->ring_data = NULL;

	if (ndev->phydev)
		phy_disconnect(ndev->phydev);

	if (!IS_ERR_OR_NULL(priv->ae_handle))
		hnae_put_handle(priv->ae_handle);
	priv->ae_handle = NULL;
	if (priv->notifier_block.notifier_call)
		hnae_unregister_notifier(&priv->notifier_block);
	priv->notifier_block.notifier_call = NULL;

	set_bit(NIC_STATE_REMOVING, &priv->state);
	(void)cancel_work_sync(&priv->service_task);

	/* safe for ACPI FW */
	of_node_put(to_of_node(priv->fwnode));

	free_netdev(ndev);
	return 0;
}

static const struct of_device_id hns_enet_of_match[] = {
	{ .compatible = "hisilicon,hns-nic-v1", },
	{ .compatible = "hisilicon,hns-nic-v2", },
	{},
};

MODULE_DEVICE_TABLE(of, hns_enet_of_match);

static struct platform_driver hns_nic_dev_driver = {
	.driver = {
		.name = "hns-nic",
		.of_match_table = hns_enet_of_match,
		.acpi_match_table = ACPI_PTR(hns_enet_acpi_match),
	},
	.probe = hns_nic_dev_probe,
	.remove = hns_nic_dev_remove,
};

module_platform_driver(hns_nic_dev_driver);

MODULE_DESCRIPTION("HISILICON HNS Ethernet driver");
MODULE_AUTHOR("Hisilicon, Inc.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:hns-nic");