/*
 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
 * driver for Linux (cxgb4vf).
 *
 * Scatter-Gather Engine (SGE) support: Free List and Response (ingress)
 * Queue management, the Ethernet TX and RX fast paths, and the SGE queue
 * allocation/teardown routines used by the rest of the driver.
 */

36#include <linux/skbuff.h>
37#include <linux/netdevice.h>
38#include <linux/etherdevice.h>
39#include <linux/if_vlan.h>
40#include <linux/ip.h>
41#include <net/ipv6.h>
42#include <net/tcp.h>
43#include <linux/dma-mapping.h>
44#include <linux/prefetch.h>
45
46#include "t4vf_common.h"
47#include "t4vf_defs.h"
48
49#include "../cxgb4/t4_regs.h"
50#include "../cxgb4/t4_values.h"
51#include "../cxgb4/t4fw_api.h"
52#include "../cxgb4/t4_msg.h"
53
54
55
56
/*
 * Constants used by the SGE code.  Sizes are expressed in the units the
 * hardware uses: Egress Queue Units for egress rings and Free Lists, and
 * 64-bit flits for Work Request lengths.
 */
enum {
	/*
	 * Egress Queue sizes and their Producer/Consumer indices are all
	 * expressed in Egress Queue Units.  As far as the hardware is
	 * concerned a Free List is just another Egress Queue (the host
	 * produces free buffers which the hardware consumes), with 64-bit
	 * PCI DMA addresses as its "descriptors".
	 */
	EQ_UNIT = SGE_EQ_IDXSIZE,
	FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
	TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),

	/*
	 * Max number of TX descriptors we clean up at a time.  Should be
	 * modest since we must grab the TX queue lock to do the reclaim.
	 */
	MAX_TX_RECLAIM = 16,

	/*
	 * Max number of RX buffers we replenish at a time.  Again keep this
	 * modest since buffer allocation isn't cheap either.
	 */
	MAX_RX_REFILL = 16,

	/*
	 * Period of the RX queue check timer.  This timer is infrequent as
	 * it has something to do only when the system experiences severe
	 * memory shortage.
	 */
	RX_QCHECK_PERIOD = (HZ / 2),

	/*
	 * Period of the TX queue check timer and the maximum number of TX
	 * descriptors to be reclaimed by that timer per run.
	 */
	TX_QCHECK_PERIOD = (HZ / 2),
	MAX_TIMER_TX_RECLAIM = 100,

	/*
	 * Suspend an Ethernet TX queue with fewer available descriptors than
	 * this.  We always want room for a maximum sized packet: the Work
	 * Request header plus the optional LSO and checksum CPL messages and
	 * a Scatter/Gather List covering MAX_SKB_FRAGS + 1 buffers.
	 */
	ETHTXQ_MAX_FRAGS = MAX_SKB_FRAGS + 1,
	ETHTXQ_MAX_SGL_LEN = ((3 * (ETHTXQ_MAX_FRAGS-1))/2 +
			      ((ETHTXQ_MAX_FRAGS-1) & 1) +
			      2),
	ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
			  sizeof(struct cpl_tx_pkt_lso_core) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
	ETHTXQ_MAX_FLITS = ETHTXQ_MAX_SGL_LEN + ETHTXQ_MAX_HDR,

	ETHTXQ_STOP_THRES = 1 + DIV_ROUND_UP(ETHTXQ_MAX_FLITS, TXD_PER_EQ_UNIT),

	/*
	 * Max TX descriptor space we allow for an Ethernet packet to be
	 * inlined into a Work Request.  This is limited by the maximum value
	 * of the immediate-data length field in the firmware Work Request.
	 */
	MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_M,

	/*
	 * Max size of a Work Request sent through a control TX queue.
	 */
	MAX_CTRL_WR_LEN = 256,

	/*
	 * Maximum amount of data which we'll ever need to inline into a
	 * TX ring: max(MAX_IMM_TX_PKT_LEN, MAX_CTRL_WR_LEN).
	 */
	MAX_IMM_TX_LEN = (MAX_IMM_TX_PKT_LEN > MAX_CTRL_WR_LEN
			  ? MAX_IMM_TX_PKT_LEN
			  : MAX_CTRL_WR_LEN),

	/*
	 * For incoming packets shorter than RX_COPY_THRES, we copy the data
	 * into an skb rather than referencing the Free List pages.  We
	 * allocate enough in-line room in skb's to accommodate pulling in
	 * RX_PULL_LEN bytes of the data (headers).
	 */
	RX_COPY_THRES = 256,
	RX_PULL_LEN = 128,

	/*
	 * Main body length for sk_buffs used for RX Ethernet packets with
	 * fragments.  Should be >= RX_PULL_LEN but possibly bigger to give
	 * pskb_may_pull() some room.
	 */
	RX_SKB_LEN = 512,
};
153
154
155
156
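/*
 * Software state per TX descriptor: the skb associated with a TX Work
 * Request (if any) and a pointer to the Work Request's Scatter/Gather List
 * within the hardware ring, so the skb's buffers can be unmapped and the
 * skb freed once the hardware has consumed the descriptor.
 */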
157struct tx_sw_desc {
158 struct sk_buff *skb;
159 struct ulptx_sgl *sgl;
160};
161
162
163
164
165
166
167
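/*
 * Software state per RX Free List descriptor.  The low-order bits of
 * dma_addr encode buffer state flags (see below); the remainder is the
 * PCI DMA address of the buffer page.
 */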
168struct rx_sw_desc {
169 struct page *page;
170 dma_addr_t dma_addr;
171
172};
173
174
175
176
177
178
179
180
181
182
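/*
 * Buffer state flags kept in the low-order bits of rx_sw_desc.dma_addr.
 * Free List buffers are page-aligned DMA mappings (see refill_fl()), so
 * the two low-order address bits are free to carry this state.
 */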
183enum {
184 RX_LARGE_BUF = 1 << 0,
185 RX_UNMAPPED_BUF = 1 << 1,
186};
187
188
189
190
191
192
193
194
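/**
 *	get_buf_addr - return the DMA address of an RX buffer
 *	@sdesc: the software RX buffer descriptor
 *
 *	Return the PCI DMA address of a Free List buffer with the low-order
 *	state flag bits masked off.
 */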
195static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *sdesc)
196{
197 return sdesc->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF);
198}
199
200
201
202
203
204
205
206
207static inline bool is_buf_mapped(const struct rx_sw_desc *sdesc)
208{
209 return !(sdesc->dma_addr & RX_UNMAPPED_BUF);
210}
211
212
213
214
215
216
217
218static inline int need_skb_unmap(void)
219{
220#ifdef CONFIG_NEED_DMA_MAP_STATE
221 return 1;
222#else
223 return 0;
224#endif
225}
226
227
228
229
230
231
232
233static inline unsigned int txq_avail(const struct sge_txq *tq)
234{
235 return tq->size - 1 - tq->in_use;
236}
237
238
239
240
241
242
243
244
245
246
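/**
 *	fl_cap - return the capacity of a Free List
 *	@fl: the Free List
 *
 *	Returns the number of buffers the Free List can hold.  The capacity
 *	is less than the ring size because one Egress Queue Unit's worth of
 *	descriptors is left unpopulated, otherwise the Producer and Consumer
 *	indices would coincide and the hardware could not tell a full ring
 *	from an empty one.
 */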
247static inline unsigned int fl_cap(const struct sge_fl *fl)
248{
249 return fl->size - FL_PER_EQ_UNIT;
250}
251
252
253
254
255
256
257
258
259
260
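/**
 *	fl_starving - return whether a Free List is starving
 *	@adapter: the adapter
 *	@fl: the Free List
 *
 *	Tests whether the number of buffers which the hardware can actually
 *	use (buffers available minus credits not yet handed over) has fallen
 *	below the adapter's starvation threshold.
 */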
261static inline bool fl_starving(const struct adapter *adapter,
262 const struct sge_fl *fl)
263{
264 const struct sge *s = &adapter->sge;
265
266 return fl->avail - fl->pend_cred <= s->fl_starve_thres;
267}
268
269
270
271
272
273
274
275
276
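/**
 *	map_skb - DMA-map an skb for the TX path
 *	@dev: the device the packet will be sent through
 *	@skb: the packet
 *	@addr: array in which to return the DMA bus addresses
 *
 *	Maps the linear portion and every page fragment of @skb for DMA.
 *	On failure any mappings already made are undone and -ENOMEM is
 *	returned; on success the function returns 0.
 */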
277static int map_skb(struct device *dev, const struct sk_buff *skb,
278 dma_addr_t *addr)
279{
280 const skb_frag_t *fp, *end;
281 const struct skb_shared_info *si;
282
283 *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
284 if (dma_mapping_error(dev, *addr))
285 goto out_err;
286
287 si = skb_shinfo(skb);
288 end = &si->frags[si->nr_frags];
289 for (fp = si->frags; fp < end; fp++) {
290 *++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
291 DMA_TO_DEVICE);
292 if (dma_mapping_error(dev, *addr))
293 goto unwind;
294 }
295 return 0;
296
297unwind:
298 while (fp-- > si->frags)
299 dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
300 dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
301
302out_err:
303 return -ENOMEM;
304}
305
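/*
 * Reverse the DMA mappings of a packet's Scatter/Gather List as written
 * into the hardware TX ring by write_sgl().  The SGL may wrap past the end
 * of the ring (the status page at tq->stat), in which case the remaining
 * addresses are read from the start of the descriptor ring.
 */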
306static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
307 const struct ulptx_sgl *sgl, const struct sge_txq *tq)
308{
309 const struct ulptx_sge_pair *p;
310 unsigned int nfrags = skb_shinfo(skb)->nr_frags;
311
312 if (likely(skb_headlen(skb)))
313 dma_unmap_single(dev, be64_to_cpu(sgl->addr0),
314 be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
315 else {
316 dma_unmap_page(dev, be64_to_cpu(sgl->addr0),
317 be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
318 nfrags--;
319 }
320
321
322
323
324
325 for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
326 if (likely((u8 *)(p + 1) <= (u8 *)tq->stat)) {
327unmap:
328 dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
329 be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
330 dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
331 be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
332 p++;
333 } else if ((u8 *)p == (u8 *)tq->stat) {
334 p = (const struct ulptx_sge_pair *)tq->desc;
335 goto unmap;
336 } else if ((u8 *)p + 8 == (u8 *)tq->stat) {
337 const __be64 *addr = (const __be64 *)tq->desc;
338
339 dma_unmap_page(dev, be64_to_cpu(addr[0]),
340 be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
341 dma_unmap_page(dev, be64_to_cpu(addr[1]),
342 be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
343 p = (const struct ulptx_sge_pair *)&addr[2];
344 } else {
345 const __be64 *addr = (const __be64 *)tq->desc;
346
347 dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
348 be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
349 dma_unmap_page(dev, be64_to_cpu(addr[0]),
350 be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
351 p = (const struct ulptx_sge_pair *)&addr[1];
352 }
353 }
354 if (nfrags) {
355 __be64 addr;
356
357 if ((u8 *)p == (u8 *)tq->stat)
358 p = (const struct ulptx_sge_pair *)tq->desc;
359 addr = ((u8 *)p + 16 <= (u8 *)tq->stat
360 ? p->addr[0]
361 : *(const __be64 *)tq->desc);
362 dma_unmap_page(dev, be64_to_cpu(addr), be32_to_cpu(p->len[0]),
363 DMA_TO_DEVICE);
364 }
365}
366
367
368
369
370
371
372
373
374
375
376
377static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq,
378 unsigned int n, bool unmap)
379{
380 struct tx_sw_desc *sdesc;
381 unsigned int cidx = tq->cidx;
382 struct device *dev = adapter->pdev_dev;
383
384 const int need_unmap = need_skb_unmap() && unmap;
385
386 sdesc = &tq->sdesc[cidx];
387 while (n--) {
388
389
390
391
392 if (sdesc->skb) {
393 if (need_unmap)
394 unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq);
395 dev_consume_skb_any(sdesc->skb);
396 sdesc->skb = NULL;
397 }
398
399 sdesc++;
400 if (++cidx == tq->size) {
401 cidx = 0;
402 sdesc = tq->sdesc;
403 }
404 }
405 tq->cidx = cidx;
406}
407
408
409
410
411static inline int reclaimable(const struct sge_txq *tq)
412{
413 int hw_cidx = be16_to_cpu(tq->stat->cidx);
414 int reclaimable = hw_cidx - tq->cidx;
415 if (reclaimable < 0)
416 reclaimable += tq->size;
417 return reclaimable;
418}
419
420
421
422
423
424
425
426
427
428
429
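/**
 *	reclaim_completed_tx - reclaims completed TX descriptors
 *	@adapter: the adapter
 *	@tq: the TX queue to reclaim completed descriptors from
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims TX descriptors that the SGE has indicated it has processed
 *	and frees the associated buffers if possible.  At most MAX_TX_RECLAIM
 *	descriptors are reclaimed per call so that the caller is not tied up
 *	for too long.
 */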
430static inline void reclaim_completed_tx(struct adapter *adapter,
431 struct sge_txq *tq,
432 bool unmap)
433{
434 int avail = reclaimable(tq);
435
436 if (avail) {
437
438
439
440
441 if (avail > MAX_TX_RECLAIM)
442 avail = MAX_TX_RECLAIM;
443
444 free_tx_desc(adapter, tq, avail, unmap);
445 tq->in_use -= avail;
446 }
447}
448
449
450
451
452
453
454static inline int get_buf_size(const struct adapter *adapter,
455 const struct rx_sw_desc *sdesc)
456{
457 const struct sge *s = &adapter->sge;
458
459 return (s->fl_pg_order > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
460 ? (PAGE_SIZE << s->fl_pg_order) : PAGE_SIZE);
461}
462
463
464
465
466
467
468
469
470
471
472
473static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n)
474{
475 while (n--) {
476 struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];
477
		if (is_buf_mapped(sdesc))
			dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
				       get_buf_size(adapter, sdesc),
				       DMA_FROM_DEVICE);
482 put_page(sdesc->page);
483 sdesc->page = NULL;
484 if (++fl->cidx == fl->size)
485 fl->cidx = 0;
486 fl->avail--;
487 }
488}
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
504{
505 struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];
506
	if (is_buf_mapped(sdesc))
		dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
			       get_buf_size(adapter, sdesc),
			       DMA_FROM_DEVICE);
511 sdesc->page = NULL;
512 if (++fl->cidx == fl->size)
513 fl->cidx = 0;
514 fl->avail--;
515}
516
517
518
519
520
521
522
523
524
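/**
 *	ring_fl_db - ring doorbell for a Free List
 *	@adapter: the adapter
 *	@fl: the Free List whose doorbell should be rung
 *
 *	Tell the Scatter-Gather Engine that new Free List buffers are
 *	available.  Credits are only handed to the hardware in whole Egress
 *	Queue Units, through either the queue's BAR2 User Doorbell (if it is
 *	mapped) or the kernel doorbell register.
 */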
525static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
526{
527 u32 val = adapter->params.arch.sge_fl_db;
528
529
530
531
532
533 if (fl->pend_cred >= FL_PER_EQ_UNIT) {
534 if (is_t4(adapter->params.chip))
535 val |= PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT);
536 else
537 val |= PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT);
538
539
540
541
542 wmb();
543
544
545
546
547
548 if (unlikely(fl->bar2_addr == NULL)) {
549 t4_write_reg(adapter,
550 T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
551 QID_V(fl->cntxt_id) | val);
552 } else {
553 writel(val | QID_V(fl->bar2_qid),
554 fl->bar2_addr + SGE_UDB_KDOORBELL);
555
556
557
558
559 wmb();
560 }
561 fl->pend_cred %= FL_PER_EQ_UNIT;
562 }
563}
564
565
566
567
568
569
570
571static inline void set_rx_sw_desc(struct rx_sw_desc *sdesc, struct page *page,
572 dma_addr_t dma_addr)
573{
574 sdesc->page = page;
575 sdesc->dma_addr = dma_addr;
576}
577
578
579
580
581#define POISON_BUF_VAL -1
582
583static inline void poison_buf(struct page *page, size_t sz)
584{
585#if POISON_BUF_VAL >= 0
586 memset(page_address(page), POISON_BUF_VAL, sz);
587#endif
588}
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
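/**
 *	refill_fl - refill an SGE RX buffer ring
 *	@adapter: the adapter
 *	@fl: the Free List ring to refill
 *	@n: the number of new buffers to allocate
 *	@gfp: the gfp flags for the allocations
 *
 *	(Re)populate an SGE free-buffer queue with up to @n new packet
 *	buffers allocated at the given gfp level.  Large (compound page)
 *	buffers are tried first when the Free List is configured for them,
 *	falling back to single pages, and the queue's doorbell is rung for
 *	whatever credit could be added.  Returns the number of buffers
 *	actually added; if fewer than requested, the queue may also be
 *	marked as starving.
 */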
604static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
605 int n, gfp_t gfp)
606{
607 struct sge *s = &adapter->sge;
608 struct page *page;
609 dma_addr_t dma_addr;
610 unsigned int cred = fl->avail;
611 __be64 *d = &fl->desc[fl->pidx];
612 struct rx_sw_desc *sdesc = &fl->sdesc[fl->pidx];
613
614
615
616
617
618
619 BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT);
620
621 gfp |= __GFP_NOWARN;
622
623
624
625
626
627
628
629 if (s->fl_pg_order == 0)
630 goto alloc_small_pages;
631
632 while (n) {
633 page = __dev_alloc_pages(gfp, s->fl_pg_order);
634 if (unlikely(!page)) {
635
636
637
638
639
640 fl->large_alloc_failed++;
641 break;
642 }
643 poison_buf(page, PAGE_SIZE << s->fl_pg_order);
644
		dma_addr = dma_map_page(adapter->pdev_dev, page, 0,
					PAGE_SIZE << s->fl_pg_order,
					DMA_FROM_DEVICE);
648 if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
649
650
651
652
653
654
655
656
657 __free_pages(page, s->fl_pg_order);
658 goto out;
659 }
660 dma_addr |= RX_LARGE_BUF;
661 *d++ = cpu_to_be64(dma_addr);
662
663 set_rx_sw_desc(sdesc, page, dma_addr);
664 sdesc++;
665
666 fl->avail++;
667 if (++fl->pidx == fl->size) {
668 fl->pidx = 0;
669 sdesc = fl->sdesc;
670 d = fl->desc;
671 }
672 n--;
673 }
674
675alloc_small_pages:
676 while (n--) {
677 page = __dev_alloc_page(gfp);
678 if (unlikely(!page)) {
679 fl->alloc_failed++;
680 break;
681 }
682 poison_buf(page, PAGE_SIZE);
683
		dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE,
					DMA_FROM_DEVICE);
686 if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
687 put_page(page);
688 break;
689 }
690 *d++ = cpu_to_be64(dma_addr);
691
692 set_rx_sw_desc(sdesc, page, dma_addr);
693 sdesc++;
694
695 fl->avail++;
696 if (++fl->pidx == fl->size) {
697 fl->pidx = 0;
698 sdesc = fl->sdesc;
699 d = fl->desc;
700 }
701 }
702
703out:
704
705
706
707
708
709 cred = fl->avail - cred;
710 fl->pend_cred += cred;
711 ring_fl_db(adapter, fl);
712
713 if (unlikely(fl_starving(adapter, fl))) {
714 smp_wmb();
715 set_bit(fl->cntxt_id, adapter->sge.starving_fl);
716 }
717
718 return cred;
719}
720
721
722
723
724
725static inline void __refill_fl(struct adapter *adapter, struct sge_fl *fl)
726{
727 refill_fl(adapter, fl,
728 min((unsigned int)MAX_RX_REFILL, fl_cap(fl) - fl->avail),
729 GFP_ATOMIC);
730}
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
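/**
 *	alloc_ring - allocate resources for an SGE descriptor ring
 *	@dev: the PCI device's core device
 *	@nelem: the number of descriptors
 *	@hwsize: the size of each hardware descriptor
 *	@swsize: the size of each software descriptor (0 for none)
 *	@busaddrp: return value for the ring's bus (DMA) address
 *	@swringp: return value pointer for the allocated software ring
 *	@stat_size: extra space in the hardware ring for status entries
 *
 *	Allocates resources for an SGE descriptor ring, such as TX queues,
 *	Free Lists and Response Queues.  Each ring needs space for its
 *	hardware descriptors plus, optionally, a parallel software ring that
 *	tracks the state of each hardware descriptor.  Returns the virtual
 *	address of the (zeroed) hardware ring, or %NULL on allocation
 *	failure.
 */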
751static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
752 size_t swsize, dma_addr_t *busaddrp, void *swringp,
753 size_t stat_size)
754{
755
756
757
758 size_t hwlen = nelem * hwsize + stat_size;
759 void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL);
760
761 if (!hwring)
762 return NULL;
763
764
765
766
767
768 BUG_ON((swsize != 0) != (swringp != NULL));
769 if (swsize) {
770 void *swring = kcalloc(nelem, swsize, GFP_KERNEL);
771
772 if (!swring) {
773 dma_free_coherent(dev, hwlen, hwring, *busaddrp);
774 return NULL;
775 }
776 *(void **)swringp = swring;
777 }
778
779
780
781
782
783 memset(hwring, 0, hwlen);
784 return hwring;
785}
786
787
788
789
790
791
792
793
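/**
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *
 *	Calculates the number of flits (8-byte words) needed for a Direct
 *	Scatter/Gather List holding @n DMA buffers.  The first buffer's
 *	length and address live in the ULPTX SGL header (two flits); each
 *	following pair of buffers takes three flits (one flit for both
 *	lengths plus two address flits) and an odd final buffer takes two
 *	more, giving (3*(n-1))/2 + ((n-1)&1) + 2 flits in total.
 */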
794static inline unsigned int sgl_len(unsigned int n)
795{
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813 n--;
814 return (3 * n) / 2 + (n & 1) + 2;
815}
816
817
818
819
820
821
822
823
824static inline unsigned int flits_to_desc(unsigned int flits)
825{
826 BUG_ON(flits > SGE_MAX_WR_LEN / sizeof(__be64));
827 return DIV_ROUND_UP(flits, TXD_PER_EQ_UNIT);
828}
829
830
831
832
833
834
835
836
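/**
 *	is_eth_imm - can an Ethernet packet be sent as immediate data?
 *	@skb: the packet
 *
 *	Returns whether an Ethernet packet is small enough to fit entirely as
 *	immediate data in a TX Work Request.  The VF driver uses the firmware
 *	FW_ETH_TX_PKT_VM_WR Work Request, which does not accommodate
 *	immediate data, so this always returns false here.
 */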
837static inline int is_eth_imm(const struct sk_buff *skb)
838{
839
840
841
842
843
844
845
846 return false;
847}
848
849
850
851
852
853
854
855
856static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
857{
858 unsigned int flits;
859
860
861
862
863
864
865 if (is_eth_imm(skb))
866 return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
867 sizeof(__be64));
868
869
870
871
872
873
874
875
876
877
878 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
879 if (skb_shinfo(skb)->gso_size)
880 flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
881 sizeof(struct cpl_tx_pkt_lso_core) +
882 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
883 else
884 flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
885 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
886 return flits;
887}
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
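/**
 *	write_sgl - populate a Scatter/Gather List for a packet
 *	@skb: the packet
 *	@tq: the TX queue we are writing into
 *	@sgl: starting location for writing the SGL
 *	@end: points right after the end of the SGL
 *	@start: start offset into skb main-body data to include in the SGL
 *	@addr: the list of DMA bus addresses for the SGL elements
 *
 *	Generates a Scatter/Gather List for the buffers that make up a
 *	packet.  The caller must provide adequate space for the SGL that
 *	will be written.  The SGL includes all of the packet's page
 *	fragments and the data in its main body except for the first @start
 *	bytes.  @sgl must be 16-byte aligned and within a TX descriptor with
 *	available space.  @end points right after the end of the SGL but
 *	does not account for any potential wrap around, i.e., @end > @sgl.
 */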
906static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
907 struct ulptx_sgl *sgl, u64 *end, unsigned int start,
908 const dma_addr_t *addr)
909{
910 unsigned int i, len;
911 struct ulptx_sge_pair *to;
912 const struct skb_shared_info *si = skb_shinfo(skb);
913 unsigned int nfrags = si->nr_frags;
914 struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
915
916 len = skb_headlen(skb) - start;
917 if (likely(len)) {
918 sgl->len0 = htonl(len);
919 sgl->addr0 = cpu_to_be64(addr[0] + start);
920 nfrags++;
921 } else {
922 sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
923 sgl->addr0 = cpu_to_be64(addr[1]);
924 }
925
926 sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
927 ULPTX_NSGE_V(nfrags));
928 if (likely(--nfrags == 0))
929 return;
930
931
932
933
934
935 to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge;
936
937 for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
938 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
939 to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
940 to->addr[0] = cpu_to_be64(addr[i]);
941 to->addr[1] = cpu_to_be64(addr[++i]);
942 }
943 if (nfrags) {
944 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
945 to->len[1] = cpu_to_be32(0);
946 to->addr[0] = cpu_to_be64(addr[i + 1]);
947 }
948 if (unlikely((u8 *)end > (u8 *)tq->stat)) {
949 unsigned int part0 = (u8 *)tq->stat - (u8 *)sgl->sge, part1;
950
951 if (likely(part0))
952 memcpy(sgl->sge, buf, part0);
953 part1 = (u8 *)end - (u8 *)tq->stat;
954 memcpy(tq->desc, (u8 *)buf + part0, part1);
955 end = (void *)tq->desc + part1;
956 }
957 if ((uintptr_t)end & 8)
958 *end = 0;
959}
960
961
962
963
964
965
966
967
968
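/**
 *	ring_tx_db - ring a TX queue's doorbell
 *	@adapter: the adapter
 *	@tq: the TX queue
 *	@n: number of new descriptors to give to the hardware
 *
 *	Notify the hardware of newly written TX descriptors.  If the queue
 *	has a BAR2 User Doorbell mapping, the Write-Combined Doorbell Buffer
 *	is used (copying the just-written descriptor out to BAR2) when
 *	exactly one descriptor was written and the queue's BAR2 Queue ID is
 *	zero; otherwise the BAR2 or kernel doorbell register is written.
 */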
969static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
970 int n)
971{
972
973
974
975 wmb();
976
977
978
979
980 if (unlikely(tq->bar2_addr == NULL)) {
981 u32 val = PIDX_V(n);
982
983 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
984 QID_V(tq->cntxt_id) | val);
985 } else {
986 u32 val = PIDX_T5_V(n);
987
988
989
990
991
992
993
994 WARN_ON(val & DBPRIO_F);
995
996
997
998
999
1000 if (n == 1 && tq->bar2_qid == 0) {
1001 unsigned int index = (tq->pidx
1002 ? (tq->pidx - 1)
1003 : (tq->size - 1));
1004 __be64 *src = (__be64 *)&tq->desc[index];
1005 __be64 __iomem *dst = (__be64 __iomem *)(tq->bar2_addr +
1006 SGE_UDB_WCDOORBELL);
1007 unsigned int count = EQ_UNIT / sizeof(__be64);
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018 while (count) {
1019
1020
1021
1022
1023 writeq((__force u64)*src, dst);
1024 src++;
1025 dst++;
1026 count--;
1027 }
1028 } else
1029 writel(val | QID_V(tq->bar2_qid),
1030 tq->bar2_addr + SGE_UDB_KDOORBELL);
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042 wmb();
1043 }
1044}
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq,
1058 void *pos)
1059{
1060 u64 *p;
1061 int left = (void *)tq->stat - pos;
1062
1063 if (likely(skb->len <= left)) {
1064 if (likely(!skb->data_len))
1065 skb_copy_from_linear_data(skb, pos, skb->len);
1066 else
1067 skb_copy_bits(skb, 0, pos, skb->len);
1068 pos += skb->len;
1069 } else {
1070 skb_copy_bits(skb, 0, pos, left);
1071 skb_copy_bits(skb, left, tq->desc, skb->len - left);
1072 pos = (void *)tq->desc + (skb->len - left);
1073 }
1074
1075
1076 p = PTR_ALIGN(pos, 8);
1077 if ((uintptr_t)p & 8)
1078 *p = 0;
1079}
1080
1081
1082
1083
1084
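/*
 * Figure out what hardware checksum offload a packet needs and return the
 * appropriate control bits for its CPL_TX_PKT descriptor.  TCP and UDP
 * over IPv4/IPv6 are handled; anything else has L4 checksum generation
 * disabled.
 */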
1085static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
1086{
1087 int csum_type;
1088 const struct iphdr *iph = ip_hdr(skb);
1089
1090 if (iph->version == 4) {
1091 if (iph->protocol == IPPROTO_TCP)
1092 csum_type = TX_CSUM_TCPIP;
1093 else if (iph->protocol == IPPROTO_UDP)
1094 csum_type = TX_CSUM_UDPIP;
1095 else {
1096nocsum:
1097
1098
1099
1100
1101 return TXPKT_L4CSUM_DIS_F;
1102 }
1103 } else {
1104
1105
1106
1107 const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
1108
1109 if (ip6h->nexthdr == IPPROTO_TCP)
1110 csum_type = TX_CSUM_TCPIP6;
1111 else if (ip6h->nexthdr == IPPROTO_UDP)
1112 csum_type = TX_CSUM_UDPIP6;
1113 else
1114 goto nocsum;
1115 }
1116
1117 if (likely(csum_type >= TX_CSUM_TCPIP)) {
1118 u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb));
1119 int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
1120
1121 if (chip <= CHELSIO_T5)
1122 hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
1123 else
1124 hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
1125 return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
1126 } else {
1127 int start = skb_transport_offset(skb);
1128
1129 return TXPKT_CSUM_TYPE_V(csum_type) |
1130 TXPKT_CSUM_START_V(start) |
1131 TXPKT_CSUM_LOC_V(start + skb->csum_offset);
1132 }
1133}
1134
1135
1136
1137
1138static void txq_stop(struct sge_eth_txq *txq)
1139{
1140 netif_tx_stop_queue(txq->txq);
1141 txq->q.stops++;
1142}
1143
1144
1145
1146
1147static inline void txq_advance(struct sge_txq *tq, unsigned int n)
1148{
1149 tq->in_use += n;
1150 tq->pidx += n;
1151 if (tq->pidx >= tq->size)
1152 tq->pidx -= tq->size;
1153}
1154
1155
1156
1157
1158
1159
1160
1161
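/**
 *	t4vf_eth_xmit - add a packet to an Ethernet TX queue
 *	@skb: the packet
 *	@dev: the egress net device
 *
 *	Add a packet to an SGE Ethernet TX queue.  Runs with softirqs
 *	disabled.  Builds the firmware Work Request (with optional LSO and
 *	checksum-offload CPL messages), maps the packet for DMA unless it is
 *	inlined, writes its Scatter/Gather List into the ring and rings the
 *	TX doorbell.
 */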
1162int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1163{
1164 u32 wr_mid;
1165 u64 cntrl, *end;
1166 int qidx, credits, max_pkt_len;
1167 unsigned int flits, ndesc;
1168 struct adapter *adapter;
1169 struct sge_eth_txq *txq;
1170 const struct port_info *pi;
1171 struct fw_eth_tx_pkt_vm_wr *wr;
1172 struct cpl_tx_pkt_core *cpl;
1173 const struct skb_shared_info *ssi;
1174 dma_addr_t addr[MAX_SKB_FRAGS + 1];
1175 const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) +
1176 sizeof(wr->ethmacsrc) +
1177 sizeof(wr->ethtype) +
1178 sizeof(wr->vlantci));
1179
1180
1181
1182
1183
1184
1185
1186 if (unlikely(skb->len < fw_hdr_copy_len))
1187 goto out_free;
1188
1189
1190 max_pkt_len = ETH_HLEN + dev->mtu;
1191 if (skb_vlan_tag_present(skb))
1192 max_pkt_len += VLAN_HLEN;
1193 if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
1194 goto out_free;
1195
1196
1197
1198
1199 pi = netdev_priv(dev);
1200 adapter = pi->adapter;
1201 qidx = skb_get_queue_mapping(skb);
1202 BUG_ON(qidx >= pi->nqsets);
1203 txq = &adapter->sge.ethtxq[pi->first_qset + qidx];
1204
1205
1206
1207
1208
1209 reclaim_completed_tx(adapter, &txq->q, true);
1210
1211
1212
1213
1214
1215
1216 flits = calc_tx_flits(skb);
1217 ndesc = flits_to_desc(flits);
1218 credits = txq_avail(&txq->q) - ndesc;
1219
1220 if (unlikely(credits < 0)) {
1221
1222
1223
1224
1225
1226
1227 txq_stop(txq);
1228 dev_err(adapter->pdev_dev,
1229 "%s: TX ring %u full while queue awake!\n",
1230 dev->name, qidx);
1231 return NETDEV_TX_BUSY;
1232 }
1233
1234 if (!is_eth_imm(skb) &&
1235 unlikely(map_skb(adapter->pdev_dev, skb, addr) < 0)) {
1236
1237
1238
1239
1240
1241 txq->mapping_err++;
1242 goto out_free;
1243 }
1244
1245 wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
1246 if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256 txq_stop(txq);
1257 wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
1258 }
1259
1260
1261
1262
1263
1264
1265
1266 BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
1267 wr = (void *)&txq->q.desc[txq->q.pidx];
1268 wr->equiq_to_len16 = cpu_to_be32(wr_mid);
1269 wr->r3[0] = cpu_to_be32(0);
1270 wr->r3[1] = cpu_to_be32(0);
1271 skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
1272 end = (u64 *)wr + flits;
1273
1274
1275
1276
1277
1278
1279 ssi = skb_shinfo(skb);
1280 if (ssi->gso_size) {
1281 struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
1282 bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
1283 int l3hdr_len = skb_network_header_len(skb);
1284 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1285
1286 wr->op_immdlen =
1287 cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
1288 FW_WR_IMMDLEN_V(sizeof(*lso) +
1289 sizeof(*cpl)));
1290
1291
1292
1293 lso->lso_ctrl =
1294 cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
1295 LSO_FIRST_SLICE_F |
1296 LSO_LAST_SLICE_F |
1297 LSO_IPV6_V(v6) |
1298 LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
1299 LSO_IPHDR_LEN_V(l3hdr_len / 4) |
1300 LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
1301 lso->ipid_ofst = cpu_to_be16(0);
1302 lso->mss = cpu_to_be16(ssi->gso_size);
1303 lso->seqno_offset = cpu_to_be32(0);
1304 if (is_t4(adapter->params.chip))
1305 lso->len = cpu_to_be32(skb->len);
1306 else
1307 lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len));
1308
1309
1310
1311
1312
1313 cpl = (void *)(lso + 1);
1314
1315 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
1316 cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
1317 else
1318 cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
1319
1320 cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
1321 TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
1322 TXPKT_IPHDR_LEN_V(l3hdr_len);
1323 txq->tso++;
1324 txq->tx_cso += ssi->gso_segs;
1325 } else {
1326 int len;
1327
1328 len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
1329 wr->op_immdlen =
1330 cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
1331 FW_WR_IMMDLEN_V(len));
1332
1333
1334
1335
1336
1337 cpl = (void *)(wr + 1);
1338 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1339 cntrl = hwcsum(adapter->params.chip, skb) |
1340 TXPKT_IPCSUM_DIS_F;
1341 txq->tx_cso++;
1342 } else
1343 cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
1344 }
1345
1346
1347
1348
1349
1350 if (skb_vlan_tag_present(skb)) {
1351 txq->vlan_ins++;
1352 cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
1353 }
1354
1355
1356
1357
1358 cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
1359 TXPKT_INTF_V(pi->port_id) |
1360 TXPKT_PF_V(0));
1361 cpl->pack = cpu_to_be16(0);
1362 cpl->len = cpu_to_be16(skb->len);
1363 cpl->ctrl1 = cpu_to_be64(cntrl);
1364
1365#ifdef T4_TRACE
1366 T4_TRACE5(adapter->tb[txq->q.cntxt_id & 7],
1367 "eth_xmit: ndesc %u, credits %u, pidx %u, len %u, frags %u",
1368 ndesc, credits, txq->q.pidx, skb->len, ssi->nr_frags);
1369#endif
1370
1371
1372
1373
1374
1375 if (is_eth_imm(skb)) {
1376
1377
1378
1379
1380 inline_tx_skb(skb, &txq->q, cpl + 1);
1381 dev_consume_skb_any(skb);
1382 } else {
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420 struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
1421 struct sge_txq *tq = &txq->q;
1422 int last_desc;
1423
1424
1425
1426
1427
1428
1429
1430
1431 if (unlikely((void *)sgl == (void *)tq->stat)) {
1432 sgl = (void *)tq->desc;
1433 end = ((void *)tq->desc + ((void *)end - (void *)tq->stat));
1434 }
1435
1436 write_sgl(skb, tq, sgl, end, 0, addr);
1437 skb_orphan(skb);
1438
1439 last_desc = tq->pidx + ndesc - 1;
1440 if (last_desc >= tq->size)
1441 last_desc -= tq->size;
1442 tq->sdesc[last_desc].skb = skb;
1443 tq->sdesc[last_desc].sgl = sgl;
1444 }
1445
1446
1447
1448
1449
1450 txq_advance(&txq->q, ndesc);
1451 dev->trans_start = jiffies;
1452 ring_tx_db(adapter, &txq->q, ndesc);
1453 return NETDEV_TX_OK;
1454
1455out_free:
1456
1457
1458
1459
1460 dev_kfree_skb_any(skb);
1461 return NETDEV_TX_OK;
1462}
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473static inline void copy_frags(struct sk_buff *skb,
1474 const struct pkt_gl *gl,
1475 unsigned int offset)
1476{
1477 int i;
1478
1479
1480 __skb_fill_page_desc(skb, 0, gl->frags[0].page,
1481 gl->frags[0].offset + offset,
1482 gl->frags[0].size - offset);
1483 skb_shinfo(skb)->nr_frags = gl->nfrags;
1484 for (i = 1; i < gl->nfrags; i++)
1485 __skb_fill_page_desc(skb, i, gl->frags[i].page,
1486 gl->frags[i].offset,
1487 gl->frags[i].size);
1488
1489
1490 get_page(gl->frags[gl->nfrags - 1].page);
1491}
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
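/**
 *	t4vf_pktgl_to_skb - build an sk_buff from a packet gather list
 *	@gl: the gather list
 *	@skb_len: size of the sk_buff main body if it carries fragments
 *	@pull_len: amount of data to move into the sk_buff's main body
 *
 *	Builds an sk_buff from the given packet gather list.  Small packets
 *	(up to RX_COPY_THRES bytes) are copied entirely into the sk_buff's
 *	main body; larger packets get @pull_len bytes copied and reference
 *	the gather list pages for the rest.  Returns the sk_buff or %NULL if
 *	sk_buff allocation failed.
 */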
1502static struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
1503 unsigned int skb_len,
1504 unsigned int pull_len)
1505{
1506 struct sk_buff *skb;
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519 if (gl->tot_len <= RX_COPY_THRES) {
1520
1521 skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
1522 if (unlikely(!skb))
1523 goto out;
1524 __skb_put(skb, gl->tot_len);
1525 skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
1526 } else {
1527 skb = alloc_skb(skb_len, GFP_ATOMIC);
1528 if (unlikely(!skb))
1529 goto out;
1530 __skb_put(skb, pull_len);
1531 skb_copy_to_linear_data(skb, gl->va, pull_len);
1532
1533 copy_frags(skb, gl, pull_len);
1534 skb->len = gl->tot_len;
1535 skb->data_len = skb->len - pull_len;
1536 skb->truesize += skb->data_len;
1537 }
1538
1539out:
1540 return skb;
1541}
1542
1543
1544
1545
1546
1547
1548
1549
1550static void t4vf_pktgl_free(const struct pkt_gl *gl)
1551{
1552 int frag;
1553
1554 frag = gl->nfrags - 1;
1555 while (frag--)
1556 put_page(gl->frags[frag].page);
1557}
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1569 const struct cpl_rx_pkt *pkt)
1570{
1571 struct adapter *adapter = rxq->rspq.adapter;
1572 struct sge *s = &adapter->sge;
1573 int ret;
1574 struct sk_buff *skb;
1575
1576 skb = napi_get_frags(&rxq->rspq.napi);
1577 if (unlikely(!skb)) {
1578 t4vf_pktgl_free(gl);
1579 rxq->stats.rx_drops++;
1580 return;
1581 }
1582
1583 copy_frags(skb, gl, s->pktshift);
1584 skb->len = gl->tot_len - s->pktshift;
1585 skb->data_len = skb->len;
1586 skb->truesize += skb->data_len;
1587 skb->ip_summed = CHECKSUM_UNNECESSARY;
1588 skb_record_rx_queue(skb, rxq->rspq.idx);
1589
1590 if (pkt->vlan_ex) {
1591 __vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
1592 be16_to_cpu(pkt->vlan));
1593 rxq->stats.vlan_ex++;
1594 }
1595 ret = napi_gro_frags(&rxq->rspq.napi);
1596
1597 if (ret == GRO_HELD)
1598 rxq->stats.lro_pkts++;
1599 else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
1600 rxq->stats.lro_merged++;
1601 rxq->stats.pkts++;
1602 rxq->stats.rx_cso++;
1603}
1604
1605
1606
1607
1608
1609
1610
1611
1612
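/**
 *	t4vf_ethrx_handler - process an ingress Ethernet packet
 *	@rspq: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the RX_PKT message
 *	@gl: the gather list of packet fragments
 *
 *	Process an ingress Ethernet packet and deliver it to the stack,
 *	passing eligible TCP packets to GRO and propagating hardware
 *	checksum and VLAN extraction results to the sk_buff.
 */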
1613int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1614 const struct pkt_gl *gl)
1615{
1616 struct sk_buff *skb;
1617 const struct cpl_rx_pkt *pkt = (void *)rsp;
1618 bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
1619 (rspq->netdev->features & NETIF_F_RXCSUM);
1620 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
1621 struct adapter *adapter = rspq->adapter;
1622 struct sge *s = &adapter->sge;
1623
1624
1625
1626
1627
1628 if ((pkt->l2info & cpu_to_be32(RXF_TCP_F)) &&
1629 (rspq->netdev->features & NETIF_F_GRO) && csum_ok &&
1630 !pkt->ip_frag) {
1631 do_gro(rxq, gl, pkt);
1632 return 0;
1633 }
1634
1635
1636
1637
1638 skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
1639 if (unlikely(!skb)) {
1640 t4vf_pktgl_free(gl);
1641 rxq->stats.rx_drops++;
1642 return 0;
1643 }
1644 __skb_pull(skb, s->pktshift);
1645 skb->protocol = eth_type_trans(skb, rspq->netdev);
1646 skb_record_rx_queue(skb, rspq->idx);
1647 rxq->stats.pkts++;
1648
1649 if (csum_ok && !pkt->err_vec &&
1650 (be32_to_cpu(pkt->l2info) & (RXF_UDP_F | RXF_TCP_F))) {
1651 if (!pkt->ip_frag)
1652 skb->ip_summed = CHECKSUM_UNNECESSARY;
1653 else {
1654 __sum16 c = (__force __sum16)pkt->csum;
1655 skb->csum = csum_unfold(c);
1656 skb->ip_summed = CHECKSUM_COMPLETE;
1657 }
1658 rxq->stats.rx_cso++;
1659 } else
1660 skb_checksum_none_assert(skb);
1661
1662 if (pkt->vlan_ex) {
1663 rxq->stats.vlan_ex++;
1664 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(pkt->vlan));
1665 }
1666
1667 netif_receive_skb(skb);
1668
1669 return 0;
1670}
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680static inline bool is_new_response(const struct rsp_ctrl *rc,
1681 const struct sge_rspq *rspq)
1682{
1683 return ((rc->type_gen >> RSPD_GEN_S) & 0x1) == rspq->gen;
1684}
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
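/**
 *	restore_rx_bufs - put back a packet's RX buffers
 *	@gl: the packet gather list
 *	@fl: the SGE Free List
 *	@frags: number of Free List buffers to restore
 *
 *	Puts back on the Free List the buffers of a packet that could not be
 *	processed right away.  The buffers have already been unmapped, so
 *	they are marked RX_UNMAPPED_BUF to prevent a second unmap when they
 *	are eventually freed or recycled.
 */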
1706static void restore_rx_bufs(const struct pkt_gl *gl, struct sge_fl *fl,
1707 int frags)
1708{
1709 struct rx_sw_desc *sdesc;
1710
1711 while (frags--) {
1712 if (fl->cidx == 0)
1713 fl->cidx = fl->size - 1;
1714 else
1715 fl->cidx--;
1716 sdesc = &fl->sdesc[fl->cidx];
1717 sdesc->page = gl->frags[frags].page;
1718 sdesc->dma_addr |= RX_UNMAPPED_BUF;
1719 fl->avail++;
1720 }
1721}
1722
1723
1724
1725
1726
1727
1728
1729static inline void rspq_next(struct sge_rspq *rspq)
1730{
1731 rspq->cur_desc = (void *)rspq->cur_desc + rspq->iqe_len;
1732 if (unlikely(++rspq->cidx == rspq->size)) {
1733 rspq->cidx = 0;
1734 rspq->gen ^= 1;
1735 rspq->cur_desc = rspq->desc;
1736 }
1737}
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
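/**
 *	process_responses - process responses from an SGE response queue
 *	@rspq: the ingress response queue to process
 *	@budget: how many responses can be processed in this round
 *
 *	Process responses from a Scatter-Gather Engine response queue up to
 *	the supplied budget.  Responses include received packets as well as
 *	control messages from firmware or hardware.  Packet payload is
 *	gathered from Free List buffers into a packet gather list which is
 *	handed to the queue's handler, and the Free List is replenished
 *	before returning if it has drained by two or more Egress Queue
 *	Units.  Returns the number of responses processed.
 */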
1752static int process_responses(struct sge_rspq *rspq, int budget)
1753{
1754 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
1755 struct adapter *adapter = rspq->adapter;
1756 struct sge *s = &adapter->sge;
1757 int budget_left = budget;
1758
1759 while (likely(budget_left)) {
1760 int ret, rsp_type;
1761 const struct rsp_ctrl *rc;
1762
1763 rc = (void *)rspq->cur_desc + (rspq->iqe_len - sizeof(*rc));
1764 if (!is_new_response(rc, rspq))
1765 break;
1766
1767
1768
1769
1770
1771 dma_rmb();
1772 rsp_type = RSPD_TYPE_G(rc->type_gen);
1773 if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
1774 struct page_frag *fp;
1775 struct pkt_gl gl;
1776 const struct rx_sw_desc *sdesc;
1777 u32 bufsz, frag;
1778 u32 len = be32_to_cpu(rc->pldbuflen_qid);
1779
1780
1781
1782
1783
1784 if (len & RSPD_NEWBUF_F) {
1785
1786
1787
1788
1789
1790 if (likely(rspq->offset > 0)) {
1791 free_rx_bufs(rspq->adapter, &rxq->fl,
1792 1);
1793 rspq->offset = 0;
1794 }
1795 len = RSPD_LEN_G(len);
1796 }
1797 gl.tot_len = len;
1798
1799
1800
1801
1802 for (frag = 0, fp = gl.frags; ; frag++, fp++) {
1803 BUG_ON(frag >= MAX_SKB_FRAGS);
1804 BUG_ON(rxq->fl.avail == 0);
1805 sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
1806 bufsz = get_buf_size(adapter, sdesc);
1807 fp->page = sdesc->page;
1808 fp->offset = rspq->offset;
1809 fp->size = min(bufsz, len);
1810 len -= fp->size;
1811 if (!len)
1812 break;
1813 unmap_rx_buf(rspq->adapter, &rxq->fl);
1814 }
1815 gl.nfrags = frag+1;
1816
1817
1818
1819
1820
1821
1822 dma_sync_single_for_cpu(rspq->adapter->pdev_dev,
1823 get_buf_addr(sdesc),
1824 fp->size, DMA_FROM_DEVICE);
1825 gl.va = (page_address(gl.frags[0].page) +
1826 gl.frags[0].offset);
1827 prefetch(gl.va);
1828
1829
1830
1831
1832
1833 ret = rspq->handler(rspq, rspq->cur_desc, &gl);
1834 if (likely(ret == 0))
1835 rspq->offset += ALIGN(fp->size, s->fl_align);
1836 else
1837 restore_rx_bufs(&gl, &rxq->fl, frag);
1838 } else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
1839 ret = rspq->handler(rspq, rspq->cur_desc, NULL);
1840 } else {
1841 WARN_ON(rsp_type > RSPD_TYPE_CPL_X);
1842 ret = 0;
1843 }
1844
1845 if (unlikely(ret)) {
1846
1847
1848
1849
1850
1851 const int NOMEM_TIMER_IDX = SGE_NTIMERS-1;
1852 rspq->next_intr_params =
1853 QINTR_TIMER_IDX_V(NOMEM_TIMER_IDX);
1854 break;
1855 }
1856
1857 rspq_next(rspq);
1858 budget_left--;
1859 }
1860
1861
1862
1863
1864
1865
1866 if (rspq->offset >= 0 &&
1867 fl_cap(&rxq->fl) - rxq->fl.avail >= 2*FL_PER_EQ_UNIT)
1868 __refill_fl(rspq->adapter, &rxq->fl);
1869 return budget - budget_left;
1870}
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
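/**
 *	napi_rx_handler - the NAPI handler for RX processing
 *	@napi: the NAPI instance
 *	@budget: how many packets we can process in this round
 *
 *	Handler for new data events when using NAPI.  This does not need any
 *	locking or protection from interrupts as data interrupts are off at
 *	this point and other adapter interrupts do not interfere.  Once the
 *	budgeted work is done, NAPI is completed and the queue's interrupt
 *	is re-armed through its GTS doorbell.
 */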
1883static int napi_rx_handler(struct napi_struct *napi, int budget)
1884{
1885 unsigned int intr_params;
1886 struct sge_rspq *rspq = container_of(napi, struct sge_rspq, napi);
1887 int work_done = process_responses(rspq, budget);
1888 u32 val;
1889
1890 if (likely(work_done < budget)) {
1891 napi_complete(napi);
1892 intr_params = rspq->next_intr_params;
1893 rspq->next_intr_params = rspq->intr_params;
1894 } else
1895 intr_params = QINTR_TIMER_IDX_V(SGE_TIMER_UPD_CIDX);
1896
1897 if (unlikely(work_done == 0))
1898 rspq->unhandled_irqs++;
1899
1900 val = CIDXINC_V(work_done) | SEINTARM_V(intr_params);
1901
1902
1903
1904 if (unlikely(!rspq->bar2_addr)) {
1905 t4_write_reg(rspq->adapter,
1906 T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
1907 val | INGRESSQID_V((u32)rspq->cntxt_id));
1908 } else {
1909 writel(val | INGRESSQID_V(rspq->bar2_qid),
1910 rspq->bar2_addr + SGE_UDB_GTS);
1911 wmb();
1912 }
1913 return work_done;
1914}
1915
1916
1917
1918
1919
1920irqreturn_t t4vf_sge_intr_msix(int irq, void *cookie)
1921{
1922 struct sge_rspq *rspq = cookie;
1923
1924 napi_schedule(&rspq->napi);
1925 return IRQ_HANDLED;
1926}
1927
1928
1929
1930
1931
1932static unsigned int process_intrq(struct adapter *adapter)
1933{
1934 struct sge *s = &adapter->sge;
1935 struct sge_rspq *intrq = &s->intrq;
1936 unsigned int work_done;
1937 u32 val;
1938
1939 spin_lock(&adapter->sge.intrq_lock);
1940 for (work_done = 0; ; work_done++) {
1941 const struct rsp_ctrl *rc;
1942 unsigned int qid, iq_idx;
1943 struct sge_rspq *rspq;
1944
1945
1946
1947
1948
1949 rc = (void *)intrq->cur_desc + (intrq->iqe_len - sizeof(*rc));
1950 if (!is_new_response(rc, intrq))
1951 break;
1952
1953
1954
1955
1956
1957
1958 dma_rmb();
1959 if (unlikely(RSPD_TYPE_G(rc->type_gen) != RSPD_TYPE_INTR_X)) {
1960 dev_err(adapter->pdev_dev,
1961 "Unexpected INTRQ response type %d\n",
1962 RSPD_TYPE_G(rc->type_gen));
1963 continue;
1964 }
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974 qid = RSPD_QID_G(be32_to_cpu(rc->pldbuflen_qid));
1975 iq_idx = IQ_IDX(s, qid);
1976 if (unlikely(iq_idx >= MAX_INGQ)) {
1977 dev_err(adapter->pdev_dev,
1978 "Ingress QID %d out of range\n", qid);
1979 continue;
1980 }
1981 rspq = s->ingr_map[iq_idx];
1982 if (unlikely(rspq == NULL)) {
1983 dev_err(adapter->pdev_dev,
1984 "Ingress QID %d RSPQ=NULL\n", qid);
1985 continue;
1986 }
1987 if (unlikely(rspq->abs_id != qid)) {
1988 dev_err(adapter->pdev_dev,
1989 "Ingress QID %d refers to RSPQ %d\n",
1990 qid, rspq->abs_id);
1991 continue;
1992 }
1993
1994
1995
1996
1997
1998
1999 napi_schedule(&rspq->napi);
2000 rspq_next(intrq);
2001 }
2002
2003 val = CIDXINC_V(work_done) | SEINTARM_V(intrq->intr_params);
2004
2005
2006
2007 if (unlikely(!intrq->bar2_addr)) {
2008 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
2009 val | INGRESSQID_V(intrq->cntxt_id));
2010 } else {
2011 writel(val | INGRESSQID_V(intrq->bar2_qid),
2012 intrq->bar2_addr + SGE_UDB_GTS);
2013 wmb();
2014 }
2015
2016 spin_unlock(&adapter->sge.intrq_lock);
2017
2018 return work_done;
2019}
2020
2021
2022
2023
2024
2025static irqreturn_t t4vf_intr_msi(int irq, void *cookie)
2026{
2027 struct adapter *adapter = cookie;
2028
2029 process_intrq(adapter);
2030 return IRQ_HANDLED;
2031}
2032
2033
2034
2035
2036
2037
2038
2039
2040irq_handler_t t4vf_intr_handler(struct adapter *adapter)
2041{
2042 BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
2043 if (adapter->flags & USING_MSIX)
2044 return t4vf_sge_intr_msix;
2045 else
2046 return t4vf_intr_msi;
2047}
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
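/**
 *	sge_rx_timer_cb - perform periodic maintenance of SGE RX queues
 *	@data: the adapter
 *
 *	Runs periodically from a timer to check for Free Lists which have
 *	been marked as starving and, if they still have too few buffers for
 *	the hardware to use, reschedules their NAPI instances so they get a
 *	chance to be refilled.
 */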
2060static void sge_rx_timer_cb(unsigned long data)
2061{
2062 struct adapter *adapter = (struct adapter *)data;
2063 struct sge *s = &adapter->sge;
2064 unsigned int i;
2065
2066
2067
2068
2069
2070
2071
2072
2073
2074 for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) {
2075 unsigned long m;
2076
2077 for (m = s->starving_fl[i]; m; m &= m - 1) {
2078 unsigned int id = __ffs(m) + i * BITS_PER_LONG;
2079 struct sge_fl *fl = s->egr_map[id];
2080
2081 clear_bit(id, s->starving_fl);
2082 smp_mb__after_atomic();
2083
2084
2085
2086
2087
2088
2089
2090 if (fl_starving(adapter, fl)) {
2091 struct sge_eth_rxq *rxq;
2092
2093 rxq = container_of(fl, struct sge_eth_rxq, fl);
2094 if (napi_reschedule(&rxq->rspq.napi))
2095 fl->starving++;
2096 else
2097 set_bit(id, s->starving_fl);
2098 }
2099 }
2100 }
2101
2102
2103
2104
2105 mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
2106}
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
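/**
 *	sge_tx_timer_cb - perform periodic maintenance of SGE TX queues
 *	@data: the adapter
 *
 *	Runs periodically from a timer to reclaim completed TX descriptors
 *	from the Ethernet TX queues, round-robining across the queue sets
 *	with a per-run budget of MAX_TIMER_TX_RECLAIM descriptors.
 */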
2119static void sge_tx_timer_cb(unsigned long data)
2120{
2121 struct adapter *adapter = (struct adapter *)data;
2122 struct sge *s = &adapter->sge;
2123 unsigned int i, budget;
2124
2125 budget = MAX_TIMER_TX_RECLAIM;
2126 i = s->ethtxq_rover;
2127 do {
2128 struct sge_eth_txq *txq = &s->ethtxq[i];
2129
2130 if (reclaimable(&txq->q) && __netif_tx_trylock(txq->txq)) {
2131 int avail = reclaimable(&txq->q);
2132
2133 if (avail > budget)
2134 avail = budget;
2135
2136 free_tx_desc(adapter, &txq->q, avail, true);
2137 txq->q.in_use -= avail;
2138 __netif_tx_unlock(txq->txq);
2139
2140 budget -= avail;
2141 if (!budget)
2142 break;
2143 }
2144
2145 i++;
2146 if (i >= s->ethqsets)
2147 i = 0;
2148 } while (i != s->ethtxq_rover);
2149 s->ethtxq_rover = i;
2150
2151
2152
2153
2154
2155
2156 mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
2157}
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172static void __iomem *bar2_address(struct adapter *adapter,
2173 unsigned int qid,
2174 enum t4_bar2_qtype qtype,
2175 unsigned int *pbar2_qid)
2176{
2177 u64 bar2_qoffset;
2178 int ret;
2179
2180 ret = t4vf_bar2_sge_qregs(adapter, qid, qtype,
2181 &bar2_qoffset, pbar2_qid);
2182 if (ret)
2183 return NULL;
2184
2185 return adapter->bar2 + bar2_qoffset;
2186}
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
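/**
 *	t4vf_sge_alloc_rxq - allocate an SGE RX (Response) Queue
 *	@adapter: the adapter
 *	@rspq: pointer to the new queue's Response Queue state
 *	@iqasynch: whether this is an asynchronous (firmware event) queue
 *	@dev: the network device associated with the new queue
 *	@intr_dest: MSI-X vector index (interrupts are forwarded to the
 *		common interrupt queue instead when plain MSI is in use)
 *	@fl: pointer to the new queue's Free List state, or NULL for none
 *	@hnd: the response handler to invoke for this queue
 *
 *	Allocates the host descriptor rings, issues the FW_IQ_CMD mailbox
 *	command to create the Ingress Queue (and optional Free List) in the
 *	hardware, and initializes the software state, including the BAR2
 *	doorbell addresses.  Returns 0 on success or a negative errno.
 */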
2198int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
2199 bool iqasynch, struct net_device *dev,
2200 int intr_dest,
2201 struct sge_fl *fl, rspq_handler_t hnd)
2202{
2203 struct sge *s = &adapter->sge;
2204 struct port_info *pi = netdev_priv(dev);
2205 struct fw_iq_cmd cmd, rpl;
2206 int ret, iqandst, flsz = 0;
2207
2208
2209
2210
2211
2212
2213
2214
2215 if ((adapter->flags & USING_MSI) && rspq != &adapter->sge.intrq) {
2216 iqandst = SGE_INTRDST_IQ;
2217 intr_dest = adapter->sge.intrq.abs_id;
2218 } else
2219 iqandst = SGE_INTRDST_PCI;
2220
2221
2222
2223
2224
2225
2226
2227 rspq->size = roundup(rspq->size, 16);
2228 rspq->desc = alloc_ring(adapter->pdev_dev, rspq->size, rspq->iqe_len,
2229 0, &rspq->phys_addr, NULL, 0);
2230 if (!rspq->desc)
2231 return -ENOMEM;
2232
2233
2234
2235
2236
2237
2238
2239
2240 memset(&cmd, 0, sizeof(cmd));
2241 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) |
2242 FW_CMD_REQUEST_F |
2243 FW_CMD_WRITE_F |
2244 FW_CMD_EXEC_F);
2245 cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC_F |
2246 FW_IQ_CMD_IQSTART_F |
2247 FW_LEN16(cmd));
2248 cmd.type_to_iqandstindex =
2249 cpu_to_be32(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
2250 FW_IQ_CMD_IQASYNCH_V(iqasynch) |
2251 FW_IQ_CMD_VIID_V(pi->viid) |
2252 FW_IQ_CMD_IQANDST_V(iqandst) |
2253 FW_IQ_CMD_IQANUS_V(1) |
2254 FW_IQ_CMD_IQANUD_V(SGE_UPDATEDEL_INTR) |
2255 FW_IQ_CMD_IQANDSTINDEX_V(intr_dest));
2256 cmd.iqdroprss_to_iqesize =
2257 cpu_to_be16(FW_IQ_CMD_IQPCIECH_V(pi->port_id) |
2258 FW_IQ_CMD_IQGTSMODE_F |
2259 FW_IQ_CMD_IQINTCNTTHRESH_V(rspq->pktcnt_idx) |
2260 FW_IQ_CMD_IQESIZE_V(ilog2(rspq->iqe_len) - 4));
2261 cmd.iqsize = cpu_to_be16(rspq->size);
2262 cmd.iqaddr = cpu_to_be64(rspq->phys_addr);
2263
2264 if (fl) {
2265 enum chip_type chip =
2266 CHELSIO_CHIP_VERSION(adapter->params.chip);
2267
2268
2269
2270
2271
2272
2273
2274
2275 if (fl->size < s->fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT)
2276 fl->size = s->fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT;
2277 fl->size = roundup(fl->size, FL_PER_EQ_UNIT);
2278 fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
2279 sizeof(__be64), sizeof(struct rx_sw_desc),
2280 &fl->addr, &fl->sdesc, s->stat_len);
2281 if (!fl->desc) {
2282 ret = -ENOMEM;
2283 goto err;
2284 }
2285
2286
2287
2288
2289
2290
2291 flsz = (fl->size / FL_PER_EQ_UNIT +
2292 s->stat_len / EQ_UNIT);
2293
2294
2295
2296
2297
2298 cmd.iqns_to_fl0congen =
2299 cpu_to_be32(
2300 FW_IQ_CMD_FL0HOSTFCMODE_V(SGE_HOSTFCMODE_NONE) |
2301 FW_IQ_CMD_FL0PACKEN_F |
2302 FW_IQ_CMD_FL0PADEN_F);
2303
2304
2305
2306
2307
2308
2309
2310
2311
2312 cmd.fl0dcaen_to_fl0cidxfthresh =
2313 cpu_to_be16(
2314 FW_IQ_CMD_FL0FBMIN_V(chip <= CHELSIO_T5 ?
2315 FETCHBURSTMIN_128B_X :
2316 FETCHBURSTMIN_64B_X) |
2317 FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ?
2318 FETCHBURSTMAX_512B_X :
2319 FETCHBURSTMAX_256B_X));
2320 cmd.fl0size = cpu_to_be16(flsz);
2321 cmd.fl0addr = cpu_to_be64(fl->addr);
2322 }
2323
2324
2325
2326
2327
2328 ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
2329 if (ret)
2330 goto err;
2331
2332 netif_napi_add(dev, &rspq->napi, napi_rx_handler, 64);
2333 rspq->cur_desc = rspq->desc;
2334 rspq->cidx = 0;
2335 rspq->gen = 1;
2336 rspq->next_intr_params = rspq->intr_params;
2337 rspq->cntxt_id = be16_to_cpu(rpl.iqid);
2338 rspq->bar2_addr = bar2_address(adapter,
2339 rspq->cntxt_id,
2340 T4_BAR2_QTYPE_INGRESS,
2341 &rspq->bar2_qid);
2342 rspq->abs_id = be16_to_cpu(rpl.physiqid);
2343 rspq->size--;
2344 rspq->adapter = adapter;
2345 rspq->netdev = dev;
2346 rspq->handler = hnd;
2347
2348
2349 rspq->offset = fl ? 0 : -1;
2350
2351 if (fl) {
2352 fl->cntxt_id = be16_to_cpu(rpl.fl0id);
2353 fl->avail = 0;
2354 fl->pend_cred = 0;
2355 fl->pidx = 0;
2356 fl->cidx = 0;
2357 fl->alloc_failed = 0;
2358 fl->large_alloc_failed = 0;
2359 fl->starving = 0;
2360
2361
2362
2363
2364 fl->bar2_addr = bar2_address(adapter,
2365 fl->cntxt_id,
2366 T4_BAR2_QTYPE_EGRESS,
2367 &fl->bar2_qid);
2368
2369 refill_fl(adapter, fl, fl_cap(fl), GFP_KERNEL);
2370 }
2371
2372 return 0;
2373
2374err:
2375
2376
2377
2378
2379 if (rspq->desc) {
2380 dma_free_coherent(adapter->pdev_dev, rspq->size * rspq->iqe_len,
2381 rspq->desc, rspq->phys_addr);
2382 rspq->desc = NULL;
2383 }
2384 if (fl && fl->desc) {
2385 kfree(fl->sdesc);
2386 fl->sdesc = NULL;
2387 dma_free_coherent(adapter->pdev_dev, flsz * EQ_UNIT,
2388 fl->desc, fl->addr);
2389 fl->desc = NULL;
2390 }
2391 return ret;
2392}
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
2403 struct net_device *dev, struct netdev_queue *devq,
2404 unsigned int iqid)
2405{
2406 struct sge *s = &adapter->sge;
2407 int ret, nentries;
2408 struct fw_eq_eth_cmd cmd, rpl;
2409 struct port_info *pi = netdev_priv(dev);
2410
2411
2412
2413
2414
2415 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2416
2417
2418
2419
2420
2421 txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
2422 sizeof(struct tx_desc),
2423 sizeof(struct tx_sw_desc),
2424 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len);
2425 if (!txq->q.desc)
2426 return -ENOMEM;
2427
2428
2429
2430
2431
2432
2433
2434
2435 memset(&cmd, 0, sizeof(cmd));
2436 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
2437 FW_CMD_REQUEST_F |
2438 FW_CMD_WRITE_F |
2439 FW_CMD_EXEC_F);
2440 cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC_F |
2441 FW_EQ_ETH_CMD_EQSTART_F |
2442 FW_LEN16(cmd));
2443 cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
2444 FW_EQ_ETH_CMD_VIID_V(pi->viid));
2445 cmd.fetchszm_to_iqid =
2446 cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE_V(SGE_HOSTFCMODE_STPG) |
2447 FW_EQ_ETH_CMD_PCIECHN_V(pi->port_id) |
2448 FW_EQ_ETH_CMD_IQID_V(iqid));
2449 cmd.dcaen_to_eqsize =
2450 cpu_to_be32(FW_EQ_ETH_CMD_FBMIN_V(SGE_FETCHBURSTMIN_64B) |
2451 FW_EQ_ETH_CMD_FBMAX_V(SGE_FETCHBURSTMAX_512B) |
2452 FW_EQ_ETH_CMD_CIDXFTHRESH_V(
2453 SGE_CIDXFLUSHTHRESH_32) |
2454 FW_EQ_ETH_CMD_EQSIZE_V(nentries));
2455 cmd.eqaddr = cpu_to_be64(txq->q.phys_addr);
2456
2457
2458
2459
2460
2461 ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
2462 if (ret) {
2463
2464
2465
2466
2467 kfree(txq->q.sdesc);
2468 txq->q.sdesc = NULL;
2469 dma_free_coherent(adapter->pdev_dev,
2470 nentries * sizeof(struct tx_desc),
2471 txq->q.desc, txq->q.phys_addr);
2472 txq->q.desc = NULL;
2473 return ret;
2474 }
2475
2476 txq->q.in_use = 0;
2477 txq->q.cidx = 0;
2478 txq->q.pidx = 0;
2479 txq->q.stat = (void *)&txq->q.desc[txq->q.size];
2480 txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_G(be32_to_cpu(rpl.eqid_pkd));
2481 txq->q.bar2_addr = bar2_address(adapter,
2482 txq->q.cntxt_id,
2483 T4_BAR2_QTYPE_EGRESS,
2484 &txq->q.bar2_qid);
2485 txq->q.abs_id =
2486 FW_EQ_ETH_CMD_PHYSEQID_G(be32_to_cpu(rpl.physeqid_pkd));
2487 txq->txq = devq;
2488 txq->tso = 0;
2489 txq->tx_cso = 0;
2490 txq->vlan_ins = 0;
2491 txq->q.stops = 0;
2492 txq->q.restarts = 0;
2493 txq->mapping_err = 0;
2494 return 0;
2495}
2496
2497
2498
2499
2500static void free_txq(struct adapter *adapter, struct sge_txq *tq)
2501{
2502 struct sge *s = &adapter->sge;
2503
2504 dma_free_coherent(adapter->pdev_dev,
2505 tq->size * sizeof(*tq->desc) + s->stat_len,
2506 tq->desc, tq->phys_addr);
2507 tq->cntxt_id = 0;
2508 tq->sdesc = NULL;
2509 tq->desc = NULL;
2510}
2511
2512
2513
2514
2515
2516static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
2517 struct sge_fl *fl)
2518{
2519 struct sge *s = &adapter->sge;
2520 unsigned int flid = fl ? fl->cntxt_id : 0xffff;
2521
2522 t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP,
2523 rspq->cntxt_id, flid, 0xffff);
2524 dma_free_coherent(adapter->pdev_dev, (rspq->size + 1) * rspq->iqe_len,
2525 rspq->desc, rspq->phys_addr);
2526 netif_napi_del(&rspq->napi);
2527 rspq->netdev = NULL;
2528 rspq->cntxt_id = 0;
2529 rspq->abs_id = 0;
2530 rspq->desc = NULL;
2531
2532 if (fl) {
2533 free_rx_bufs(adapter, fl, fl->avail);
2534 dma_free_coherent(adapter->pdev_dev,
2535 fl->size * sizeof(*fl->desc) + s->stat_len,
2536 fl->desc, fl->addr);
2537 kfree(fl->sdesc);
2538 fl->sdesc = NULL;
2539 fl->cntxt_id = 0;
2540 fl->desc = NULL;
2541 }
2542}
2543
2544
2545
2546
2547
2548
2549
2550void t4vf_free_sge_resources(struct adapter *adapter)
2551{
2552 struct sge *s = &adapter->sge;
2553 struct sge_eth_rxq *rxq = s->ethrxq;
2554 struct sge_eth_txq *txq = s->ethtxq;
2555 struct sge_rspq *evtq = &s->fw_evtq;
2556 struct sge_rspq *intrq = &s->intrq;
2557 int qs;
2558
2559 for (qs = 0; qs < adapter->sge.ethqsets; qs++, rxq++, txq++) {
2560 if (rxq->rspq.desc)
2561 free_rspq_fl(adapter, &rxq->rspq, &rxq->fl);
2562 if (txq->q.desc) {
2563 t4vf_eth_eq_free(adapter, txq->q.cntxt_id);
2564 free_tx_desc(adapter, &txq->q, txq->q.in_use, true);
2565 kfree(txq->q.sdesc);
2566 free_txq(adapter, &txq->q);
2567 }
2568 }
2569 if (evtq->desc)
2570 free_rspq_fl(adapter, evtq, NULL);
2571 if (intrq->desc)
2572 free_rspq_fl(adapter, intrq, NULL);
2573}
2574
2575
2576
2577
2578
2579
2580
2581void t4vf_sge_start(struct adapter *adapter)
2582{
2583 adapter->sge.ethtxq_rover = 0;
2584 mod_timer(&adapter->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
2585 mod_timer(&adapter->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
2586}
2587
2588
2589
2590
2591
2592
2593
2594
2595
2596void t4vf_sge_stop(struct adapter *adapter)
2597{
2598 struct sge *s = &adapter->sge;
2599
2600 if (s->rx_timer.function)
2601 del_timer_sync(&s->rx_timer);
2602 if (s->tx_timer.function)
2603 del_timer_sync(&s->tx_timer);
2604}
2605
2606
2607
2608
2609
2610
2611
2612
2613
2614
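/**
 *	t4vf_sge_init - initialize the adapter's SGE state
 *	@adapter: the adapter
 *
 *	Performs SGE initialization needed every time after a chip reset.
 *	Validates the firmware's Free List buffer sizes and packet-CPL
 *	settings, derives the host shadow copies of the SGE parameters
 *	(status page length, packet shift, Free List alignment and
 *	starvation threshold), and sets up the RX/TX maintenance timers and
 *	the forwarded-interrupt queue lock.  Returns 0 on success or a
 *	negative errno if the hardware/firmware configuration is unusable.
 */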
2615int t4vf_sge_init(struct adapter *adapter)
2616{
2617 struct sge_params *sge_params = &adapter->params.sge;
2618 u32 fl0 = sge_params->sge_fl_buffer_size[0];
2619 u32 fl1 = sge_params->sge_fl_buffer_size[1];
2620 struct sge *s = &adapter->sge;
2621
2622
2623
2624
2625
2626
2627 if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) {
2628 dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
2629 fl0, fl1);
2630 return -EINVAL;
2631 }
2632 if ((sge_params->sge_control & RXPKTCPLMODE_F) !=
2633 RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
2634 dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
2635 return -EINVAL;
2636 }
2637
2638
2639
2640
2641 if (fl1)
2642 s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
2643 s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
2644 ? 128 : 64);
2645 s->pktshift = PKTSHIFT_G(sge_params->sge_control);
2646 s->fl_align = t4vf_fl_pkt_align(adapter);
2647
2648
2649
2650
2651
2652
2653
2654
2655 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
2656 case CHELSIO_T4:
2657 s->fl_starve_thres =
2658 EGRTHRESHOLD_G(sge_params->sge_congestion_control);
2659 break;
2660 case CHELSIO_T5:
2661 s->fl_starve_thres =
2662 EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control);
2663 break;
2664 case CHELSIO_T6:
2665 default:
2666 s->fl_starve_thres =
2667 T6_EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control);
2668 break;
2669 }
2670 s->fl_starve_thres = s->fl_starve_thres * 2 + 1;
2671
2672
2673
2674
2675 setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adapter);
2676 setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adapter);
2677
2678
2679
2680
2681 spin_lock_init(&s->intrq_lock);
2682
2683 return 0;
2684}
2685