1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36#include <linux/skbuff.h>
37#include <linux/netdevice.h>
38#include <linux/etherdevice.h>
39#include <linux/if_vlan.h>
40#include <linux/ip.h>
41#include <net/ipv6.h>
42#include <net/tcp.h>
43#include <linux/dma-mapping.h>
44#include <linux/prefetch.h>
45
46#include "t4vf_common.h"
47#include "t4vf_defs.h"
48
49#include "../cxgb4/t4_regs.h"
50#include "../cxgb4/t4_values.h"
51#include "../cxgb4/t4fw_api.h"
52#include "../cxgb4/t4_msg.h"
53
54
55
56
/*
 * Scatter Gather Engine (SGE) tuning constants: queue unit sizing,
 * reclaim/refill batch limits, service-timer periods and Work Request
 * size limits.
 */
enum {
	/*
	 * Egress Queue sizes, producer and consumer indices are all in
	 * units of EQ_UNIT bytes.  Free List and TX descriptors are each
	 * one __be64, hence the per-EQ-unit descriptor counts below.
	 */
	EQ_UNIT = SGE_EQ_IDXSIZE,
	FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
	TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),

	/*
	 * Max number of TX descriptors we reclaim in a single pass
	 * (bounds the time spent inside reclaim_completed_tx()).
	 */
	MAX_TX_RECLAIM = 16,

	/*
	 * Max number of Rx Free List buffers we refill at a time in
	 * __refill_fl().
	 */
	MAX_RX_REFILL = 16,

	/*
	 * Period of the RX queue check timer, in jiffies.
	 * (NOTE(review): timer usage itself is outside this chunk.)
	 */
	RX_QCHECK_PERIOD = (HZ / 2),

	/*
	 * Period of the TX queue check timer and the maximum number of
	 * TX descriptors that timer is allowed to reclaim per run.
	 */
	TX_QCHECK_PERIOD = (HZ / 2),
	MAX_TIMER_TX_RECLAIM = 100,

	/*
	 * Worst-case Ethernet TX Work Request sizing: one SGL entry per
	 * skb page fragment plus one for the linear header.  The SGL
	 * flit count mirrors sgl_len(); the header flits cover the VM
	 * TX WR plus (optionally) the LSO CPL and the TX packet CPL.
	 */
	ETHTXQ_MAX_FRAGS = MAX_SKB_FRAGS + 1,
	ETHTXQ_MAX_SGL_LEN = ((3 * (ETHTXQ_MAX_FRAGS-1))/2 +
			      ((ETHTXQ_MAX_FRAGS-1) & 1) +
			      2),
	ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
			  sizeof(struct cpl_tx_pkt_lso_core) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
	ETHTXQ_MAX_FLITS = ETHTXQ_MAX_SGL_LEN + ETHTXQ_MAX_HDR,

	/* Stop a TX queue when fewer than this many descriptors remain */
	ETHTXQ_STOP_THRES = 1 + DIV_ROUND_UP(ETHTXQ_MAX_FLITS, TXD_PER_EQ_UNIT),

	/*
	 * Max TX packet length that can be carried as immediate data in
	 * a firmware Work Request (bounded by the IMMDLEN field width).
	 */
	MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_M,

	/*
	 * Max size of a control Work Request, in bytes.
	 */
	MAX_CTRL_WR_LEN = 256,

	/*
	 * Largest amount of immediate data we might have to inline into
	 * a TX ring: the larger of the two immediate-data limits above.
	 */
	MAX_IMM_TX_LEN = (MAX_IMM_TX_PKT_LEN > MAX_CTRL_WR_LEN
			  ? MAX_IMM_TX_PKT_LEN
			  : MAX_CTRL_WR_LEN),

	/*
	 * RX packets up to RX_COPY_THRES bytes are copied into a freshly
	 * allocated skb (see t4vf_pktgl_to_skb()); larger packets keep
	 * their page fragments and only have RX_PULL_LEN bytes pulled
	 * into the skb header area.
	 */
	RX_COPY_THRES = 256,
	RX_PULL_LEN = 128,

	/*
	 * Linear buffer size for skbs built around page-fragment gather
	 * lists (must be able to hold at least RX_PULL_LEN bytes).
	 */
	RX_SKB_LEN = 512,
};
153
154
155
156
/*
 * Software state per TX descriptor.
 */
struct tx_sw_desc {
	struct sk_buff *skb;		/* skb to free once transmission completes */
	struct ulptx_sgl *sgl;		/* start of the packet's SGL in the ring,
					 * needed to unmap its DMA buffers */
};
161
162
163
164
165
166
167
/*
 * Software state per RX Free List buffer.  The low-order bits of
 * dma_addr carry the RX_LARGE_BUF/RX_UNMAPPED_BUF state flags, so the
 * true bus address must be recovered with get_buf_addr().
 */
struct rx_sw_desc {
	struct page *page;		/* Free List page buffer */
	dma_addr_t dma_addr;		/* PCI bus address + state bits */

};
173
174
175
176
177
178
179
180
181
182
/*
 * State flags stored in the low-order bits of an RX buffer's DMA
 * address (page buffers are page-aligned, so the bottom bits are free
 * to carry software state):
 */
enum {
	RX_LARGE_BUF = 1 << 0,		/* buffer is a large page, not PAGE_SIZE */
	RX_UNMAPPED_BUF = 1 << 1,	/* buffer is not DMA-mapped */
};
187
188
189
190
191
192
193
194
195static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *sdesc)
196{
197 return sdesc->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF);
198}
199
200
201
202
203
204
205
206
207static inline bool is_buf_mapped(const struct rx_sw_desc *sdesc)
208{
209 return !(sdesc->dma_addr & RX_UNMAPPED_BUF);
210}
211
212
213
214
215
216
217
/**
 *	need_skb_unmap - does the platform need unmapping of sk_buffs?
 *
 *	Returns true if the platform needs sk_buff unmapping.  Evaluates
 *	to a compile-time constant, so when it returns 0 the compiler can
 *	optimize away the unmapping code entirely.
 */
static inline int need_skb_unmap(void)
{
#ifdef CONFIG_NEED_DMA_MAP_STATE
	return 1;
#else
	return 0;
#endif
}
226
227
228
229
230
231
232
233static inline unsigned int txq_avail(const struct sge_txq *tq)
234{
235 return tq->size - 1 - tq->in_use;
236}
237
238
239
240
241
242
243
244
245
246
247static inline unsigned int fl_cap(const struct sge_fl *fl)
248{
249 return fl->size - FL_PER_EQ_UNIT;
250}
251
252
253
254
255
256
257
258
259
260
/**
 *	fl_starving - return whether a Free List is starving.
 *	@adapter: the adapter
 *	@fl: the Free List
 *
 *	Tests whether a Free List is "starving": the number of buffers
 *	the hardware actually has available (allocated minus credits not
 *	yet pushed to the hardware) has dropped to or below the adapter's
 *	starvation threshold.
 */
static inline bool fl_starving(const struct adapter *adapter,
			       const struct sge_fl *fl)
{
	const struct sge *s = &adapter->sge;

	return fl->avail - fl->pend_cred <= s->fl_starve_thres;
}
268
269
270
271
272
273
274
275
276
/**
 *	map_skb - map an skb for DMA to the device
 *	@dev: the egress device's DMA device
 *	@skb: the packet to map
 *	@addr: the start of the array to receive the DMA addresses
 *
 *	Maps the skb's linear header into @addr[0] and each page fragment
 *	into the following slots.  On any mapping failure all mappings
 *	made so far are unwound and -ENOMEM is returned; returns 0 on
 *	success.
 */
static int map_skb(struct device *dev, const struct sk_buff *skb,
		   dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *addr))
		goto out_err;

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];
	for (fp = si->frags; fp < end; fp++) {
		/* pre-increment: fragment i lands in addr[i + 1] */
		*++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *addr))
			goto unwind;
	}
	return 0;

unwind:
	/* undo the fragment mappings made so far, walking backwards ... */
	while (fp-- > si->frags)
		dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
	/* ... then the header mapping (addr has been walked back to &addr[1]) */
	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);

out_err:
	return -ENOMEM;
}
305
/*
 * unmap_sgl - unmap all the DMA buffers described by a TX SGL
 *
 * The SGL lives inside the TX descriptor ring and may wrap past the end
 * of the ring: the ring is immediately followed by the status page at
 * tq->stat, and entries logically continue at tq->desc.  Each
 * ulptx_sge_pair may therefore lie wholly before the status page, be
 * split across it, or start exactly on it — hence the four cases below.
 */
static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
		      const struct ulptx_sgl *sgl, const struct sge_txq *tq)
{
	const struct ulptx_sge_pair *p;
	unsigned int nfrags = skb_shinfo(skb)->nr_frags;

	/* addr0/len0 cover the linear header if present, else fragment 0 */
	if (likely(skb_headlen(skb)))
		dma_unmap_single(dev, be64_to_cpu(sgl->addr0),
				 be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
	else {
		dma_unmap_page(dev, be64_to_cpu(sgl->addr0),
			       be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
		nfrags--;
	}

	/*
	 * Walk the remaining SGEs two at a time, handling the possible
	 * wrap of each pair around the end of the ring.
	 */
	for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
		if (likely((u8 *)(p + 1) <= (u8 *)tq->stat)) {
			/* whole pair fits before the status page */
unmap:
			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
				       be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
				       be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
			p++;
		} else if ((u8 *)p == (u8 *)tq->stat) {
			/* pair starts exactly at the wrap point */
			p = (const struct ulptx_sge_pair *)tq->desc;
			goto unmap;
		} else if ((u8 *)p + 8 == (u8 *)tq->stat) {
			/* only the len[] flit lies before the wrap;
			 * both addresses are at the ring start */
			const __be64 *addr = (const __be64 *)tq->desc;

			dma_unmap_page(dev, be64_to_cpu(addr[0]),
				       be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(addr[1]),
				       be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
			p = (const struct ulptx_sge_pair *)&addr[2];
		} else {
			/* len[] and addr[0] lie before the wrap;
			 * addr[1] is the first flit at the ring start */
			const __be64 *addr = (const __be64 *)tq->desc;

			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
				       be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(addr[0]),
				       be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
			p = (const struct ulptx_sge_pair *)&addr[1];
		}
	}
	/* one trailing odd SGE, possibly wrapped */
	if (nfrags) {
		__be64 addr;

		if ((u8 *)p == (u8 *)tq->stat)
			p = (const struct ulptx_sge_pair *)tq->desc;
		addr = ((u8 *)p + 16 <= (u8 *)tq->stat
			? p->addr[0]
			: *(const __be64 *)tq->desc);
		dma_unmap_page(dev, be64_to_cpu(addr), be32_to_cpu(p->len[0]),
			       DMA_TO_DEVICE);
	}
}
366
367
368
369
370
371
372
373
374
375
376
/**
 *	free_tx_desc - reclaims TX descriptors and their buffers
 *	@adapter: the adapter
 *	@tq: the TX queue to reclaim descriptors from
 *	@n: the number of descriptors to reclaim
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims @n TX descriptors starting at the queue's consumer index
 *	and frees the associated skbs (unmapping their DMA buffers first
 *	when @unmap is set and the platform requires it).
 */
static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq,
			 unsigned int n, bool unmap)
{
	struct tx_sw_desc *sdesc;
	unsigned int cidx = tq->cidx;
	struct device *dev = adapter->pdev_dev;

	/* compile-time constant when CONFIG_NEED_DMA_MAP_STATE is off */
	const int need_unmap = need_skb_unmap() && unmap;

	sdesc = &tq->sdesc[cidx];
	while (n--) {
		/*
		 * Not every descriptor carries an skb: only the last
		 * descriptor of a packet does (see t4vf_eth_xmit()).
		 */
		if (sdesc->skb) {
			if (need_unmap)
				unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq);
			dev_consume_skb_any(sdesc->skb);
			sdesc->skb = NULL;
		}

		sdesc++;
		if (++cidx == tq->size) {
			cidx = 0;
			sdesc = tq->sdesc;
		}
	}
	tq->cidx = cidx;
}
407
408
409
410
411static inline int reclaimable(const struct sge_txq *tq)
412{
413 int hw_cidx = be16_to_cpu(tq->stat->cidx);
414 int reclaimable = hw_cidx - tq->cidx;
415 if (reclaimable < 0)
416 reclaimable += tq->size;
417 return reclaimable;
418}
419
420
421
422
423
424
425
426
427
428
429
430static inline void reclaim_completed_tx(struct adapter *adapter,
431 struct sge_txq *tq,
432 bool unmap)
433{
434 int avail = reclaimable(tq);
435
436 if (avail) {
437
438
439
440
441 if (avail > MAX_TX_RECLAIM)
442 avail = MAX_TX_RECLAIM;
443
444 free_tx_desc(adapter, tq, avail, unmap);
445 tq->in_use -= avail;
446 }
447}
448
449
450
451
452
453
454static inline int get_buf_size(const struct adapter *adapter,
455 const struct rx_sw_desc *sdesc)
456{
457 const struct sge *s = &adapter->sge;
458
459 return (s->fl_pg_order > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
460 ? (PAGE_SIZE << s->fl_pg_order) : PAGE_SIZE);
461}
462
463
464
465
466
467
468
469
470
471
472
473static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n)
474{
475 while (n--) {
476 struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];
477
478 if (is_buf_mapped(sdesc))
479 dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
480 get_buf_size(adapter, sdesc),
481 PCI_DMA_FROMDEVICE);
482 put_page(sdesc->page);
483 sdesc->page = NULL;
484 if (++fl->cidx == fl->size)
485 fl->cidx = 0;
486 fl->avail--;
487 }
488}
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
504{
505 struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];
506
507 if (is_buf_mapped(sdesc))
508 dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
509 get_buf_size(adapter, sdesc),
510 PCI_DMA_FROMDEVICE);
511 sdesc->page = NULL;
512 if (++fl->cidx == fl->size)
513 fl->cidx = 0;
514 fl->avail--;
515}
516
517
518
519
520
521
522
523
524
/**
 *	ring_fl_db - ring a Free List's doorbell
 *	@adapter: the adapter
 *	@fl: the Free List whose doorbell should be rung
 *
 *	Tells the hardware about any pending Free List credits, in whole
 *	Egress Queue Units (partial units are kept back in pend_cred).
 */
static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
{
	u32 val = adapter->params.arch.sge_fl_db;

	/*
	 * The SGE keeps track of its Producer and Consumer Indices in
	 * terms of Egress Queue Units, so we only push credits when we
	 * have at least one full unit's worth.
	 */
	if (fl->pend_cred >= FL_PER_EQ_UNIT) {
		/* T4 and later chips use different PIDX field layouts */
		if (is_t4(adapter->params.chip))
			val |= PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT);
		else
			val |= PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT);

		/*
		 * Make sure all memory writes to the Free List queue are
		 * committed before we tell the hardware about them.
		 */
		wmb();

		/*
		 * If we don't have access to the new User Doorbell (T5
		 * and later) we use the kernel doorbell register,
		 * otherwise we write directly through BAR2.
		 */
		if (unlikely(fl->bar2_addr == NULL)) {
			t4_write_reg(adapter,
				     T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
				     QID_V(fl->cntxt_id) | val);
		} else {
			writel(val | QID_V(fl->bar2_qid),
			       fl->bar2_addr + SGE_UDB_KDOORBELL);

			/*
			 * This barrier keeps the BAR2 write ordered with
			 * respect to subsequent doorbell writes.
			 */
			wmb();
		}
		fl->pend_cred %= FL_PER_EQ_UNIT;
	}
}
564
565
566
567
568
569
570
571static inline void set_rx_sw_desc(struct rx_sw_desc *sdesc, struct page *page,
572 dma_addr_t dma_addr)
573{
574 sdesc->page = page;
575 sdesc->dma_addr = dma_addr;
576}
577
578
579
580
/*
 * Set POISON_BUF_VAL to a byte value >= 0 to have freshly allocated RX
 * buffers filled with that value for debugging; the default of -1
 * compiles the poisoning out entirely.
 */
#define POISON_BUF_VAL -1

static inline void poison_buf(struct page *page, size_t sz)
{
#if POISON_BUF_VAL >= 0
	memset(page_address(page), POISON_BUF_VAL, sz);
#endif
}
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
605 int n, gfp_t gfp)
606{
607 struct sge *s = &adapter->sge;
608 struct page *page;
609 dma_addr_t dma_addr;
610 unsigned int cred = fl->avail;
611 __be64 *d = &fl->desc[fl->pidx];
612 struct rx_sw_desc *sdesc = &fl->sdesc[fl->pidx];
613
614
615
616
617
618
619 BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT);
620
621 gfp |= __GFP_NOWARN;
622
623
624
625
626
627
628
629 if (s->fl_pg_order == 0)
630 goto alloc_small_pages;
631
632 while (n) {
633 page = __dev_alloc_pages(gfp, s->fl_pg_order);
634 if (unlikely(!page)) {
635
636
637
638
639
640 fl->large_alloc_failed++;
641 break;
642 }
643 poison_buf(page, PAGE_SIZE << s->fl_pg_order);
644
645 dma_addr = dma_map_page(adapter->pdev_dev, page, 0,
646 PAGE_SIZE << s->fl_pg_order,
647 PCI_DMA_FROMDEVICE);
648 if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
649
650
651
652
653
654
655
656
657 __free_pages(page, s->fl_pg_order);
658 goto out;
659 }
660 dma_addr |= RX_LARGE_BUF;
661 *d++ = cpu_to_be64(dma_addr);
662
663 set_rx_sw_desc(sdesc, page, dma_addr);
664 sdesc++;
665
666 fl->avail++;
667 if (++fl->pidx == fl->size) {
668 fl->pidx = 0;
669 sdesc = fl->sdesc;
670 d = fl->desc;
671 }
672 n--;
673 }
674
675alloc_small_pages:
676 while (n--) {
677 page = __dev_alloc_page(gfp);
678 if (unlikely(!page)) {
679 fl->alloc_failed++;
680 break;
681 }
682 poison_buf(page, PAGE_SIZE);
683
684 dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE,
685 PCI_DMA_FROMDEVICE);
686 if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
687 put_page(page);
688 break;
689 }
690 *d++ = cpu_to_be64(dma_addr);
691
692 set_rx_sw_desc(sdesc, page, dma_addr);
693 sdesc++;
694
695 fl->avail++;
696 if (++fl->pidx == fl->size) {
697 fl->pidx = 0;
698 sdesc = fl->sdesc;
699 d = fl->desc;
700 }
701 }
702
703out:
704
705
706
707
708
709 cred = fl->avail - cred;
710 fl->pend_cred += cred;
711 ring_fl_db(adapter, fl);
712
713 if (unlikely(fl_starving(adapter, fl))) {
714 smp_wmb();
715 set_bit(fl->cntxt_id, adapter->sge.starving_fl);
716 }
717
718 return cred;
719}
720
721
722
723
724
725static inline void __refill_fl(struct adapter *adapter, struct sge_fl *fl)
726{
727 refill_fl(adapter, fl,
728 min((unsigned int)MAX_RX_REFILL, fl_cap(fl) - fl->avail),
729 GFP_ATOMIC);
730}
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
/**
 *	alloc_ring - allocate resources for an SGE descriptor ring
 *	@dev: the device to allocate the ring for
 *	@nelem: the number of descriptor elements
 *	@hwsize: the size of each hardware descriptor
 *	@swsize: the size of each software "shadow" descriptor (0 for none)
 *	@busaddrp: (output) the DMA bus address of the hardware ring
 *	@swringp: (output) location to store the software ring pointer
 *		  (NULL iff @swsize is 0)
 *	@stat_size: extra bytes appended to the ring for a status block
 *
 *	Allocates a coherent hardware descriptor ring (plus trailing
 *	status area) and an optional zero-initialized software shadow
 *	ring.  Returns the hardware ring's virtual address, or NULL on
 *	failure, in which case nothing remains allocated.
 */
static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
			size_t swsize, dma_addr_t *busaddrp, void *swringp,
			size_t stat_size)
{
	/*
	 * Allocate the hardware ring and the trailing status block as a
	 * single coherent DMA region.
	 */
	size_t hwlen = nelem * hwsize + stat_size;
	void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL);

	if (!hwring)
		return NULL;

	/*
	 * A software shadow ring is requested if and only if a location
	 * to store its pointer was supplied.
	 */
	BUG_ON((swsize != 0) != (swringp != NULL));
	if (swsize) {
		void *swring = kcalloc(nelem, swsize, GFP_KERNEL);

		if (!swring) {
			dma_free_coherent(dev, hwlen, hwring, *busaddrp);
			return NULL;
		}
		*(void **)swringp = swring;
	}

	/*
	 * Zero the hardware ring so fields like generation bits start in
	 * a known state.
	 */
	memset(hwring, 0, hwlen);
	return hwring;
}
786
787
788
789
790
791
792
793
/**
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *
 *	Calculates the number of flits (8-byte words) needed for a
 *	Scatter Gather List that can hold @n entries.  The first entry
 *	occupies the two-flit (cmd_nsge, len0, addr0) header; the
 *	remaining n - 1 entries are packed in ulptx_sge_pair structures
 *	at three flits per two entries, plus one extra flit when the
 *	remainder is odd.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	unsigned int rest = n - 1;
	unsigned int pairs = rest / 2;
	unsigned int odd = rest & 1;

	return 3 * pairs + 2 * odd + 2;
}
816
817
818
819
820
821
822
823
/**
 *	flits_to_desc - returns the number of flits needed for N flits
 *	@flits: the number of flits
 *
 *	Returns the number of TX descriptors (Egress Queue Units) needed
 *	to carry the given number of flits.  A request must never exceed
 *	the maximum Work Request length.
 */
static inline unsigned int flits_to_desc(unsigned int flits)
{
	BUG_ON(flits > SGE_MAX_WR_LEN / sizeof(__be64));
	return DIV_ROUND_UP(flits, TXD_PER_EQ_UNIT);
}
829
830
831
832
833
834
835
836
/**
 *	is_eth_imm - can an Ethernet packet be sent as immediate data?
 *	@skb: the packet
 *
 *	Always returns false for this VF driver: packets are never sent
 *	as immediate data inside the Work Request, so the is_eth_imm()
 *	branches in callers (calc_tx_flits(), t4vf_eth_xmit()) are
 *	effectively dead.  NOTE(review): presumably the VM TX Work
 *	Request format used here does not support immediate payloads —
 *	confirm against the firmware API before changing this.
 */
static inline int is_eth_imm(const struct sk_buff *skb)
{
	return false;
}
848
849
850
851
852
853
854
855
/**
 *	calc_tx_flits - calculate the number of flits for a packet TX WR
 *	@skb: the packet
 *
 *	Returns the number of flits (8-byte words) needed for a TX Work
 *	Request for the given Ethernet packet, including the firmware
 *	Work Request header, CPL headers and the Scatter Gather List.
 */
static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
{
	unsigned int flits;

	/*
	 * If the skb is small enough to be sent as immediate data, that
	 * data plus the CPL header is all we need.  (Currently dead:
	 * is_eth_imm() always returns false for this driver.)
	 */
	if (is_eth_imm(skb))
		return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
				    sizeof(__be64));

	/*
	 * Otherwise the packet is DMA-mapped: one SGL entry per fragment
	 * plus one for the linear header, then the Work Request header
	 * and CPLs (with an additional LSO CPL for GSO packets).
	 */
	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
	if (skb_shinfo(skb)->gso_size)
		flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
			  sizeof(struct cpl_tx_pkt_lso_core) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	else
		flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	return flits;
}
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
/**
 *	write_sgl - populate a Scatter Gather List for a packet
 *	@skb: the packet
 *	@tq: the TX queue we are writing into
 *	@sgl: starting location for writing the SGL
 *	@end: points right after the end of the SGL
 *	@start: start offset into skb main-body data to include in the SGL
 *	@addr: the list of DMA bus addresses for the SGL elements
 *
 *	Generates an SGL describing the skb's linear data (from @start)
 *	and all its page fragments.  If the SGL would run past the end of
 *	the descriptor ring (tq->stat), the overflowing part is staged in
 *	a local buffer and then copied to the start of the ring.  The SGL
 *	is padded with a zero flit to an even flit count if needed.
 */
static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
		      struct ulptx_sgl *sgl, u64 *end, unsigned int start,
		      const dma_addr_t *addr)
{
	unsigned int i, len;
	struct ulptx_sge_pair *to;
	const struct skb_shared_info *si = skb_shinfo(skb);
	unsigned int nfrags = si->nr_frags;
	struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];

	/* addr0/len0 get the linear data if any, else fragment 0 */
	len = skb_headlen(skb) - start;
	if (likely(len)) {
		sgl->len0 = htonl(len);
		sgl->addr0 = cpu_to_be64(addr[0] + start);
		nfrags++;
	} else {
		sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
		sgl->addr0 = cpu_to_be64(addr[1]);
	}

	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
			      ULPTX_NSGE_V(nfrags));
	if (likely(--nfrags == 0))
		return;

	/*
	 * If the SGL would wrap around the end of the ring, build the
	 * remainder in the on-stack buffer and copy it into place below.
	 */
	to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge;

	/* i skips fragment 0 when it was already consumed as addr0 */
	for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
		to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
		to->addr[0] = cpu_to_be64(addr[i]);
		to->addr[1] = cpu_to_be64(addr[++i]);
	}
	/* odd trailing fragment: zero-length second half of the pair */
	if (nfrags) {
		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
		to->len[1] = cpu_to_be32(0);
		to->addr[0] = cpu_to_be64(addr[i + 1]);
	}
	/* copy the staged portion across the ring wrap point */
	if (unlikely((u8 *)end > (u8 *)tq->stat)) {
		unsigned int part0 = (u8 *)tq->stat - (u8 *)sgl->sge, part1;

		if (likely(part0))
			memcpy(sgl->sge, buf, part0);
		part1 = (u8 *)end - (u8 *)tq->stat;
		memcpy(tq->desc, (u8 *)buf + part0, part1);
		end = (void *)tq->desc + part1;
	}
	/* pad to an even number of flits with a zero flit */
	if ((uintptr_t)end & 8)
		*end = 0;
}
960
961
962
963
964
965
966
967
968
/**
 *	ring_tx_db - check and potentially ring a TX queue's doorbell
 *	@adapter: the adapter
 *	@tq: the TX queue
 *	@n: number of new descriptors to give to HW
 *
 *	Informs the hardware that @n new TX descriptors have been written
 *	to the queue, via the kernel doorbell register or, when available,
 *	the BAR2 user doorbell (optionally pushing a full descriptor's
 *	worth of data through the write-combined doorbell).
 */
static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
			      int n)
{
	/*
	 * Make sure the descriptor writes are visible before the
	 * doorbell tells the hardware to fetch them.
	 */
	wmb();

	/*
	 * No BAR2 mapping: fall back to the kernel doorbell register.
	 */
	if (unlikely(tq->bar2_addr == NULL)) {
		u32 val = PIDX_V(n);

		t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
			     QID_V(tq->cntxt_id) | val);
	} else {
		u32 val = PIDX_T5_V(n);

		/*
		 * The PIDX increment must not spill into the DBPRIO bit
		 * of the doorbell word.
		 */
		WARN_ON(val & DBPRIO_F);

		/*
		 * For a single new descriptor on a queue whose BAR2 page
		 * carries the write-combined doorbell, push the whole
		 * descriptor through the WC doorbell to save the
		 * hardware a DMA fetch of it.
		 */
		if (n == 1 && tq->bar2_qid == 0) {
			/* index of the just-written descriptor */
			unsigned int index = (tq->pidx
					      ? (tq->pidx - 1)
					      : (tq->size - 1));
			__be64 *src = (__be64 *)&tq->desc[index];
			__be64 __iomem *dst = (__be64 __iomem *)(tq->bar2_addr +
							 SGE_UDB_WCDOORBELL);
			unsigned int count = EQ_UNIT / sizeof(__be64);

			/*
			 * Copy the descriptor flit by flit into the
			 * write-combined doorbell window.  The data is
			 * already big-endian as the hardware expects, so
			 * it is written out without byte-swapping.
			 */
			while (count) {
				/* raw copy: no endian conversion */
				writeq((__force u64)*src, dst);
				src++;
				dst++;
				count--;
			}
		} else
			writel(val | QID_V(tq->bar2_qid),
			       tq->bar2_addr + SGE_UDB_KDOORBELL);

		/*
		 * Barrier after the BAR2 write so this doorbell stays
		 * ordered with respect to any that follow.
		 */
		wmb();
	}
}
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
/**
 *	inline_tx_skb - inline a packet's data into TX descriptors
 *	@skb: the packet
 *	@tq: the TX queue where the packet will be inlined
 *	@pos: starting position in the TX queue to inline the packet
 *
 *	Copies the packet's payload directly into the TX descriptor ring,
 *	wrapping around the end of the ring (at tq->stat) if necessary,
 *	and zero-pads the result to an even number of flits.
 */
static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq,
			  void *pos)
{
	u64 *p;
	int left = (void *)tq->stat - pos;

	if (likely(skb->len <= left)) {
		/* fits before the status page: one straight copy */
		if (likely(!skb->data_len))
			skb_copy_from_linear_data(skb, pos, skb->len);
		else
			skb_copy_bits(skb, 0, pos, skb->len);
		pos += skb->len;
	} else {
		/* wraps: copy the tail to the start of the ring */
		skb_copy_bits(skb, 0, pos, left);
		skb_copy_bits(skb, left, tq->desc, skb->len - left);
		pos = (void *)tq->desc + (skb->len - left);
	}

	/* zero-pad to an even number of flits */
	p = PTR_ALIGN(pos, 8);
	if ((uintptr_t)p & 8)
		*p = 0;
}
1080
1081
1082
1083
1084
/*
 * Figure out what HW csum a packet wants and return the appropriate
 * control bits for the TX packet CPL's ctrl1 field.
 */
static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
{
	int csum_type;
	const struct iphdr *iph = ip_hdr(skb);

	if (iph->version == 4) {
		if (iph->protocol == IPPROTO_TCP)
			csum_type = TX_CSUM_TCPIP;
		else if (iph->protocol == IPPROTO_UDP)
			csum_type = TX_CSUM_UDPIP;
		else {
nocsum:
			/*
			 * Unknown L4 protocol: disable HW L4 checksumming
			 * entirely for this packet.
			 */
			return TXPKT_L4CSUM_DIS_F;
		}
	} else {
		/*
		 * Not IPv4: reinterpret the network header as IPv6.
		 * NOTE(review): assumes the packet is IPv4 or IPv6 —
		 * anything else falls into the nocsum path via nexthdr.
		 */
		const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;

		if (ip6h->nexthdr == IPPROTO_TCP)
			csum_type = TX_CSUM_TCPIP6;
		else if (ip6h->nexthdr == IPPROTO_UDP)
			csum_type = TX_CSUM_UDPIP6;
		else
			goto nocsum;
	}

	if (likely(csum_type >= TX_CSUM_TCPIP)) {
		/*
		 * Protocol-aware checksum types: the hardware derives the
		 * checksum location from the Ethernet and IP header
		 * lengths (the field layout changed on T6).
		 */
		u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb));
		int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;

		if (chip <= CHELSIO_T5)
			hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
		else
			hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
		return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
	} else {
		/* generic checksum: explicit start/insert offsets */
		int start = skb_transport_offset(skb);

		return TXPKT_CSUM_TYPE_V(csum_type) |
			TXPKT_CSUM_START_V(start) |
			TXPKT_CSUM_LOC_V(start + skb->csum_offset);
	}
}
1134
1135
1136
1137
1138static void txq_stop(struct sge_eth_txq *txq)
1139{
1140 netif_tx_stop_queue(txq->txq);
1141 txq->q.stops++;
1142}
1143
1144
1145
1146
1147static inline void txq_advance(struct sge_txq *tq, unsigned int n)
1148{
1149 tq->in_use += n;
1150 tq->pidx += n;
1151 if (tq->pidx >= tq->size)
1152 tq->pidx -= tq->size;
1153}
1154
1155
1156
1157
1158
1159
1160
1161
/**
 *	t4vf_eth_xmit - add a packet to an Ethernet TX queue
 *	@skb: the packet
 *	@dev: the egress net device
 *
 *	Builds a firmware FW_ETH_TX_PKT_VM_WR Work Request (with optional
 *	LSO CPL) directly in the TX descriptor ring and rings the queue's
 *	doorbell.  Returns NETDEV_TX_OK (the skb is consumed or freed) or
 *	NETDEV_TX_BUSY when the ring is unexpectedly full.
 */
int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u32 wr_mid;
	u64 cntrl, *end;
	int qidx, credits, max_pkt_len;
	unsigned int flits, ndesc;
	struct adapter *adapter;
	struct sge_eth_txq *txq;
	const struct port_info *pi;
	struct fw_eth_tx_pkt_vm_wr *wr;
	struct cpl_tx_pkt_core *cpl;
	const struct skb_shared_info *ssi;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
	/* the VM WR carries a copy of the Ethernet header fields */
	const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) +
					sizeof(wr->ethmacsrc) +
					sizeof(wr->ethtype) +
					sizeof(wr->vlantci));

	/*
	 * The packet must be long enough for us to copy the Ethernet
	 * header fields into the Work Request below.
	 */
	if (unlikely(skb->len < fw_hdr_copy_len))
		goto out_free;

	/* Discard oversized non-GSO packets. */
	max_pkt_len = ETH_HLEN + dev->mtu;
	if (skb_vlan_tagged(skb))
		max_pkt_len += VLAN_HLEN;
	if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
		goto out_free;

	/*
	 * Figure out which TX queue we're going to use.
	 */
	pi = netdev_priv(dev);
	adapter = pi->adapter;
	qidx = skb_get_queue_mapping(skb);
	BUG_ON(qidx >= pi->nqsets);
	txq = &adapter->sge.ethtxq[pi->first_qset + qidx];

	/*
	 * Take this opportunity to reclaim any TX descriptors that the
	 * hardware has already completed.
	 */
	reclaim_completed_tx(adapter, &txq->q, true);

	/*
	 * Calculate the number of flits and TX descriptors we're going
	 * to need along with how many descriptors will be left over
	 * after we use them.
	 */
	flits = calc_tx_flits(skb);
	ndesc = flits_to_desc(flits);
	credits = txq_avail(&txq->q) - ndesc;

	if (unlikely(credits < 0)) {
		/*
		 * Not enough room: this should never happen because the
		 * queue is stopped before it fills (see below), so warn
		 * loudly and push back on the networking stack.
		 */
		txq_stop(txq);
		dev_err(adapter->pdev_dev,
			"%s: TX ring %u full while queue awake!\n",
			dev->name, qidx);
		return NETDEV_TX_BUSY;
	}

	if (!is_eth_imm(skb) &&
	    unlikely(map_skb(adapter->pdev_dev, skb, addr) < 0)) {
		/*
		 * DMA mapping failed: count it and drop the packet (no
		 * descriptors have been consumed yet).
		 */
		txq->mapping_err++;
		goto out_free;
	}

	wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
		/*
		 * After this packet there may not be room for another
		 * maximum-sized one, so stop the queue now and ask the
		 * firmware to notify us (EQUEQ/EQUIQ) when descriptors
		 * free up so the queue can be restarted.
		 */
		txq_stop(txq);
		wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
	}

	/*
	 * Start filling in the Work Request.  The WR header must fit in
	 * a single descriptor (checked at compile scale by this BUG_ON),
	 * so it can be written without worrying about the ring wrap.
	 */
	BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
	wr = (void *)&txq->q.desc[txq->q.pidx];
	wr->equiq_to_len16 = cpu_to_be32(wr_mid);
	wr->r3[0] = cpu_to_be32(0);
	wr->r3[1] = cpu_to_be32(0);
	skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
	end = (u64 *)wr + flits;

	/*
	 * If this is a Large Send Offload packet we need to take care of
	 * the LSO CPL before the TX packet CPL; otherwise the packet CPL
	 * follows the WR header directly.
	 */
	ssi = skb_shinfo(skb);
	if (ssi->gso_size) {
		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
		int l3hdr_len = skb_network_header_len(skb);
		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;

		wr->op_immdlen =
			cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
				    FW_WR_IMMDLEN_V(sizeof(*lso) +
						    sizeof(*cpl)));
		/*
		 * Fill in the LSO CPL message with header-length and MSS
		 * information for the hardware segmentation engine.
		 */
		lso->lso_ctrl =
			cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
				    LSO_FIRST_SLICE_F |
				    LSO_LAST_SLICE_F |
				    LSO_IPV6_V(v6) |
				    LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
				    LSO_IPHDR_LEN_V(l3hdr_len / 4) |
				    LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
		lso->ipid_ofst = cpu_to_be16(0);
		lso->mss = cpu_to_be16(ssi->gso_size);
		lso->seqno_offset = cpu_to_be32(0);
		if (is_t4(adapter->params.chip))
			lso->len = cpu_to_be32(skb->len);
		else
			lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len));

		/*
		 * Set up the TX packet CPL pointer, control word and
		 * checksum offload for the segmented TCP payload.
		 */
		cpl = (void *)(lso + 1);

		if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
			cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
		else
			cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);

		cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
					   TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
			 TXPKT_IPHDR_LEN_V(l3hdr_len);
		txq->tso++;
		txq->tx_cso += ssi->gso_segs;
	} else {
		int len;

		len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
		wr->op_immdlen =
			cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
				    FW_WR_IMMDLEN_V(len));

		/*
		 * Set up TX packet CPL pointer, control word and perform
		 * accounting for requested checksum offload.
		 */
		cpl = (void *)(wr + 1);
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			cntrl = hwcsum(adapter->params.chip, skb) |
				TXPKT_IPCSUM_DIS_F;
			txq->tx_cso++;
		} else
			cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
	}

	/*
	 * If there's a VLAN tag present, add that to the list of things
	 * to do in this Work Request.
	 */
	if (skb_vlan_tag_present(skb)) {
		txq->vlan_ins++;
		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
	}

	/*
	 * Fill in the TX packet CPL message header.
	 */
	cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
				 TXPKT_INTF_V(pi->port_id) |
				 TXPKT_PF_V(0));
	cpl->pack = cpu_to_be16(0);
	cpl->len = cpu_to_be16(skb->len);
	cpl->ctrl1 = cpu_to_be64(cntrl);

#ifdef T4_TRACE
	T4_TRACE5(adapter->tb[txq->q.cntxt_id & 7],
		  "eth_xmit: ndesc %u, credits %u, pidx %u, len %u, frags %u",
		  ndesc, credits, txq->q.pidx, skb->len, ssi->nr_frags);
#endif

	/*
	 * Fill in the body of the TX Work Request.
	 */
	if (is_eth_imm(skb)) {
		/*
		 * Small packet: inline the data directly after the CPL
		 * and free the skb right away (currently dead code, as
		 * is_eth_imm() always returns false).
		 */
		inline_tx_skb(skb, &txq->q, cpl + 1);
		dev_consume_skb_any(skb);
	} else {
		/*
		 * Larger packet: write a Scatter Gather List pointing at
		 * the DMA-mapped buffers.  The SGL may start exactly on
		 * the status page or wrap past it, in which case it is
		 * relocated/continued at the start of the ring.  The skb
		 * and the SGL location are recorded on the packet's last
		 * descriptor so free_tx_desc()/unmap_sgl() can free and
		 * unmap everything once the hardware is done.
		 */
		struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
		struct sge_txq *tq = &txq->q;
		int last_desc;

		/*
		 * If the SGL would start exactly on the status page,
		 * move it to the beginning of the ring (and adjust the
		 * end pointer correspondingly).
		 */
		if (unlikely((void *)sgl == (void *)tq->stat)) {
			sgl = (void *)tq->desc;
			end = ((void *)tq->desc + ((void *)end - (void *)tq->stat));
		}

		write_sgl(skb, tq, sgl, end, 0, addr);
		skb_orphan(skb);

		last_desc = tq->pidx + ndesc - 1;
		if (last_desc >= tq->size)
			last_desc -= tq->size;
		tq->sdesc[last_desc].skb = skb;
		tq->sdesc[last_desc].sgl = sgl;
	}

	/*
	 * Advance our internal TX Queue state, tell the hardware about
	 * the new TX descriptors and return success.
	 */
	txq_advance(&txq->q, ndesc);
	netif_trans_update(dev);
	ring_tx_db(adapter, &txq->q, ndesc);
	return NETDEV_TX_OK;

out_free:
	/*
	 * An error of some sort happened.  Free the TX skb and tell the
	 * stack we've dealt with the packet ourselves.
	 */
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
/**
 *	copy_frags - copy fragments from gather list into skb_shared_info
 *	@skb: destination skb
 *	@gl: source internal packet gather list
 *	@offset: packet start offset in first page
 *
 *	Copies the page-fragment references of an internal packet gather
 *	list into an skb, applying @offset to the first fragment only.
 */
static inline void copy_frags(struct sk_buff *skb,
			      const struct pkt_gl *gl,
			      unsigned int offset)
{
	int i;

	/* usually most of the fragments only span one page */
	__skb_fill_page_desc(skb, 0, gl->frags[0].page,
			     gl->frags[0].offset + offset,
			     gl->frags[0].size - offset);
	skb_shinfo(skb)->nr_frags = gl->nfrags;
	for (i = 1; i < gl->nfrags; i++)
		__skb_fill_page_desc(skb, i, gl->frags[i].page,
				     gl->frags[i].offset,
				     gl->frags[i].size);

	/*
	 * The last page's reference is still held by the Free List (it
	 * was never unmapped/consumed — see process_responses()), so
	 * take an extra reference for the skb's use of it.
	 */
	get_page(gl->frags[gl->nfrags - 1].page);
}
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
/**
 *	t4vf_pktgl_to_skb - build an sk_buff from a packet gather list
 *	@gl: the gather list
 *	@skb_len: size of sk_buff main body if it carries fragments
 *	@pull_len: amount of data to move to the sk_buff's main body
 *
 *	Builds an sk_buff from the given packet gather list.  Returns the
 *	sk_buff or %NULL if sk_buff allocation failed.
 */
static struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
					 unsigned int skb_len,
					 unsigned int pull_len)
{
	struct sk_buff *skb;

	/*
	 * Below RX_COPY_THRES the whole packet is copied into a linear
	 * skb and the Free List pages stay with the Free List; above it,
	 * only @pull_len bytes are copied into the skb header and the
	 * remaining data is referenced as page fragments.
	 */
	if (gl->tot_len <= RX_COPY_THRES) {
		/* small packets have only one fragment */
		skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
		if (unlikely(!skb))
			goto out;
		__skb_put(skb, gl->tot_len);
		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
	} else {
		skb = alloc_skb(skb_len, GFP_ATOMIC);
		if (unlikely(!skb))
			goto out;
		__skb_put(skb, pull_len);
		skb_copy_to_linear_data(skb, gl->va, pull_len);

		copy_frags(skb, gl, pull_len);
		skb->len = gl->tot_len;
		skb->data_len = skb->len - pull_len;
		skb->truesize += skb->data_len;
	}

out:
	return skb;
}
1542
1543
1544
1545
1546
1547
1548
1549
/**
 *	t4vf_pktgl_free - free a packet gather list
 *	@gl: the gather list
 *
 *	Releases the pages of a packet gather list.  The last page is
 *	deliberately NOT released: its reference is still owned by the
 *	Free List (process_responses() breaks out of its gather loop
 *	before consuming the final buffer), so freeing it here would drop
 *	a reference we don't own.
 */
static void t4vf_pktgl_free(const struct pkt_gl *gl)
{
	int frag;

	frag = gl->nfrags - 1;
	while (frag--)
		put_page(gl->frags[frag].page);
}
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1569 const struct cpl_rx_pkt *pkt)
1570{
1571 struct adapter *adapter = rxq->rspq.adapter;
1572 struct sge *s = &adapter->sge;
1573 int ret;
1574 struct sk_buff *skb;
1575
1576 skb = napi_get_frags(&rxq->rspq.napi);
1577 if (unlikely(!skb)) {
1578 t4vf_pktgl_free(gl);
1579 rxq->stats.rx_drops++;
1580 return;
1581 }
1582
1583 copy_frags(skb, gl, s->pktshift);
1584 skb->len = gl->tot_len - s->pktshift;
1585 skb->data_len = skb->len;
1586 skb->truesize += skb->data_len;
1587 skb->ip_summed = CHECKSUM_UNNECESSARY;
1588 skb_record_rx_queue(skb, rxq->rspq.idx);
1589
1590 if (pkt->vlan_ex) {
1591 __vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
1592 be16_to_cpu(pkt->vlan));
1593 rxq->stats.vlan_ex++;
1594 }
1595 ret = napi_gro_frags(&rxq->rspq.napi);
1596
1597 if (ret == GRO_HELD)
1598 rxq->stats.lro_pkts++;
1599 else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
1600 rxq->stats.lro_merged++;
1601 rxq->stats.pkts++;
1602 rxq->stats.rx_cso++;
1603}
1604
1605
1606
1607
1608
1609
1610
1611
1612
/**
 *	t4vf_ethrx_handler - process an ingress ethernet packet
 *	@rspq: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the RX_PKT message
 *	@gl: the gather list of packet fragments
 *
 *	Process an ingress ethernet packet and deliver it to the stack,
 *	either via GRO (for checksummed, non-fragmented TCP when enabled)
 *	or as a regular skb through netif_receive_skb().
 */
int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
		       const struct pkt_gl *gl)
{
	struct sk_buff *skb;
	const struct cpl_rx_pkt *pkt = (void *)rsp;
	bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
		       (rspq->netdev->features & NETIF_F_RXCSUM);
	struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
	struct adapter *adapter = rspq->adapter;
	struct sge *s = &adapter->sge;

	/*
	 * If this is a good TCP packet and we have Generic Receive
	 * Offload enabled, handle the packet in the GRO path.
	 */
	if ((pkt->l2info & cpu_to_be32(RXF_TCP_F)) &&
	    (rspq->netdev->features & NETIF_F_GRO) && csum_ok &&
	    !pkt->ip_frag) {
		do_gro(rxq, gl, pkt);
		return 0;
	}

	/*
	 * Convert the Packet Gather List into an skb; drop (and count)
	 * the packet if that fails.
	 */
	skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
	if (unlikely(!skb)) {
		t4vf_pktgl_free(gl);
		rxq->stats.rx_drops++;
		return 0;
	}
	/* strip off the chip-inserted packet shift */
	__skb_pull(skb, s->pktshift);
	skb->protocol = eth_type_trans(skb, rspq->netdev);
	skb_record_rx_queue(skb, rspq->idx);
	rxq->stats.pkts++;

	/*
	 * Propagate the hardware checksum result.  (The !pkt->err_vec
	 * term is redundant here — csum_ok already implies it.)
	 */
	if (csum_ok && !pkt->err_vec &&
	    (be32_to_cpu(pkt->l2info) & (RXF_UDP_F | RXF_TCP_F))) {
		if (!pkt->ip_frag) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			rxq->stats.rx_cso++;
		} else if (pkt->l2info & htonl(RXF_IP_F)) {
			/* IP fragment: hand the raw csum to the stack */
			__sum16 c = (__force __sum16)pkt->csum;
			skb->csum = csum_unfold(c);
			skb->ip_summed = CHECKSUM_COMPLETE;
			rxq->stats.rx_cso++;
		}
	} else
		skb_checksum_none_assert(skb);

	if (pkt->vlan_ex) {
		rxq->stats.vlan_ex++;
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(pkt->vlan));
	}

	netif_receive_skb(skb);

	return 0;
}
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681static inline bool is_new_response(const struct rsp_ctrl *rc,
1682 const struct sge_rspq *rspq)
1683{
1684 return ((rc->type_gen >> RSPD_GEN_S) & 0x1) == rspq->gen;
1685}
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
/**
 *	restore_rx_bufs - put back the RX buffers of a packet gather list
 *	@gl: the packet gather list
 *	@fl: the SGE Free List
 *	@frags: how many fragments to put back
 *
 *	Walks the Free List's consumer index backwards, re-attaching the
 *	gather list's pages to their software descriptors and marking them
 *	RX_UNMAPPED_BUF (they were unmapped by unmap_rx_buf()).  Used when
 *	a response handler could not process a packet (e.g. out of memory)
 *	so the buffers can be presented to the hardware again later.
 */
static void restore_rx_bufs(const struct pkt_gl *gl, struct sge_fl *fl,
			    int frags)
{
	struct rx_sw_desc *sdesc;

	while (frags--) {
		/* step the consumer index back one slot, with wrap */
		if (fl->cidx == 0)
			fl->cidx = fl->size - 1;
		else
			fl->cidx--;
		sdesc = &fl->sdesc[fl->cidx];
		sdesc->page = gl->frags[frags].page;
		sdesc->dma_addr |= RX_UNMAPPED_BUF;
		fl->avail++;
	}
}
1723
1724
1725
1726
1727
1728
1729
1730static inline void rspq_next(struct sge_rspq *rspq)
1731{
1732 rspq->cur_desc = (void *)rspq->cur_desc + rspq->iqe_len;
1733 if (unlikely(++rspq->cidx == rspq->size)) {
1734 rspq->cidx = 0;
1735 rspq->gen ^= 1;
1736 rspq->cur_desc = rspq->desc;
1737 }
1738}
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
/**
 *	process_responses - process responses from an SGE response queue
 *	@rspq: the ingress response queue to process
 *	@budget: how many responses can be processed in this round
 *
 *	Processes responses from a Scatter Gather Engine response queue up
 *	to the supplied budget, assembling Free List buffers into packet
 *	gather lists for the queue's handler.  Returns the number of
 *	responses processed (which may be less than the budget if the
 *	handler ran out of resources).
 */
static int process_responses(struct sge_rspq *rspq, int budget)
{
	struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
	struct adapter *adapter = rspq->adapter;
	struct sge *s = &adapter->sge;
	int budget_left = budget;

	while (likely(budget_left)) {
		int ret, rsp_type;
		const struct rsp_ctrl *rc;

		/* the control descriptor sits at the end of each entry */
		rc = (void *)rspq->cur_desc + (rspq->iqe_len - sizeof(*rc));
		if (!is_new_response(rc, rspq))
			break;

		/*
		 * Ensure the response body is read only after the
		 * generation bit confirmed it is valid.
		 */
		dma_rmb();
		rsp_type = RSPD_TYPE_G(rc->type_gen);
		if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
			struct page_frag *fp;
			struct pkt_gl gl;
			const struct rx_sw_desc *sdesc;
			u32 bufsz, frag;
			u32 len = be32_to_cpu(rc->pldbuflen_qid);

			/*
			 * If we get a "new buffer" message, move on past
			 * the buffer we were using for the previous
			 * packet (it stayed on the Free List so its
			 * remainder could be reused).
			 */
			if (len & RSPD_NEWBUF_F) {
				/*
				 * We get one "new buffer" message when we
				 * first start up a queue, so we need to
				 * ignore it when our offset is 0.
				 */
				if (likely(rspq->offset > 0)) {
					free_rx_bufs(rspq->adapter, &rxq->fl,
						     1);
					rspq->offset = 0;
				}
				len = RSPD_LEN_G(len);
			}
			gl.tot_len = len;

			/*
			 * Gather packet fragments.  Note that the final
			 * buffer is NOT consumed from the Free List (no
			 * unmap_rx_buf() for it): its remainder stays
			 * available via rspq->offset.
			 */
			for (frag = 0, fp = gl.frags; ; frag++, fp++) {
				BUG_ON(frag >= MAX_SKB_FRAGS);
				BUG_ON(rxq->fl.avail == 0);
				sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
				bufsz = get_buf_size(adapter, sdesc);
				fp->page = sdesc->page;
				fp->offset = rspq->offset;
				fp->size = min(bufsz, len);
				len -= fp->size;
				if (!len)
					break;
				unmap_rx_buf(rspq->adapter, &rxq->fl);
			}
			gl.nfrags = frag+1;

			/*
			 * The last buffer remains mapped for the device;
			 * sync it so the CPU sees the freshly DMA'ed
			 * data before the handler touches it.
			 */
			dma_sync_single_for_cpu(rspq->adapter->pdev_dev,
						get_buf_addr(sdesc),
						fp->size, DMA_FROM_DEVICE);
			gl.va = (page_address(gl.frags[0].page) +
				 gl.frags[0].offset);
			prefetch(gl.va);

			/*
			 * Hand the gather list to the handler; on failure
			 * put the unmapped buffers back on the Free List
			 * so the packet can be retried later.
			 */
			ret = rspq->handler(rspq, rspq->cur_desc, &gl);
			if (likely(ret == 0))
				rspq->offset += ALIGN(fp->size, s->fl_align);
			else
				restore_rx_bufs(&gl, &rxq->fl, frag);
		} else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
			/* pure CPL message with no Free List payload */
			ret = rspq->handler(rspq, rspq->cur_desc, NULL);
		} else {
			WARN_ON(rsp_type > RSPD_TYPE_CPL_X);
			ret = 0;
		}

		if (unlikely(ret)) {
			/*
			 * The handler couldn't process the descriptor
			 * (out of memory).  Back off with the largest
			 * holdoff timer so we retry later rather than
			 * spinning.
			 */
			const int NOMEM_TIMER_IDX = SGE_NTIMERS-1;
			rspq->next_intr_params =
				QINTR_TIMER_IDX_V(NOMEM_TIMER_IDX);
			break;
		}

		rspq_next(rspq);
		budget_left--;
	}

	/*
	 * If this is a Response Queue with an associated Free List and
	 * there's room for another chunk of new Free List buffer
	 * pointers, refill the Free List.
	 */
	if (rspq->offset >= 0 &&
	    fl_cap(&rxq->fl) - rxq->fl.avail >= 2*FL_PER_EQ_UNIT)
		__refill_fl(rspq->adapter, &rxq->fl);
	return budget - budget_left;
}
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
/**
 *	napi_rx_handler - the NAPI handler for RX processing
 *	@napi: the napi instance
 *	@budget: how many packets we can process in this round
 *
 *	Handler for new data events when using NAPI.  Processes up to
 *	@budget responses, then updates the queue's consumer index and
 *	rearms its interrupt with the chosen holdoff parameters.
 */
static int napi_rx_handler(struct napi_struct *napi, int budget)
{
	unsigned int intr_params;
	struct sge_rspq *rspq = container_of(napi, struct sge_rspq, napi);
	int work_done = process_responses(rspq, budget);
	u32 val;

	if (likely(work_done < budget)) {
		/* queue drained: complete NAPI and use (then reset) any
		 * one-shot holdoff parameters set by process_responses()
		 */
		napi_complete(napi);
		intr_params = rspq->next_intr_params;
		rspq->next_intr_params = rspq->intr_params;
	} else
		intr_params = QINTR_TIMER_IDX_V(SGE_TIMER_UPD_CIDX);

	if (unlikely(work_done == 0))
		rspq->unhandled_irqs++;

	val = CIDXINC_V(work_done) | SEINTARM_V(intr_params);
	/* If we don't have access to the new User GTS (via BAR2), use the
	 * old doorbell mechanism; otherwise use the new BAR2 mechanism.
	 */
	if (unlikely(!rspq->bar2_addr)) {
		t4_write_reg(rspq->adapter,
			     T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
			     val | INGRESSQID_V((u32)rspq->cntxt_id));
	} else {
		writel(val | INGRESSQID_V(rspq->bar2_qid),
		       rspq->bar2_addr + SGE_UDB_GTS);
		wmb();
	}
	return work_done;
}
1916
1917
1918
1919
1920
1921irqreturn_t t4vf_sge_intr_msix(int irq, void *cookie)
1922{
1923 struct sge_rspq *rspq = cookie;
1924
1925 napi_schedule(&rspq->napi);
1926 return IRQ_HANDLED;
1927}
1928
1929
1930
1931
1932
1933static unsigned int process_intrq(struct adapter *adapter)
1934{
1935 struct sge *s = &adapter->sge;
1936 struct sge_rspq *intrq = &s->intrq;
1937 unsigned int work_done;
1938 u32 val;
1939
1940 spin_lock(&adapter->sge.intrq_lock);
1941 for (work_done = 0; ; work_done++) {
1942 const struct rsp_ctrl *rc;
1943 unsigned int qid, iq_idx;
1944 struct sge_rspq *rspq;
1945
1946
1947
1948
1949
1950 rc = (void *)intrq->cur_desc + (intrq->iqe_len - sizeof(*rc));
1951 if (!is_new_response(rc, intrq))
1952 break;
1953
1954
1955
1956
1957
1958
1959 dma_rmb();
1960 if (unlikely(RSPD_TYPE_G(rc->type_gen) != RSPD_TYPE_INTR_X)) {
1961 dev_err(adapter->pdev_dev,
1962 "Unexpected INTRQ response type %d\n",
1963 RSPD_TYPE_G(rc->type_gen));
1964 continue;
1965 }
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975 qid = RSPD_QID_G(be32_to_cpu(rc->pldbuflen_qid));
1976 iq_idx = IQ_IDX(s, qid);
1977 if (unlikely(iq_idx >= MAX_INGQ)) {
1978 dev_err(adapter->pdev_dev,
1979 "Ingress QID %d out of range\n", qid);
1980 continue;
1981 }
1982 rspq = s->ingr_map[iq_idx];
1983 if (unlikely(rspq == NULL)) {
1984 dev_err(adapter->pdev_dev,
1985 "Ingress QID %d RSPQ=NULL\n", qid);
1986 continue;
1987 }
1988 if (unlikely(rspq->abs_id != qid)) {
1989 dev_err(adapter->pdev_dev,
1990 "Ingress QID %d refers to RSPQ %d\n",
1991 qid, rspq->abs_id);
1992 continue;
1993 }
1994
1995
1996
1997
1998
1999
2000 napi_schedule(&rspq->napi);
2001 rspq_next(intrq);
2002 }
2003
2004 val = CIDXINC_V(work_done) | SEINTARM_V(intrq->intr_params);
2005
2006
2007
2008 if (unlikely(!intrq->bar2_addr)) {
2009 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
2010 val | INGRESSQID_V(intrq->cntxt_id));
2011 } else {
2012 writel(val | INGRESSQID_V(intrq->bar2_qid),
2013 intrq->bar2_addr + SGE_UDB_GTS);
2014 wmb();
2015 }
2016
2017 spin_unlock(&adapter->sge.intrq_lock);
2018
2019 return work_done;
2020}
2021
2022
2023
2024
2025
2026static irqreturn_t t4vf_intr_msi(int irq, void *cookie)
2027{
2028 struct adapter *adapter = cookie;
2029
2030 process_intrq(adapter);
2031 return IRQ_HANDLED;
2032}
2033
2034
2035
2036
2037
2038
2039
2040
2041irq_handler_t t4vf_intr_handler(struct adapter *adapter)
2042{
2043 BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
2044 if (adapter->flags & USING_MSIX)
2045 return t4vf_sge_intr_msix;
2046 else
2047 return t4vf_intr_msi;
2048}
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
/**
 *	sge_rx_timer_cb - periodic RX queue maintenance
 *	@data: the adapter (cast from the timer's unsigned long cookie)
 *
 *	Runs periodically to check for "starving" Free Lists: Free Lists
 *	which are low on buffers but whose NAPI handler isn't running (and
 *	so can't refill them).  For each one found we try to reschedule its
 *	NAPI instance; if that fails we re-mark it for the next scan.
 */
static void sge_rx_timer_cb(unsigned long data)
{
	struct adapter *adapter = (struct adapter *)data;
	struct sge *s = &adapter->sge;
	unsigned int i;

	/*
	 * Scan the "Starving Free Lists" bitmap one long word at a time,
	 * isolating each set bit (m &= m - 1 clears the lowest set bit)
	 * to recover the Egress Queue ID of a potentially starving FL.
	 * The clear_bit() is ordered before the fl_starving() re-check by
	 * smp_mb__after_atomic().
	 */
	for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) {
		unsigned long m;

		for (m = s->starving_fl[i]; m; m &= m - 1) {
			unsigned int id = __ffs(m) + i * BITS_PER_LONG;
			struct sge_fl *fl = s->egr_map[id];

			clear_bit(id, s->starving_fl);
			smp_mb__after_atomic();

			/*
			 * We access the Free List without a lock, so there's
			 * a small chance of a false positive where we
			 * schedule NAPI even though the FL is no longer
			 * starving.  That's harmless.
			 */
			if (fl_starving(adapter, fl)) {
				struct sge_eth_rxq *rxq;

				rxq = container_of(fl, struct sge_eth_rxq, fl);
				if (napi_reschedule(&rxq->rspq.napi))
					fl->starving++;
				else
					set_bit(id, s->starving_fl);
			}
		}
	}

	/*
	 * Reschedule the next scan for starving Free Lists.
	 */
	mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
}
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
/**
 *	sge_tx_timer_cb - periodic TX queue maintenance
 *	@data: the adapter (cast from the timer's unsigned long cookie)
 *
 *	Runs periodically to reclaim completed TX descriptors that the
 *	transmit path hasn't freed (that path only runs when there's new
 *	work).  Scans the Ethernet TX queues round-robin, starting from a
 *	rover index, under a global reclaim budget.
 */
static void sge_tx_timer_cb(unsigned long data)
{
	struct adapter *adapter = (struct adapter *)data;
	struct sge *s = &adapter->sge;
	unsigned int i, budget;

	budget = MAX_TIMER_TX_RECLAIM;
	i = s->ethtxq_rover;
	do {
		struct sge_eth_txq *txq = &s->ethtxq[i];

		/* Only reclaim if there's work to do AND we can take the
		 * TX lock without blocking; otherwise skip this queue.
		 */
		if (reclaimable(&txq->q) && __netif_tx_trylock(txq->txq)) {
			int avail = reclaimable(&txq->q);

			if (avail > budget)
				avail = budget;

			free_tx_desc(adapter, &txq->q, avail, true);
			txq->q.in_use -= avail;
			__netif_tx_unlock(txq->txq);

			budget -= avail;
			if (!budget)
				break;
		}

		i++;
		if (i >= s->ethqsets)
			i = 0;
	} while (i != s->ethtxq_rover);
	s->ethtxq_rover = i;

	/*
	 * If we exhausted the whole budget there may be more reclaim work
	 * pending, so come back very soon (2 jiffies); otherwise use the
	 * normal TX queue check period.
	 */
	mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
}
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173static void __iomem *bar2_address(struct adapter *adapter,
2174 unsigned int qid,
2175 enum t4_bar2_qtype qtype,
2176 unsigned int *pbar2_qid)
2177{
2178 u64 bar2_qoffset;
2179 int ret;
2180
2181 ret = t4vf_bar2_sge_qregs(adapter, qid, qtype,
2182 &bar2_qoffset, pbar2_qid);
2183 if (ret)
2184 return NULL;
2185
2186 return adapter->bar2 + bar2_qoffset;
2187}
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
/**
 *	t4vf_sge_alloc_rxq - allocate an SGE RX (Response) Queue
 *	@adapter: the adapter
 *	@rspq: pointer to the new rxq's Response Queue to be filled in
 *	@iqasynch: if 0, a normal RX queue; if 1, an asynchronous event queue
 *	@dev: the network device associated with the new rspq
 *	@intr_dest: interrupt destination (overridden in MSI mode, see below)
 *	@fl: pointer to the new rxq's Free List to be filled in, or NULL
 *	@hnd: the response handler to invoke for entries on this queue
 *
 *	Allocates the host DMA ring(s), issues the firmware FW_IQ_CMD to
 *	create the ingress queue (and optional Free List), and initializes
 *	the software state.  Returns 0 on success, negative errno on failure
 *	(all partially-allocated resources are released on the error path).
 */
int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
		       bool iqasynch, struct net_device *dev,
		       int intr_dest,
		       struct sge_fl *fl, rspq_handler_t hnd)
{
	struct sge *s = &adapter->sge;
	struct port_info *pi = netdev_priv(dev);
	struct fw_iq_cmd cmd, rpl;
	int ret, iqandst, flsz = 0;

	/*
	 * If we're using MSI interrupts and we're not initializing the
	 * Forwarded Interrupt Queue itself, then set up this queue for
	 * indirect interrupts to the Forwarded Interrupt Queue.  (This
	 * implies the Forwarded Interrupt Queue must be set up before any
	 * other ingress queue.)
	 */
	if ((adapter->flags & USING_MSI) && rspq != &adapter->sge.intrq) {
		iqandst = SGE_INTRDST_IQ;
		intr_dest = adapter->sge.intrq.abs_id;
	} else
		iqandst = SGE_INTRDST_PCI;

	/*
	 * Allocate the hardware ring for the Response Queue.  The size
	 * needs to be a multiple of 16 which includes the mandatory status
	 * entry (regardless of whether the Status Page capabilities are
	 * enabled or not).
	 */
	rspq->size = roundup(rspq->size, 16);
	rspq->desc = alloc_ring(adapter->pdev_dev, rspq->size, rspq->iqe_len,
				0, &rspq->phys_addr, NULL, 0);
	if (!rspq->desc)
		return -ENOMEM;

	/*
	 * Fill in the Ingress Queue Command.  The queue is created and
	 * started by the firmware in one mailbox transaction below.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_WRITE_F |
				    FW_CMD_EXEC_F);
	cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC_F |
					 FW_IQ_CMD_IQSTART_F |
					 FW_LEN16(cmd));
	cmd.type_to_iqandstindex =
		cpu_to_be32(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
			    FW_IQ_CMD_IQASYNCH_V(iqasynch) |
			    FW_IQ_CMD_VIID_V(pi->viid) |
			    FW_IQ_CMD_IQANDST_V(iqandst) |
			    FW_IQ_CMD_IQANUS_V(1) |
			    FW_IQ_CMD_IQANUD_V(SGE_UPDATEDEL_INTR) |
			    FW_IQ_CMD_IQANDSTINDEX_V(intr_dest));
	cmd.iqdroprss_to_iqesize =
		cpu_to_be16(FW_IQ_CMD_IQPCIECH_V(pi->port_id) |
			    FW_IQ_CMD_IQGTSMODE_F |
			    FW_IQ_CMD_IQINTCNTTHRESH_V(rspq->pktcnt_idx) |
			    FW_IQ_CMD_IQESIZE_V(ilog2(rspq->iqe_len) - 4));
	cmd.iqsize = cpu_to_be16(rspq->size);
	cmd.iqaddr = cpu_to_be64(rspq->phys_addr);

	if (fl) {
		enum chip_type chip =
			CHELSIO_CHIP_VERSION(adapter->params.chip);

		/*
		 * Allocate the ring for the hardware free list (with space
		 * for its status page) along with the associated software
		 * descriptor ring.  The free list size needs to be a
		 * multiple of the Egress Queue Unit and at least 2 Egress
		 * Units larger than the SGE's Egress Congestion Threshold
		 * (fl_starve_thres - 1).
		 */
		if (fl->size < s->fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT)
			fl->size = s->fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT;
		fl->size = roundup(fl->size, FL_PER_EQ_UNIT);
		fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
				      sizeof(__be64), sizeof(struct rx_sw_desc),
				      &fl->addr, &fl->sdesc, s->stat_len);
		if (!fl->desc) {
			ret = -ENOMEM;
			goto err;
		}

		/*
		 * Calculate the size of the hardware free list ring plus
		 * its Status Page, in units of Egress Queue Units, for the
		 * firmware command below.
		 */
		flsz = (fl->size / FL_PER_EQ_UNIT +
			s->stat_len / EQ_UNIT);

		/*
		 * Fill in all the relevant firmware Ingress Queue Command
		 * fields for the free list.
		 */
		cmd.iqns_to_fl0congen =
			cpu_to_be32(
				FW_IQ_CMD_FL0HOSTFCMODE_V(SGE_HOSTFCMODE_NONE) |
				FW_IQ_CMD_FL0PACKEN_F |
				FW_IQ_CMD_FL0PADEN_F);

		/* In T6, for egress queue type FL there is internal overhead
		 * of 16B for header going into FLM module.
		 * Hence maximum allowed burst size will be 448 bytes.
		 * NOTE(review): T4/T5 use a 128B fetch-burst minimum and
		 * 512B maximum; T6+ use 64B/256B — confirm against the
		 * chip's SGE documentation if these values change.
		 */
		cmd.fl0dcaen_to_fl0cidxfthresh =
			cpu_to_be16(
				FW_IQ_CMD_FL0FBMIN_V(chip <= CHELSIO_T5 ?
						     FETCHBURSTMIN_128B_X :
						     FETCHBURSTMIN_64B_X) |
				FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ?
						     FETCHBURSTMAX_512B_X :
						     FETCHBURSTMAX_256B_X));
		cmd.fl0size = cpu_to_be16(flsz);
		cmd.fl0addr = cpu_to_be64(fl->addr);
	}

	/*
	 * Issue the firmware Ingress Queue Command and extract the results
	 * if it completes successfully.
	 */
	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (ret)
		goto err;

	netif_napi_add(dev, &rspq->napi, napi_rx_handler, 64);
	rspq->cur_desc = rspq->desc;
	rspq->cidx = 0;
	rspq->gen = 1;
	rspq->next_intr_params = rspq->intr_params;
	rspq->cntxt_id = be16_to_cpu(rpl.iqid);
	rspq->bar2_addr = bar2_address(adapter,
				       rspq->cntxt_id,
				       T4_BAR2_QTYPE_INGRESS,
				       &rspq->bar2_qid);
	rspq->abs_id = be16_to_cpu(rpl.physiqid);
	/* subtract the status entry; free_rspq_fl() adds it back */
	rspq->size--;
	rspq->adapter = adapter;
	rspq->netdev = dev;
	rspq->handler = hnd;

	/* set offset to -1 to distinguish ingress queues without FL */
	rspq->offset = fl ? 0 : -1;

	if (fl) {
		fl->cntxt_id = be16_to_cpu(rpl.fl0id);
		fl->avail = 0;
		fl->pend_cred = 0;
		fl->pidx = 0;
		fl->cidx = 0;
		fl->alloc_failed = 0;
		fl->large_alloc_failed = 0;
		fl->starving = 0;

		/* Note, we must initialize the BAR2 Free List User Doorbell
		 * information before refilling the Free List!
		 */
		fl->bar2_addr = bar2_address(adapter,
					     fl->cntxt_id,
					     T4_BAR2_QTYPE_EGRESS,
					     &fl->bar2_qid);

		refill_fl(adapter, fl, fl_cap(fl), GFP_KERNEL);
	}

	return 0;

err:
	/*
	 * An error occurred.  Clean up our partial allocation state and
	 * return the error.
	 */
	if (rspq->desc) {
		dma_free_coherent(adapter->pdev_dev, rspq->size * rspq->iqe_len,
				  rspq->desc, rspq->phys_addr);
		rspq->desc = NULL;
	}
	if (fl && fl->desc) {
		kfree(fl->sdesc);
		fl->sdesc = NULL;
		dma_free_coherent(adapter->pdev_dev, flsz * EQ_UNIT,
				  fl->desc, fl->addr);
		fl->desc = NULL;
	}
	return ret;
}
2394
2395
2396
2397
2398
2399
2400
2401
2402
/**
 *	t4vf_sge_alloc_eth_txq - allocate an SGE Ethernet TX Queue
 *	@adapter: the adapter
 *	@txq: pointer to the new txq to be filled in
 *	@dev: the network device
 *	@devq: the network TX queue associated with the new txq
 *	@iqid: the ingress queue ID to which events relating to the new
 *		txq should be directed
 *
 *	Allocates the host DMA ring, issues the firmware FW_EQ_ETH_CMD to
 *	create the egress queue, and initializes the software state.
 *	Returns 0 on success, negative errno on failure.
 */
int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
			   struct net_device *dev, struct netdev_queue *devq,
			   unsigned int iqid)
{
	struct sge *s = &adapter->sge;
	int ret, nentries;
	struct fw_eq_eth_cmd cmd, rpl;
	struct port_info *pi = netdev_priv(dev);

	/*
	 * Calculate the size of the hardware TX Queue (including the Status
	 * Page on the end of the TX Queue) in units of TX Descriptors.
	 */
	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);

	/*
	 * Allocate the hardware ring for the TX ring (with space for its
	 * status page) along with the associated software descriptor ring.
	 */
	txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
				 sizeof(struct tx_desc),
				 sizeof(struct tx_sw_desc),
				 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len);
	if (!txq->q.desc)
		return -ENOMEM;

	/*
	 * Fill in the Egress Queue Command.  The queue is created and
	 * started by the firmware in one mailbox transaction below.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_WRITE_F |
				    FW_CMD_EXEC_F);
	cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC_F |
					 FW_EQ_ETH_CMD_EQSTART_F |
					 FW_LEN16(cmd));
	cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
				   FW_EQ_ETH_CMD_VIID_V(pi->viid));
	cmd.fetchszm_to_iqid =
		cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE_V(SGE_HOSTFCMODE_STPG) |
			    FW_EQ_ETH_CMD_PCIECHN_V(pi->port_id) |
			    FW_EQ_ETH_CMD_IQID_V(iqid));
	cmd.dcaen_to_eqsize =
		cpu_to_be32(FW_EQ_ETH_CMD_FBMIN_V(SGE_FETCHBURSTMIN_64B) |
			    FW_EQ_ETH_CMD_FBMAX_V(SGE_FETCHBURSTMAX_512B) |
			    FW_EQ_ETH_CMD_CIDXFTHRESH_V(
				    SGE_CIDXFLUSHTHRESH_32) |
			    FW_EQ_ETH_CMD_EQSIZE_V(nentries));
	cmd.eqaddr = cpu_to_be64(txq->q.phys_addr);

	/*
	 * Issue the firmware Egress Queue Command and extract the results
	 * if it completes successfully.
	 */
	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (ret) {
		/*
		 * The firmware command failed, so free everything we
		 * allocated above and return the error.
		 */
		kfree(txq->q.sdesc);
		txq->q.sdesc = NULL;
		dma_free_coherent(adapter->pdev_dev,
				  nentries * sizeof(struct tx_desc),
				  txq->q.desc, txq->q.phys_addr);
		txq->q.desc = NULL;
		return ret;
	}

	txq->q.in_use = 0;
	txq->q.cidx = 0;
	txq->q.pidx = 0;
	txq->q.stat = (void *)&txq->q.desc[txq->q.size];
	txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_G(be32_to_cpu(rpl.eqid_pkd));
	txq->q.bar2_addr = bar2_address(adapter,
					txq->q.cntxt_id,
					T4_BAR2_QTYPE_EGRESS,
					&txq->q.bar2_qid);
	txq->q.abs_id =
		FW_EQ_ETH_CMD_PHYSEQID_G(be32_to_cpu(rpl.physeqid_pkd));
	txq->txq = devq;
	txq->tso = 0;
	txq->tx_cso = 0;
	txq->vlan_ins = 0;
	txq->q.stops = 0;
	txq->q.restarts = 0;
	txq->mapping_err = 0;
	return 0;
}
2497
2498
2499
2500
2501static void free_txq(struct adapter *adapter, struct sge_txq *tq)
2502{
2503 struct sge *s = &adapter->sge;
2504
2505 dma_free_coherent(adapter->pdev_dev,
2506 tq->size * sizeof(*tq->desc) + s->stat_len,
2507 tq->desc, tq->phys_addr);
2508 tq->cntxt_id = 0;
2509 tq->sdesc = NULL;
2510 tq->desc = NULL;
2511}
2512
2513
2514
2515
2516
/*
 * Free the resources associated with a Response Queue and, if present,
 * its attached Free List.  The hardware ingress queue is released via
 * the firmware first, then the host DMA rings and software state are
 * torn down.
 */
static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
			 struct sge_fl *fl)
{
	struct sge *s = &adapter->sge;
	unsigned int flid = fl ? fl->cntxt_id : 0xffff;

	t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP,
		     rspq->cntxt_id, flid, 0xffff);
	/* "+ 1" restores the status entry that t4vf_sge_alloc_rxq()
	 * subtracted (rspq->size--) after creating the queue.
	 */
	dma_free_coherent(adapter->pdev_dev, (rspq->size + 1) * rspq->iqe_len,
			  rspq->desc, rspq->phys_addr);
	netif_napi_del(&rspq->napi);
	rspq->netdev = NULL;
	rspq->cntxt_id = 0;
	rspq->abs_id = 0;
	rspq->desc = NULL;

	if (fl) {
		/* Release any buffers still on the Free List, then the FL
		 * DMA ring (plus status page) and software descriptors.
		 */
		free_rx_bufs(adapter, fl, fl->avail);
		dma_free_coherent(adapter->pdev_dev,
				  fl->size * sizeof(*fl->desc) + s->stat_len,
				  fl->desc, fl->addr);
		kfree(fl->sdesc);
		fl->sdesc = NULL;
		fl->cntxt_id = 0;
		fl->desc = NULL;
	}
}
2544
2545
2546
2547
2548
2549
2550
2551void t4vf_free_sge_resources(struct adapter *adapter)
2552{
2553 struct sge *s = &adapter->sge;
2554 struct sge_eth_rxq *rxq = s->ethrxq;
2555 struct sge_eth_txq *txq = s->ethtxq;
2556 struct sge_rspq *evtq = &s->fw_evtq;
2557 struct sge_rspq *intrq = &s->intrq;
2558 int qs;
2559
2560 for (qs = 0; qs < adapter->sge.ethqsets; qs++, rxq++, txq++) {
2561 if (rxq->rspq.desc)
2562 free_rspq_fl(adapter, &rxq->rspq, &rxq->fl);
2563 if (txq->q.desc) {
2564 t4vf_eth_eq_free(adapter, txq->q.cntxt_id);
2565 free_tx_desc(adapter, &txq->q, txq->q.in_use, true);
2566 kfree(txq->q.sdesc);
2567 free_txq(adapter, &txq->q);
2568 }
2569 }
2570 if (evtq->desc)
2571 free_rspq_fl(adapter, evtq, NULL);
2572 if (intrq->desc)
2573 free_rspq_fl(adapter, intrq, NULL);
2574}
2575
2576
2577
2578
2579
2580
2581
2582void t4vf_sge_start(struct adapter *adapter)
2583{
2584 adapter->sge.ethtxq_rover = 0;
2585 mod_timer(&adapter->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
2586 mod_timer(&adapter->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
2587}
2588
2589
2590
2591
2592
2593
2594
2595
2596
2597void t4vf_sge_stop(struct adapter *adapter)
2598{
2599 struct sge *s = &adapter->sge;
2600
2601 if (s->rx_timer.function)
2602 del_timer_sync(&s->rx_timer);
2603 if (s->tx_timer.function)
2604 del_timer_sync(&s->tx_timer);
2605}
2606
2607
2608
2609
2610
2611
2612
2613
2614
2615
/**
 *	t4vf_sge_init - initialize the SGE software state
 *	@adapter: the adapter
 *
 *	The hardware SGE is set up by the Physical Function driver; here we
 *	sanity check that setup against what this driver can handle and
 *	derive our internal parameters from it.  Returns 0 on success,
 *	-EINVAL if the PF's configuration is one we can't deal with.
 */
int t4vf_sge_init(struct adapter *adapter)
{
	struct sge_params *sge_params = &adapter->params.sge;
	u32 fl0 = sge_params->sge_fl_buffer_size[0];
	u32 fl1 = sge_params->sge_fl_buffer_size[1];
	struct sge *s = &adapter->sge;

	/*
	 * Start by vetting the basic SGE parameters which have been set up
	 * by the Physical Function driver: FL buffer size 0 must equal
	 * PAGE_SIZE and, if a second buffer size is configured, it must be
	 * larger than the first.
	 */
	if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) {
		dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
			fl0, fl1);
		return -EINVAL;
	}
	if ((sge_params->sge_control & RXPKTCPLMODE_F) !=
	    RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
		dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
		return -EINVAL;
	}

	/*
	 * Now translate the adapter parameters into our internal forms.
	 */
	if (fl1)
		s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
	s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
			? 128 : 64);
	s->pktshift = PKTSHIFT_G(sge_params->sge_control);
	s->fl_align = t4vf_fl_pkt_align(adapter);

	/* A FL with <= fl_starve_thres buffers is starving and a periodic
	 * timer will attempt to refill it.  This threshold needs to be
	 * larger than the SGE's Egress Congestion Threshold or we'll get
	 * stuck with the SGE waiting for us to give it more Free List
	 * entries.  The threshold register differs per chip generation.
	 */
	switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
	case CHELSIO_T4:
		s->fl_starve_thres =
			EGRTHRESHOLD_G(sge_params->sge_congestion_control);
		break;
	case CHELSIO_T5:
		s->fl_starve_thres =
			EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control);
		break;
	case CHELSIO_T6:
	default:
		s->fl_starve_thres =
			T6_EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control);
		break;
	}
	/* threshold register is in units of 2 FL pointers; convert and
	 * add one so a queue AT the threshold still counts as starving
	 */
	s->fl_starve_thres = s->fl_starve_thres * 2 + 1;

	/*
	 * Set up tasklet timers.
	 */
	setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adapter);
	setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adapter);

	/*
	 * Initialize the Forwarded Interrupt Queue lock.
	 */
	spin_lock_init(&s->intrq_lock);

	return 0;
}
2686