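/*
 * Scatter-Gather Engine (SGE) support for the Chelsio T4/T5/T6 virtual
 * function Ethernet driver (cxgb4vf): TX/RX queue allocation, free-list
 * management, packet transmit and ingress response processing.
 */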
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>

#include "t4vf_common.h"
#include "t4vf_defs.h"

#include "../cxgb4/t4_regs.h"
#include "../cxgb4/t4_values.h"
#include "../cxgb4/t4fw_api.h"
#include "../cxgb4/t4_msg.h"

enum {
	/*
	 * Egress Queue sizes, producer and consumer indices are all in units
	 * of Egress Context Units bytes; free-list entries are 64-bit PCI
	 * DMA addresses.
	 */
	EQ_UNIT = SGE_EQ_IDXSIZE,
	FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
	TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),

	/*
	 * Maximum number of TX descriptors reclaimed in one pass; kept
	 * modest since freeing skbs isn't cheap and happens with locks held.
	 */
	MAX_TX_RECLAIM = 16,

	/*
	 * Maximum number of RX buffers replenished in one pass.
	 */
	MAX_RX_REFILL = 16,

	/*
	 * Period of the RX queue check timer; it only has work to do under
	 * severe memory shortage, so it runs infrequently.
	 */
	RX_QCHECK_PERIOD = (HZ / 2),

	/*
	 * Period of the TX queue check timer and the maximum number of TX
	 * descriptors that timer may reclaim per run.
	 */
	TX_QCHECK_PERIOD = (HZ / 2),
	MAX_TIMER_TX_RECLAIM = 100,

	/*
	 * Worst-case sizes (in flits, i.e. 8-byte words) of an Ethernet TX
	 * work request: a maximally fragmented skb's scatter/gather list
	 * plus the firmware/CPL headers.  An Ethernet TX queue is suspended
	 * when fewer than ETHTXQ_STOP_THRES descriptors remain.
	 */
	ETHTXQ_MAX_FRAGS = MAX_SKB_FRAGS + 1,
	ETHTXQ_MAX_SGL_LEN = ((3 * (ETHTXQ_MAX_FRAGS-1))/2 +
			      ((ETHTXQ_MAX_FRAGS-1) & 1) +
			      2),
	ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
			  sizeof(struct cpl_tx_pkt_lso_core) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
	ETHTXQ_MAX_FLITS = ETHTXQ_MAX_SGL_LEN + ETHTXQ_MAX_HDR,

	ETHTXQ_STOP_THRES = 1 + DIV_ROUND_UP(ETHTXQ_MAX_FLITS, TXD_PER_EQ_UNIT),

	/*
	 * Maximum amount of data a TX Packet Work Request can carry as
	 * immediate data (i.e. without DMA).
	 */
	MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_M,

	/*
	 * Maximum size of a control work request.
	 */
	MAX_CTRL_WR_LEN = 256,

	/*
	 * Maximum amount of data which can go into a TX descriptor as
	 * immediate data: the larger of the two limits above.
	 */
	MAX_IMM_TX_LEN = (MAX_IMM_TX_PKT_LEN > MAX_CTRL_WR_LEN
			  ? MAX_IMM_TX_PKT_LEN
			  : MAX_CTRL_WR_LEN),

	/*
	 * RX copy/pull thresholds: packets no larger than RX_COPY_THRES are
	 * copied whole into a freshly allocated skb; otherwise RX_PULL_LEN
	 * bytes are pulled into the skb head and the rest left in page
	 * fragments.
	 */
	RX_COPY_THRES = 256,
	RX_PULL_LEN = 128,

	/*
	 * Main body length for skbs used for RX Ethernet packets with
	 * fragments; should be at least RX_PULL_LEN.
	 */
	RX_SKB_LEN = 512,
};
153
154
155
156
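/*
 * Per-TX-descriptor software state: the skb being sent and a pointer to its
 * scatter/gather list within the hardware TX ring, needed to unmap the DMA
 * mappings when the descriptor is reclaimed.
 */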
157struct tx_sw_desc {
158 struct sk_buff *skb;
159 struct ulptx_sgl *sgl;
160};
161
162
163
164
165
166
167
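/*
 * Per-free-list-buffer software state: the backing page and its DMA bus
 * address.  The low-order bits of dma_addr hold the buffer-size and mapping
 * flags (see RX_LARGE_BUF/RX_UNMAPPED_BUF below).
 */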
168struct rx_sw_desc {
169 struct page *page;
170 dma_addr_t dma_addr;
171
172};
173
174
175
176
177
178
179
180
181
182
183enum {
184 RX_LARGE_BUF = 1 << 0,
185 RX_UNMAPPED_BUF = 1 << 1,
186};
187
188
189
190
191
192
193
194
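/*
 * Return the DMA bus address of a free-list buffer with the low-order flag
 * bits masked off.
 */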
195static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *sdesc)
196{
197 return sdesc->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF);
198}
199
200
201
202
203
204
205
206
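/*
 * Return true if a free-list buffer is currently DMA-mapped.
 */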
207static inline bool is_buf_mapped(const struct rx_sw_desc *sdesc)
208{
209 return !(sdesc->dma_addr & RX_UNMAPPED_BUF);
210}
211
212
213
214
215
216
217
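/*
 * Return true if the platform needs explicit unmapping of TX DMA mappings,
 * i.e. if DMA mapping state is tracked (CONFIG_NEED_DMA_MAP_STATE).
 */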
218static inline int need_skb_unmap(void)
219{
220#ifdef CONFIG_NEED_DMA_MAP_STATE
221 return 1;
222#else
223 return 0;
224#endif
225}
226
227
228
229
230
231
232
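/*
 * Return the number of unused descriptors in a TX queue (one entry is
 * always held back).
 */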
233static inline unsigned int txq_avail(const struct sge_txq *tq)
234{
235 return tq->size - 1 - tq->in_use;
236}
237
238
239
240
241
242
243
244
245
246
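/*
 * Return the capacity of a free-buffer list: the ring size minus one Egress
 * Queue Unit's worth of descriptors, which is always left unpopulated.
 */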
247static inline unsigned int fl_cap(const struct sge_fl *fl)
248{
249 return fl->size - FL_PER_EQ_UNIT;
250}
251
252
253
254
255
256
257
258
259
260
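/*
 * Test whether a free list is "starving": running so low on buffers (after
 * accounting for credits not yet pushed to the hardware) that it needs to
 * be replenished urgently by the RX timer.
 */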
261static inline bool fl_starving(const struct adapter *adapter,
262 const struct sge_fl *fl)
263{
264 const struct sge *s = &adapter->sge;
265
266 return fl->avail - fl->pend_cred <= s->fl_starve_thres;
267}
268
269
270
271
272
273
274
275
276
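/*
 * DMA-map the head and page fragments of an skb for transmission, storing
 * the bus addresses in @addr.  On failure any mappings already created are
 * undone and -ENOMEM is returned.
 */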
277static int map_skb(struct device *dev, const struct sk_buff *skb,
278 dma_addr_t *addr)
279{
280 const skb_frag_t *fp, *end;
281 const struct skb_shared_info *si;
282
283 *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
284 if (dma_mapping_error(dev, *addr))
285 goto out_err;
286
287 si = skb_shinfo(skb);
288 end = &si->frags[si->nr_frags];
289 for (fp = si->frags; fp < end; fp++) {
290 *++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
291 DMA_TO_DEVICE);
292 if (dma_mapping_error(dev, *addr))
293 goto unwind;
294 }
295 return 0;
296
297unwind:
298 while (fp-- > si->frags)
299 dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
300 dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
301
302out_err:
303 return -ENOMEM;
304}
305
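/*
 * Unmap the DMA mappings described by a TX work request's scatter/gather
 * list.  The SGL may wrap past the end of the descriptor ring (at tq->stat),
 * so each address/length pair is located carefully before being unmapped.
 */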
306static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
307 const struct ulptx_sgl *sgl, const struct sge_txq *tq)
308{
309 const struct ulptx_sge_pair *p;
310 unsigned int nfrags = skb_shinfo(skb)->nr_frags;
311
312 if (likely(skb_headlen(skb)))
313 dma_unmap_single(dev, be64_to_cpu(sgl->addr0),
314 be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
315 else {
316 dma_unmap_page(dev, be64_to_cpu(sgl->addr0),
317 be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
318 nfrags--;
319 }
320
321
322
323
324
325 for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
326 if (likely((u8 *)(p + 1) <= (u8 *)tq->stat)) {
327unmap:
328 dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
329 be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
330 dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
331 be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
332 p++;
333 } else if ((u8 *)p == (u8 *)tq->stat) {
334 p = (const struct ulptx_sge_pair *)tq->desc;
335 goto unmap;
336 } else if ((u8 *)p + 8 == (u8 *)tq->stat) {
337 const __be64 *addr = (const __be64 *)tq->desc;
338
339 dma_unmap_page(dev, be64_to_cpu(addr[0]),
340 be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
341 dma_unmap_page(dev, be64_to_cpu(addr[1]),
342 be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
343 p = (const struct ulptx_sge_pair *)&addr[2];
344 } else {
345 const __be64 *addr = (const __be64 *)tq->desc;
346
347 dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
348 be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
349 dma_unmap_page(dev, be64_to_cpu(addr[0]),
350 be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
351 p = (const struct ulptx_sge_pair *)&addr[1];
352 }
353 }
354 if (nfrags) {
355 __be64 addr;
356
357 if ((u8 *)p == (u8 *)tq->stat)
358 p = (const struct ulptx_sge_pair *)tq->desc;
359 addr = ((u8 *)p + 16 <= (u8 *)tq->stat
360 ? p->addr[0]
361 : *(const __be64 *)tq->desc);
362 dma_unmap_page(dev, be64_to_cpu(addr), be32_to_cpu(p->len[0]),
363 DMA_TO_DEVICE);
364 }
365}
366
367
368
369
370
371
372
373
374
375
376
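/*
 * Reclaim @n TX descriptors from @tq, optionally unmapping their DMA
 * mappings, free the associated skbs and advance the queue's consumer
 * index.
 */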
377static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq,
378 unsigned int n, bool unmap)
379{
380 struct tx_sw_desc *sdesc;
381 unsigned int cidx = tq->cidx;
382 struct device *dev = adapter->pdev_dev;
383
384 const int need_unmap = need_skb_unmap() && unmap;
385
386 sdesc = &tq->sdesc[cidx];
387 while (n--) {
388
389
390
391
392 if (sdesc->skb) {
393 if (need_unmap)
394 unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq);
395 dev_consume_skb_any(sdesc->skb);
396 sdesc->skb = NULL;
397 }
398
399 sdesc++;
400 if (++cidx == tq->size) {
401 cidx = 0;
402 sdesc = tq->sdesc;
403 }
404 }
405 tq->cidx = cidx;
406}
407
408
409
410
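/*
 * Return the number of TX descriptors the hardware has finished with, based
 * on the hardware consumer index in the queue's status page.
 */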
411static inline int reclaimable(const struct sge_txq *tq)
412{
413 int hw_cidx = be16_to_cpu(tq->stat->cidx);
414 int reclaimable = hw_cidx - tq->cidx;
415 if (reclaimable < 0)
416 reclaimable += tq->size;
417 return reclaimable;
418}
419
420
421
422
423
424
425
426
427
428
429
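/*
 * Reclaim completed TX descriptors, at most MAX_TX_RECLAIM per call so the
 * latency of a single invocation stays bounded.
 */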
430static inline void reclaim_completed_tx(struct adapter *adapter,
431 struct sge_txq *tq,
432 bool unmap)
433{
434 int avail = reclaimable(tq);
435
436 if (avail) {
437
438
439
440
441 if (avail > MAX_TX_RECLAIM)
442 avail = MAX_TX_RECLAIM;
443
444 free_tx_desc(adapter, tq, avail, unmap);
445 tq->in_use -= avail;
446 }
447}
448
449
450
451
452
453
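/*
 * Return the size of an RX free-list buffer: a large (fl_pg_order) page if
 * the descriptor's RX_LARGE_BUF flag is set, otherwise a single page.
 */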
454static inline int get_buf_size(const struct adapter *adapter,
455 const struct rx_sw_desc *sdesc)
456{
457 const struct sge *s = &adapter->sge;
458
459 return (s->fl_pg_order > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
460 ? (PAGE_SIZE << s->fl_pg_order) : PAGE_SIZE);
461}
462
463
464
465
466
467
468
469
470
471
472
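/*
 * Release @n buffers from the head of a free list: unmap them (if mapped),
 * drop the page references and advance the consumer index.
 */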
473static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n)
474{
475 while (n--) {
476 struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];
477
478 if (is_buf_mapped(sdesc))
479 dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
480 get_buf_size(adapter, sdesc),
481 DMA_FROM_DEVICE);
482 put_page(sdesc->page);
483 sdesc->page = NULL;
484 if (++fl->cidx == fl->size)
485 fl->cidx = 0;
486 fl->avail--;
487 }
488}
489
490
491
492
493
494
495
496
497
498
499
500
501
502
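/*
 * Like free_rx_bufs() for a single buffer, except the page reference is not
 * dropped: the page is handed off to the packet being assembled.
 */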
503static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
504{
505 struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];
506
507 if (is_buf_mapped(sdesc))
508 dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
509 get_buf_size(adapter, sdesc),
510 DMA_FROM_DEVICE);
511 sdesc->page = NULL;
512 if (++fl->cidx == fl->size)
513 fl->cidx = 0;
514 fl->avail--;
515}
516
517
518
519
520
521
522
523
524
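/*
 * Tell the hardware about newly added free-list buffers by writing the free
 * list's doorbell.  Credits are only pushed in whole Egress Queue Unit
 * multiples; any remainder stays in fl->pend_cred for a later update.
 */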
525static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
526{
527 u32 val = adapter->params.arch.sge_fl_db;
528
529
530
531
532
533 if (fl->pend_cred >= FL_PER_EQ_UNIT) {
534 if (is_t4(adapter->params.chip))
535 val |= PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT);
536 else
537 val |= PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT);
538
539
540
541
542 wmb();
543
544
545
546
547
548 if (unlikely(fl->bar2_addr == NULL)) {
549 t4_write_reg(adapter,
550 T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
551 QID_V(fl->cntxt_id) | val);
552 } else {
553 writel(val | QID_V(fl->bar2_qid),
554 fl->bar2_addr + SGE_UDB_KDOORBELL);
555
556
557
558
559 wmb();
560 }
561 fl->pend_cred %= FL_PER_EQ_UNIT;
562 }
563}
564
565
566
567
568
569
570
571static inline void set_rx_sw_desc(struct rx_sw_desc *sdesc, struct page *page,
572 dma_addr_t dma_addr)
573{
574 sdesc->page = page;
575 sdesc->dma_addr = dma_addr;
576}
577
578
579
580
581#define POISON_BUF_VAL -1
582
583static inline void poison_buf(struct page *page, size_t sz)
584{
585#if POISON_BUF_VAL >= 0
586 memset(page_address(page), POISON_BUF_VAL, sz);
587#endif
588}
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
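/*
 * Refill a free list with up to @n new buffers.  Large (fl_pg_order) pages
 * are preferred when configured, with single pages as a fallback.  Each page
 * is DMA-mapped and its address written into the free-list ring.  Returns
 * the number of buffers actually added; the doorbell is rung for the new
 * credits and the free list is flagged as starving if it remains below the
 * starvation threshold.
 */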
604static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
605 int n, gfp_t gfp)
606{
607 struct sge *s = &adapter->sge;
608 struct page *page;
609 dma_addr_t dma_addr;
610 unsigned int cred = fl->avail;
611 __be64 *d = &fl->desc[fl->pidx];
612 struct rx_sw_desc *sdesc = &fl->sdesc[fl->pidx];
613
614
615
616
617
618
619 BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT);
620
621 gfp |= __GFP_NOWARN;
622
623
624
625
626
627
628
629 if (s->fl_pg_order == 0)
630 goto alloc_small_pages;
631
632 while (n) {
633 page = __dev_alloc_pages(gfp, s->fl_pg_order);
634 if (unlikely(!page)) {
635
636
637
638
639
640 fl->large_alloc_failed++;
641 break;
642 }
643 poison_buf(page, PAGE_SIZE << s->fl_pg_order);
644
645 dma_addr = dma_map_page(adapter->pdev_dev, page, 0,
646 PAGE_SIZE << s->fl_pg_order,
647 DMA_FROM_DEVICE);
648 if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
649
650
651
652
653
654
655
656
657 __free_pages(page, s->fl_pg_order);
658 goto out;
659 }
660 dma_addr |= RX_LARGE_BUF;
661 *d++ = cpu_to_be64(dma_addr);
662
663 set_rx_sw_desc(sdesc, page, dma_addr);
664 sdesc++;
665
666 fl->avail++;
667 if (++fl->pidx == fl->size) {
668 fl->pidx = 0;
669 sdesc = fl->sdesc;
670 d = fl->desc;
671 }
672 n--;
673 }
674
675alloc_small_pages:
676 while (n--) {
677 page = __dev_alloc_page(gfp);
678 if (unlikely(!page)) {
679 fl->alloc_failed++;
680 break;
681 }
682 poison_buf(page, PAGE_SIZE);
683
684 dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE,
685 DMA_FROM_DEVICE);
686 if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
687 put_page(page);
688 break;
689 }
690 *d++ = cpu_to_be64(dma_addr);
691
692 set_rx_sw_desc(sdesc, page, dma_addr);
693 sdesc++;
694
695 fl->avail++;
696 if (++fl->pidx == fl->size) {
697 fl->pidx = 0;
698 sdesc = fl->sdesc;
699 d = fl->desc;
700 }
701 }
702
703out:
704
705
706
707
708
709 cred = fl->avail - cred;
710 fl->pend_cred += cred;
711 ring_fl_db(adapter, fl);
712
713 if (unlikely(fl_starving(adapter, fl))) {
714 smp_wmb();
715 set_bit(fl->cntxt_id, adapter->sge.starving_fl);
716 }
717
718 return cred;
719}
720
721
722
723
724
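/*
 * Top up a free list from atomic context, adding at most MAX_RX_REFILL
 * buffers per call.
 */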
725static inline void __refill_fl(struct adapter *adapter, struct sge_fl *fl)
726{
727 refill_fl(adapter, fl,
728 min((unsigned int)MAX_RX_REFILL, fl_cap(fl) - fl->avail),
729 GFP_ATOMIC);
730}
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
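/*
 * Allocate the resources for a descriptor ring: @nelem hardware descriptors
 * of @hwsize bytes (plus @stat_size bytes for the status page) in coherent
 * DMA memory, and optionally a parallel software state array of @swsize-byte
 * entries returned through @swringp.
 */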
751static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
752 size_t swsize, dma_addr_t *busaddrp, void *swringp,
753 size_t stat_size)
754{
755
756
757
758 size_t hwlen = nelem * hwsize + stat_size;
759 void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL);
760
761 if (!hwring)
762 return NULL;
763
764
765
766
767
768 BUG_ON((swsize != 0) != (swringp != NULL));
769 if (swsize) {
770 void *swring = kcalloc(nelem, swsize, GFP_KERNEL);
771
772 if (!swring) {
773 dma_free_coherent(dev, hwlen, hwring, *busaddrp);
774 return NULL;
775 }
776 *(void **)swringp = swring;
777 }
778
779 return hwring;
780}
781
782
783
784
785
786
787
788
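/*
 * Return the number of flits (8-byte words) needed for a ULP_TX scatter/
 * gather list holding @n DMA addresses: addresses are 8 bytes, lengths are
 * 4 bytes, and the first address/length pair lives in the SGL header.
 */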
789static inline unsigned int sgl_len(unsigned int n)
790{
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808 n--;
809 return (3 * n) / 2 + (n & 1) + 2;
810}
811
812
813
814
815
816
817
818
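/*
 * Return the number of TX descriptors (Egress Queue Units) needed to hold a
 * work request of @flits 8-byte flits.
 */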
819static inline unsigned int flits_to_desc(unsigned int flits)
820{
821 BUG_ON(flits > SGE_MAX_WR_LEN / sizeof(__be64));
822 return DIV_ROUND_UP(flits, TXD_PER_EQ_UNIT);
823}
824
825
826
827
828
829
830
831
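/*
 * Return whether an Ethernet packet is small enough to be sent as immediate
 * data in the work request rather than via DMA.  The VF work request leaves
 * no room for immediate data, so this always returns false here.
 */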
832static inline int is_eth_imm(const struct sk_buff *skb)
833{
834
835
836
837
838
839
840
841 return false;
842}
843
844
845
846
847
848
849
850
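/*
 * Return the number of flits needed for a TX work request for the given
 * packet: the scatter/gather list covering the skb head plus fragments, and
 * the firmware/CPL headers (including the LSO header for GSO packets).
 */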
851static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
852{
853 unsigned int flits;
854
855
856
857
858
859
860 if (is_eth_imm(skb))
861 return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
862 sizeof(__be64));
863
864
865
866
867
868
869
870
871
872
873 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
874 if (skb_shinfo(skb)->gso_size)
875 flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
876 sizeof(struct cpl_tx_pkt_lso_core) +
877 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
878 else
879 flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
880 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
881 return flits;
882}
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
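/*
 * Write an skb's scatter/gather list into a TX descriptor at @sgl.  @start
 * bytes of the head have already been consumed and @addr holds the DMA
 * addresses from map_skb().  If the SGL wraps past the end of the ring it is
 * staged in a stack buffer and copied out in two pieces.
 */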
901static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
902 struct ulptx_sgl *sgl, u64 *end, unsigned int start,
903 const dma_addr_t *addr)
904{
905 unsigned int i, len;
906 struct ulptx_sge_pair *to;
907 const struct skb_shared_info *si = skb_shinfo(skb);
908 unsigned int nfrags = si->nr_frags;
909 struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
910
911 len = skb_headlen(skb) - start;
912 if (likely(len)) {
913 sgl->len0 = htonl(len);
914 sgl->addr0 = cpu_to_be64(addr[0] + start);
915 nfrags++;
916 } else {
917 sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
918 sgl->addr0 = cpu_to_be64(addr[1]);
919 }
920
921 sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
922 ULPTX_NSGE_V(nfrags));
923 if (likely(--nfrags == 0))
924 return;
925
926
927
928
929
930 to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge;
931
932 for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
933 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
934 to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
935 to->addr[0] = cpu_to_be64(addr[i]);
936 to->addr[1] = cpu_to_be64(addr[++i]);
937 }
938 if (nfrags) {
939 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
940 to->len[1] = cpu_to_be32(0);
941 to->addr[0] = cpu_to_be64(addr[i + 1]);
942 }
943 if (unlikely((u8 *)end > (u8 *)tq->stat)) {
944 unsigned int part0 = (u8 *)tq->stat - (u8 *)sgl->sge, part1;
945
946 if (likely(part0))
947 memcpy(sgl->sge, buf, part0);
948 part1 = (u8 *)end - (u8 *)tq->stat;
949 memcpy(tq->desc, (u8 *)buf + part0, part1);
950 end = (void *)tq->desc + part1;
951 }
952 if ((uintptr_t)end & 8)
953 *end = 0;
954}
955
956
957
958
959
960
961
962
963
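/*
 * Inform the hardware that @n new TX descriptors have been written to @tq.
 * Uses the BAR2 user doorbell when available (including a write-combined
 * push of the whole descriptor when only one was written), falling back to
 * the kernel doorbell register otherwise.
 */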
964static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
965 int n)
966{
967
968
969
970 wmb();
971
972
973
974
975 if (unlikely(tq->bar2_addr == NULL)) {
976 u32 val = PIDX_V(n);
977
978 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
979 QID_V(tq->cntxt_id) | val);
980 } else {
981 u32 val = PIDX_T5_V(n);
982
983
984
985
986
987
988
989 WARN_ON(val & DBPRIO_F);
990
991
992
993
994
995 if (n == 1 && tq->bar2_qid == 0) {
996 unsigned int index = (tq->pidx
997 ? (tq->pidx - 1)
998 : (tq->size - 1));
999 __be64 *src = (__be64 *)&tq->desc[index];
1000 __be64 __iomem *dst = (__be64 __iomem *)(tq->bar2_addr +
1001 SGE_UDB_WCDOORBELL);
1002 unsigned int count = EQ_UNIT / sizeof(__be64);
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013 while (count) {
1014
1015
1016
1017
1018 writeq((__force u64)*src, dst);
1019 src++;
1020 dst++;
1021 count--;
1022 }
1023 } else
1024 writel(val | QID_V(tq->bar2_qid),
1025 tq->bar2_addr + SGE_UDB_KDOORBELL);
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037 wmb();
1038 }
1039}
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
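/*
 * Copy an skb's data directly into the TX descriptor ring at @pos as
 * immediate data, handling a wrap past the end of the ring and zero-padding
 * the tail out to a 16-byte boundary where needed.
 */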
1052static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq,
1053 void *pos)
1054{
1055 u64 *p;
1056 int left = (void *)tq->stat - pos;
1057
1058 if (likely(skb->len <= left)) {
1059 if (likely(!skb->data_len))
1060 skb_copy_from_linear_data(skb, pos, skb->len);
1061 else
1062 skb_copy_bits(skb, 0, pos, skb->len);
1063 pos += skb->len;
1064 } else {
1065 skb_copy_bits(skb, 0, pos, left);
1066 skb_copy_bits(skb, left, tq->desc, skb->len - left);
1067 pos = (void *)tq->desc + (skb->len - left);
1068 }
1069
1070
1071 p = PTR_ALIGN(pos, 8);
1072 if ((uintptr_t)p & 8)
1073 *p = 0;
1074}
1075
1076
1077
1078
1079
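/*
 * Compute the CPL_TX_PKT checksum-offload control bits for a packet: select
 * the TCP/UDP-over-IPv4/IPv6 checksum type and encode the Ethernet and IP
 * header lengths, or disable L4 checksum offload for protocols the hardware
 * can't handle.
 */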
1080static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
1081{
1082 int csum_type;
1083 const struct iphdr *iph = ip_hdr(skb);
1084
1085 if (iph->version == 4) {
1086 if (iph->protocol == IPPROTO_TCP)
1087 csum_type = TX_CSUM_TCPIP;
1088 else if (iph->protocol == IPPROTO_UDP)
1089 csum_type = TX_CSUM_UDPIP;
1090 else {
1091nocsum:
1092
1093
1094
1095
1096 return TXPKT_L4CSUM_DIS_F;
1097 }
1098 } else {
1099
1100
1101
1102 const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
1103
1104 if (ip6h->nexthdr == IPPROTO_TCP)
1105 csum_type = TX_CSUM_TCPIP6;
1106 else if (ip6h->nexthdr == IPPROTO_UDP)
1107 csum_type = TX_CSUM_UDPIP6;
1108 else
1109 goto nocsum;
1110 }
1111
1112 if (likely(csum_type >= TX_CSUM_TCPIP)) {
1113 u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb));
1114 int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
1115
1116 if (chip <= CHELSIO_T5)
1117 hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
1118 else
1119 hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
1120 return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
1121 } else {
1122 int start = skb_transport_offset(skb);
1123
1124 return TXPKT_CSUM_TYPE_V(csum_type) |
1125 TXPKT_CSUM_START_V(start) |
1126 TXPKT_CSUM_LOC_V(start + skb->csum_offset);
1127 }
1128}
1129
1130
1131
1132
1133static void txq_stop(struct sge_eth_txq *txq)
1134{
1135 netif_tx_stop_queue(txq->txq);
1136 txq->q.stops++;
1137}
1138
1139
1140
1141
1142static inline void txq_advance(struct sge_txq *tq, unsigned int n)
1143{
1144 tq->in_use += n;
1145 tq->pidx += n;
1146 if (tq->pidx >= tq->size)
1147 tq->pidx -= tq->size;
1148}
1149
1150
1151
1152
1153
1154
1155
1156
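/**
 *	t4vf_eth_xmit - add a packet to an Ethernet TX queue
 *	@skb: the packet
 *	@dev: the egress net device
 *
 *	Build the firmware work request (including LSO and checksum-offload
 *	directives), attach the packet either inline or via a scatter/gather
 *	list, and ring the TX doorbell.  Stops the queue when it runs low on
 *	descriptors.
 */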
1157netdev_tx_t t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1158{
1159 u32 wr_mid;
1160 u64 cntrl, *end;
1161 int qidx, credits, max_pkt_len;
1162 unsigned int flits, ndesc;
1163 struct adapter *adapter;
1164 struct sge_eth_txq *txq;
1165 const struct port_info *pi;
1166 struct fw_eth_tx_pkt_vm_wr *wr;
1167 struct cpl_tx_pkt_core *cpl;
1168 const struct skb_shared_info *ssi;
1169 dma_addr_t addr[MAX_SKB_FRAGS + 1];
1170 const size_t fw_hdr_copy_len = sizeof(wr->firmware);
1171
1172
1173
1174
1175
1176
1177
1178 if (unlikely(skb->len < fw_hdr_copy_len))
1179 goto out_free;
1180
1181
1182 max_pkt_len = ETH_HLEN + dev->mtu;
1183 if (skb_vlan_tagged(skb))
1184 max_pkt_len += VLAN_HLEN;
1185 if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
1186 goto out_free;
1187
1188
1189
1190
1191 pi = netdev_priv(dev);
1192 adapter = pi->adapter;
1193 qidx = skb_get_queue_mapping(skb);
1194 BUG_ON(qidx >= pi->nqsets);
1195 txq = &adapter->sge.ethtxq[pi->first_qset + qidx];
1196
1197 if (pi->vlan_id && !skb_vlan_tag_present(skb))
1198 __vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
1199 pi->vlan_id);
1200
1201
1202
1203
1204
1205 reclaim_completed_tx(adapter, &txq->q, true);
1206
1207
1208
1209
1210
1211
1212 flits = calc_tx_flits(skb);
1213 ndesc = flits_to_desc(flits);
1214 credits = txq_avail(&txq->q) - ndesc;
1215
1216 if (unlikely(credits < 0)) {
1217
1218
1219
1220
1221
1222
1223 txq_stop(txq);
1224 dev_err(adapter->pdev_dev,
1225 "%s: TX ring %u full while queue awake!\n",
1226 dev->name, qidx);
1227 return NETDEV_TX_BUSY;
1228 }
1229
1230 if (!is_eth_imm(skb) &&
1231 unlikely(map_skb(adapter->pdev_dev, skb, addr) < 0)) {
1232
1233
1234
1235
1236
1237 txq->mapping_err++;
1238 goto out_free;
1239 }
1240
1241 wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
1242 if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252 txq_stop(txq);
1253 wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
1254 }
1255
1256
1257
1258
1259
1260
1261
1262 BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
1263 wr = (void *)&txq->q.desc[txq->q.pidx];
1264 wr->equiq_to_len16 = cpu_to_be32(wr_mid);
1265 wr->r3[0] = cpu_to_be32(0);
1266 wr->r3[1] = cpu_to_be32(0);
1267 skb_copy_from_linear_data(skb, &wr->firmware, fw_hdr_copy_len);
1268 end = (u64 *)wr + flits;
1269
1270
1271
1272
1273
1274
1275 ssi = skb_shinfo(skb);
1276 if (ssi->gso_size) {
1277 struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
1278 bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
1279 int l3hdr_len = skb_network_header_len(skb);
1280 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1281
1282 wr->op_immdlen =
1283 cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
1284 FW_WR_IMMDLEN_V(sizeof(*lso) +
1285 sizeof(*cpl)));
1286
1287
1288
1289 lso->lso_ctrl =
1290 cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
1291 LSO_FIRST_SLICE_F |
1292 LSO_LAST_SLICE_F |
1293 LSO_IPV6_V(v6) |
1294 LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
1295 LSO_IPHDR_LEN_V(l3hdr_len / 4) |
1296 LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
1297 lso->ipid_ofst = cpu_to_be16(0);
1298 lso->mss = cpu_to_be16(ssi->gso_size);
1299 lso->seqno_offset = cpu_to_be32(0);
1300 if (is_t4(adapter->params.chip))
1301 lso->len = cpu_to_be32(skb->len);
1302 else
1303 lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len));
1304
1305
1306
1307
1308
1309 cpl = (void *)(lso + 1);
1310
1311 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
1312 cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
1313 else
1314 cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
1315
1316 cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
1317 TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
1318 TXPKT_IPHDR_LEN_V(l3hdr_len);
1319 txq->tso++;
1320 txq->tx_cso += ssi->gso_segs;
1321 } else {
1322 int len;
1323
1324 len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
1325 wr->op_immdlen =
1326 cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
1327 FW_WR_IMMDLEN_V(len));
1328
1329
1330
1331
1332
1333 cpl = (void *)(wr + 1);
1334 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1335 cntrl = hwcsum(adapter->params.chip, skb) |
1336 TXPKT_IPCSUM_DIS_F;
1337 txq->tx_cso++;
1338 } else
1339 cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
1340 }
1341
1342
1343
1344
1345
1346 if (skb_vlan_tag_present(skb)) {
1347 txq->vlan_ins++;
1348 cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
1349 }
1350
1351
1352
1353
1354 cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
1355 TXPKT_INTF_V(pi->port_id) |
1356 TXPKT_PF_V(0));
1357 cpl->pack = cpu_to_be16(0);
1358 cpl->len = cpu_to_be16(skb->len);
1359 cpl->ctrl1 = cpu_to_be64(cntrl);
1360
1361#ifdef T4_TRACE
1362 T4_TRACE5(adapter->tb[txq->q.cntxt_id & 7],
1363 "eth_xmit: ndesc %u, credits %u, pidx %u, len %u, frags %u",
1364 ndesc, credits, txq->q.pidx, skb->len, ssi->nr_frags);
1365#endif
1366
1367
1368
1369
1370
1371 if (is_eth_imm(skb)) {
1372
1373
1374
1375
1376 inline_tx_skb(skb, &txq->q, cpl + 1);
1377 dev_consume_skb_any(skb);
1378 } else {
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416 struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
1417 struct sge_txq *tq = &txq->q;
1418 int last_desc;
1419
1420
1421
1422
1423
1424
1425
1426
1427 if (unlikely((void *)sgl == (void *)tq->stat)) {
1428 sgl = (void *)tq->desc;
1429 end = ((void *)tq->desc + ((void *)end - (void *)tq->stat));
1430 }
1431
1432 write_sgl(skb, tq, sgl, end, 0, addr);
1433 skb_orphan(skb);
1434
1435 last_desc = tq->pidx + ndesc - 1;
1436 if (last_desc >= tq->size)
1437 last_desc -= tq->size;
1438 tq->sdesc[last_desc].skb = skb;
1439 tq->sdesc[last_desc].sgl = sgl;
1440 }
1441
1442
1443
1444
1445
1446 txq_advance(&txq->q, ndesc);
1447 netif_trans_update(dev);
1448 ring_tx_db(adapter, &txq->q, ndesc);
1449 return NETDEV_TX_OK;
1450
1451out_free:
1452
1453
1454
1455
1456 dev_kfree_skb_any(skb);
1457 return NETDEV_TX_OK;
1458}
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
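/*
 * Populate an skb's fragment list from a packet gather list, skipping
 * @offset bytes of the first fragment, and take an extra reference on the
 * last page, which the free list still owns.
 */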
1469static inline void copy_frags(struct sk_buff *skb,
1470 const struct pkt_gl *gl,
1471 unsigned int offset)
1472{
1473 int i;
1474
1475
1476 __skb_fill_page_desc(skb, 0, gl->frags[0].page,
1477 gl->frags[0].offset + offset,
1478 gl->frags[0].size - offset);
1479 skb_shinfo(skb)->nr_frags = gl->nfrags;
1480 for (i = 1; i < gl->nfrags; i++)
1481 __skb_fill_page_desc(skb, i, gl->frags[i].page,
1482 gl->frags[i].offset,
1483 gl->frags[i].size);
1484
1485
1486 get_page(gl->frags[gl->nfrags - 1].page);
1487}
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
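/*
 * Build an skb from a packet gather list.  Small packets (<= RX_COPY_THRES)
 * are copied whole into a freshly allocated skb; larger packets get
 * @pull_len bytes copied into an skb of size @skb_len with the remainder
 * attached as page fragments.
 */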
1498static struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
1499 unsigned int skb_len,
1500 unsigned int pull_len)
1501{
1502 struct sk_buff *skb;
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515 if (gl->tot_len <= RX_COPY_THRES) {
1516
1517 skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
1518 if (unlikely(!skb))
1519 goto out;
1520 __skb_put(skb, gl->tot_len);
1521 skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
1522 } else {
1523 skb = alloc_skb(skb_len, GFP_ATOMIC);
1524 if (unlikely(!skb))
1525 goto out;
1526 __skb_put(skb, pull_len);
1527 skb_copy_to_linear_data(skb, gl->va, pull_len);
1528
1529 copy_frags(skb, gl, pull_len);
1530 skb->len = gl->tot_len;
1531 skb->data_len = skb->len - pull_len;
1532 skb->truesize += skb->data_len;
1533 }
1534
1535out:
1536 return skb;
1537}
1538
1539
1540
1541
1542
1543
1544
1545
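/*
 * Release the page references held by a packet gather list, except for the
 * last fragment which the free list still owns.
 */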
1546static void t4vf_pktgl_free(const struct pkt_gl *gl)
1547{
1548 int frag;
1549
1550 frag = gl->nfrags - 1;
1551 while (frag--)
1552 put_page(gl->frags[frag].page);
1553}
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
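/*
 * Deliver a TCP packet to the stack via Generic Receive Offload: attach the
 * gather-list fragments to a NAPI GRO skb, fill in the VLAN tag when the
 * hardware extracted one, and hand the skb to napi_gro_frags().
 */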
1564static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1565 const struct cpl_rx_pkt *pkt)
1566{
1567 struct adapter *adapter = rxq->rspq.adapter;
1568 struct sge *s = &adapter->sge;
1569 struct port_info *pi;
1570 int ret;
1571 struct sk_buff *skb;
1572
1573 skb = napi_get_frags(&rxq->rspq.napi);
1574 if (unlikely(!skb)) {
1575 t4vf_pktgl_free(gl);
1576 rxq->stats.rx_drops++;
1577 return;
1578 }
1579
1580 copy_frags(skb, gl, s->pktshift);
1581 skb->len = gl->tot_len - s->pktshift;
1582 skb->data_len = skb->len;
1583 skb->truesize += skb->data_len;
1584 skb->ip_summed = CHECKSUM_UNNECESSARY;
1585 skb_record_rx_queue(skb, rxq->rspq.idx);
1586 pi = netdev_priv(skb->dev);
1587
1588 if (pkt->vlan_ex && !pi->vlan_id) {
1589 __vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
1590 be16_to_cpu(pkt->vlan));
1591 rxq->stats.vlan_ex++;
1592 }
1593 ret = napi_gro_frags(&rxq->rspq.napi);
1594
1595 if (ret == GRO_HELD)
1596 rxq->stats.lro_pkts++;
1597 else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
1598 rxq->stats.lro_merged++;
1599 rxq->stats.pkts++;
1600 rxq->stats.rx_cso++;
1601}
1602
1603
1604
1605
1606
1607
1608
1609
1610
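/**
 *	t4vf_ethrx_handler - process an ingress Ethernet packet
 *	@rspq: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the RX_PKT message
 *	@gl: the gather list of packet fragments
 *
 *	Either pass the packet to GRO or build an skb, set up checksum and
 *	VLAN offload results, and deliver it to the network stack.
 */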
1611int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1612 const struct pkt_gl *gl)
1613{
1614 struct sk_buff *skb;
1615 const struct cpl_rx_pkt *pkt = (void *)rsp;
1616 bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
1617 (rspq->netdev->features & NETIF_F_RXCSUM);
1618 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
1619 struct adapter *adapter = rspq->adapter;
1620 struct sge *s = &adapter->sge;
1621 struct port_info *pi;
1622
1623
1624
1625
1626
1627 if ((pkt->l2info & cpu_to_be32(RXF_TCP_F)) &&
1628 (rspq->netdev->features & NETIF_F_GRO) && csum_ok &&
1629 !pkt->ip_frag) {
1630 do_gro(rxq, gl, pkt);
1631 return 0;
1632 }
1633
1634
1635
1636
1637 skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
1638 if (unlikely(!skb)) {
1639 t4vf_pktgl_free(gl);
1640 rxq->stats.rx_drops++;
1641 return 0;
1642 }
1643 __skb_pull(skb, s->pktshift);
1644 skb->protocol = eth_type_trans(skb, rspq->netdev);
1645 skb_record_rx_queue(skb, rspq->idx);
1646 pi = netdev_priv(skb->dev);
1647 rxq->stats.pkts++;
1648
1649 if (csum_ok && !pkt->err_vec &&
1650 (be32_to_cpu(pkt->l2info) & (RXF_UDP_F | RXF_TCP_F))) {
1651 if (!pkt->ip_frag) {
1652 skb->ip_summed = CHECKSUM_UNNECESSARY;
1653 rxq->stats.rx_cso++;
1654 } else if (pkt->l2info & htonl(RXF_IP_F)) {
1655 __sum16 c = (__force __sum16)pkt->csum;
1656 skb->csum = csum_unfold(c);
1657 skb->ip_summed = CHECKSUM_COMPLETE;
1658 rxq->stats.rx_cso++;
1659 }
1660 } else
1661 skb_checksum_none_assert(skb);
1662
1663 if (pkt->vlan_ex && !pi->vlan_id) {
1664 rxq->stats.vlan_ex++;
1665 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1666 be16_to_cpu(pkt->vlan));
1667 }
1668
1669 netif_receive_skb(skb);
1670
1671 return 0;
1672}
1673
1674
1675
1676
1677
1678
1679
1680
1681
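/*
 * Return true if a response descriptor contains a new response, i.e. its
 * generation bit matches the current generation of the response queue.
 */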
1682static inline bool is_new_response(const struct rsp_ctrl *rc,
1683 const struct sge_rspq *rspq)
1684{
1685 return ((rc->type_gen >> RSPD_GEN_S) & 0x1) == rspq->gen;
1686}
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
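/*
 * Put buffers back onto a free list after a packet could not be processed
 * (e.g. because no skb memory was available): rewind the consumer index and
 * mark the buffers as unmapped, since their DMA mappings were already torn
 * down while the gather list was being assembled.
 */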
1708static void restore_rx_bufs(const struct pkt_gl *gl, struct sge_fl *fl,
1709 int frags)
1710{
1711 struct rx_sw_desc *sdesc;
1712
1713 while (frags--) {
1714 if (fl->cidx == 0)
1715 fl->cidx = fl->size - 1;
1716 else
1717 fl->cidx--;
1718 sdesc = &fl->sdesc[fl->cidx];
1719 sdesc->page = gl->frags[frags].page;
1720 sdesc->dma_addr |= RX_UNMAPPED_BUF;
1721 fl->avail++;
1722 }
1723}
1724
1725
1726
1727
1728
1729
1730
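/*
 * Advance to the next entry in a response queue, wrapping and flipping the
 * generation bit at the end of the ring.
 */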
1731static inline void rspq_next(struct sge_rspq *rspq)
1732{
1733 rspq->cur_desc = (void *)rspq->cur_desc + rspq->iqe_len;
1734 if (unlikely(++rspq->cidx == rspq->size)) {
1735 rspq->cidx = 0;
1736 rspq->gen ^= 1;
1737 rspq->cur_desc = rspq->desc;
1738 }
1739}
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
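/**
 *	process_responses - process responses from an SGE response queue
 *	@rspq: the ingress response queue to process
 *	@budget: how many responses can be processed in this round
 *
 *	Process responses up to the supplied budget: assemble free-list
 *	buffers into packet gather lists, dispatch them to the registered
 *	handler, and replenish the free list when it drops low.  Returns the
 *	number of responses processed.
 */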
1754static int process_responses(struct sge_rspq *rspq, int budget)
1755{
1756 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
1757 struct adapter *adapter = rspq->adapter;
1758 struct sge *s = &adapter->sge;
1759 int budget_left = budget;
1760
1761 while (likely(budget_left)) {
1762 int ret, rsp_type;
1763 const struct rsp_ctrl *rc;
1764
1765 rc = (void *)rspq->cur_desc + (rspq->iqe_len - sizeof(*rc));
1766 if (!is_new_response(rc, rspq))
1767 break;
1768
1769
1770
1771
1772
1773 dma_rmb();
1774 rsp_type = RSPD_TYPE_G(rc->type_gen);
1775 if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
1776 struct page_frag *fp;
1777 struct pkt_gl gl;
1778 const struct rx_sw_desc *sdesc;
1779 u32 bufsz, frag;
1780 u32 len = be32_to_cpu(rc->pldbuflen_qid);
1781
1782
1783
1784
1785
1786 if (len & RSPD_NEWBUF_F) {
1787
1788
1789
1790
1791
1792 if (likely(rspq->offset > 0)) {
1793 free_rx_bufs(rspq->adapter, &rxq->fl,
1794 1);
1795 rspq->offset = 0;
1796 }
1797 len = RSPD_LEN_G(len);
1798 }
1799 gl.tot_len = len;
1800
1801
1802
1803
1804 for (frag = 0, fp = gl.frags; ; frag++, fp++) {
1805 BUG_ON(frag >= MAX_SKB_FRAGS);
1806 BUG_ON(rxq->fl.avail == 0);
1807 sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
1808 bufsz = get_buf_size(adapter, sdesc);
1809 fp->page = sdesc->page;
1810 fp->offset = rspq->offset;
1811 fp->size = min(bufsz, len);
1812 len -= fp->size;
1813 if (!len)
1814 break;
1815 unmap_rx_buf(rspq->adapter, &rxq->fl);
1816 }
1817 gl.nfrags = frag+1;
1818
1819
1820
1821
1822
1823
1824 dma_sync_single_for_cpu(rspq->adapter->pdev_dev,
1825 get_buf_addr(sdesc),
1826 fp->size, DMA_FROM_DEVICE);
1827 gl.va = (page_address(gl.frags[0].page) +
1828 gl.frags[0].offset);
1829 prefetch(gl.va);
1830
1831
1832
1833
1834
1835 ret = rspq->handler(rspq, rspq->cur_desc, &gl);
1836 if (likely(ret == 0))
1837 rspq->offset += ALIGN(fp->size, s->fl_align);
1838 else
1839 restore_rx_bufs(&gl, &rxq->fl, frag);
1840 } else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
1841 ret = rspq->handler(rspq, rspq->cur_desc, NULL);
1842 } else {
1843 WARN_ON(rsp_type > RSPD_TYPE_CPL_X);
1844 ret = 0;
1845 }
1846
1847 if (unlikely(ret)) {
1848
1849
1850
1851
1852
1853 const int NOMEM_TIMER_IDX = SGE_NTIMERS-1;
1854 rspq->next_intr_params =
1855 QINTR_TIMER_IDX_V(NOMEM_TIMER_IDX);
1856 break;
1857 }
1858
1859 rspq_next(rspq);
1860 budget_left--;
1861 }
1862
1863
1864
1865
1866
1867
1868 if (rspq->offset >= 0 &&
1869 fl_cap(&rxq->fl) - rxq->fl.avail >= 2*FL_PER_EQ_UNIT)
1870 __refill_fl(rspq->adapter, &rxq->fl);
1871 return budget - budget_left;
1872}
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
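/**
 *	napi_rx_handler - the NAPI handler for RX processing
 *	@napi: the napi instance
 *	@budget: how many packets we can process in this round
 *
 *	Process responses for this queue, complete NAPI when the budget isn't
 *	exhausted, and update the queue's interrupt state via the GTS register
 *	or the BAR2 doorbell.  Returns the amount of work done.
 */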
1885static int napi_rx_handler(struct napi_struct *napi, int budget)
1886{
1887 unsigned int intr_params;
1888 struct sge_rspq *rspq = container_of(napi, struct sge_rspq, napi);
1889 int work_done = process_responses(rspq, budget);
1890 u32 val;
1891
1892 if (likely(work_done < budget)) {
1893 napi_complete_done(napi, work_done);
1894 intr_params = rspq->next_intr_params;
1895 rspq->next_intr_params = rspq->intr_params;
1896 } else
1897 intr_params = QINTR_TIMER_IDX_V(SGE_TIMER_UPD_CIDX);
1898
1899 if (unlikely(work_done == 0))
1900 rspq->unhandled_irqs++;
1901
1902 val = CIDXINC_V(work_done) | SEINTARM_V(intr_params);
1903
1904
1905
1906 if (unlikely(!rspq->bar2_addr)) {
1907 t4_write_reg(rspq->adapter,
1908 T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
1909 val | INGRESSQID_V((u32)rspq->cntxt_id));
1910 } else {
1911 writel(val | INGRESSQID_V(rspq->bar2_qid),
1912 rspq->bar2_addr + SGE_UDB_GTS);
1913 wmb();
1914 }
1915 return work_done;
1916}
1917
1918
1919
1920
1921
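/*
 * MSI-X interrupt handler for an SGE response queue: simply schedule NAPI.
 */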
1922irqreturn_t t4vf_sge_intr_msix(int irq, void *cookie)
1923{
1924 struct sge_rspq *rspq = cookie;
1925
1926 napi_schedule(&rspq->napi);
1927 return IRQ_HANDLED;
1928}
1929
1930
1931
1932
1933
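/*
 * Process responses from the MSI/forwarded-interrupt queue: each entry names
 * an ingress queue that needs servicing, whose NAPI instance is scheduled
 * here, and the entries are then acknowledged via the GTS register.  Returns
 * the number of interrupt-queue entries handled.
 */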
1934static unsigned int process_intrq(struct adapter *adapter)
1935{
1936 struct sge *s = &adapter->sge;
1937 struct sge_rspq *intrq = &s->intrq;
1938 unsigned int work_done;
1939 u32 val;
1940
1941 spin_lock(&adapter->sge.intrq_lock);
1942 for (work_done = 0; ; work_done++) {
1943 const struct rsp_ctrl *rc;
1944 unsigned int qid, iq_idx;
1945 struct sge_rspq *rspq;
1946
1947
1948
1949
1950
1951 rc = (void *)intrq->cur_desc + (intrq->iqe_len - sizeof(*rc));
1952 if (!is_new_response(rc, intrq))
1953 break;
1954
1955
1956
1957
1958
1959
1960 dma_rmb();
1961 if (unlikely(RSPD_TYPE_G(rc->type_gen) != RSPD_TYPE_INTR_X)) {
1962 dev_err(adapter->pdev_dev,
1963 "Unexpected INTRQ response type %d\n",
1964 RSPD_TYPE_G(rc->type_gen));
1965 continue;
1966 }
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976 qid = RSPD_QID_G(be32_to_cpu(rc->pldbuflen_qid));
1977 iq_idx = IQ_IDX(s, qid);
1978 if (unlikely(iq_idx >= MAX_INGQ)) {
1979 dev_err(adapter->pdev_dev,
1980 "Ingress QID %d out of range\n", qid);
1981 continue;
1982 }
1983 rspq = s->ingr_map[iq_idx];
1984 if (unlikely(rspq == NULL)) {
1985 dev_err(adapter->pdev_dev,
1986 "Ingress QID %d RSPQ=NULL\n", qid);
1987 continue;
1988 }
1989 if (unlikely(rspq->abs_id != qid)) {
1990 dev_err(adapter->pdev_dev,
1991 "Ingress QID %d refers to RSPQ %d\n",
1992 qid, rspq->abs_id);
1993 continue;
1994 }
1995
1996
1997
1998
1999
2000
2001 napi_schedule(&rspq->napi);
2002 rspq_next(intrq);
2003 }
2004
2005 val = CIDXINC_V(work_done) | SEINTARM_V(intrq->intr_params);
2006
2007
2008
2009 if (unlikely(!intrq->bar2_addr)) {
2010 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
2011 val | INGRESSQID_V(intrq->cntxt_id));
2012 } else {
2013 writel(val | INGRESSQID_V(intrq->bar2_qid),
2014 intrq->bar2_addr + SGE_UDB_GTS);
2015 wmb();
2016 }
2017
2018 spin_unlock(&adapter->sge.intrq_lock);
2019
2020 return work_done;
2021}
2022
2023
2024
2025
2026
2027static irqreturn_t t4vf_intr_msi(int irq, void *cookie)
2028{
2029 struct adapter *adapter = cookie;
2030
2031 process_intrq(adapter);
2032 return IRQ_HANDLED;
2033}
2034
2035
2036
2037
2038
2039
2040
2041
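/*
 * Return the interrupt handler appropriate for the current interrupt mode:
 * the per-queue MSI-X handler or the MSI/forwarded-interrupt handler.
 */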
2042irq_handler_t t4vf_intr_handler(struct adapter *adapter)
2043{
2044 BUG_ON((adapter->flags &
2045 (CXGB4VF_USING_MSIX | CXGB4VF_USING_MSI)) == 0);
2046 if (adapter->flags & CXGB4VF_USING_MSIX)
2047 return t4vf_sge_intr_msix;
2048 else
2049 return t4vf_intr_msi;
2050}
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
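/*
 * RX queue check timer: scan the bitmap of starving free lists and, for any
 * that are still starving, kick their NAPI handler so the free list gets
 * replenished, then re-arm the timer.
 */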
2063static void sge_rx_timer_cb(struct timer_list *t)
2064{
2065 struct adapter *adapter = from_timer(adapter, t, sge.rx_timer);
2066 struct sge *s = &adapter->sge;
2067 unsigned int i;
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077 for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) {
2078 unsigned long m;
2079
2080 for (m = s->starving_fl[i]; m; m &= m - 1) {
2081 unsigned int id = __ffs(m) + i * BITS_PER_LONG;
2082 struct sge_fl *fl = s->egr_map[id];
2083
2084 clear_bit(id, s->starving_fl);
2085 smp_mb__after_atomic();
2086
2087
2088
2089
2090
2091
2092
2093 if (fl_starving(adapter, fl)) {
2094 struct sge_eth_rxq *rxq;
2095
2096 rxq = container_of(fl, struct sge_eth_rxq, fl);
2097 if (napi_reschedule(&rxq->rspq.napi))
2098 fl->starving++;
2099 else
2100 set_bit(id, s->starving_fl);
2101 }
2102 }
2103 }
2104
2105
2106
2107
2108 mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
2109}
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
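/*
 * TX queue check timer: walk the Ethernet TX queues round-robin, reclaiming
 * up to MAX_TIMER_TX_RECLAIM completed descriptors in total, and re-arm the
 * timer (sooner if the reclaim budget was exhausted).
 */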
2122static void sge_tx_timer_cb(struct timer_list *t)
2123{
2124 struct adapter *adapter = from_timer(adapter, t, sge.tx_timer);
2125 struct sge *s = &adapter->sge;
2126 unsigned int i, budget;
2127
2128 budget = MAX_TIMER_TX_RECLAIM;
2129 i = s->ethtxq_rover;
2130 do {
2131 struct sge_eth_txq *txq = &s->ethtxq[i];
2132
2133 if (reclaimable(&txq->q) && __netif_tx_trylock(txq->txq)) {
2134 int avail = reclaimable(&txq->q);
2135
2136 if (avail > budget)
2137 avail = budget;
2138
2139 free_tx_desc(adapter, &txq->q, avail, true);
2140 txq->q.in_use -= avail;
2141 __netif_tx_unlock(txq->txq);
2142
2143 budget -= avail;
2144 if (!budget)
2145 break;
2146 }
2147
2148 i++;
2149 if (i >= s->ethqsets)
2150 i = 0;
2151 } while (i != s->ethtxq_rover);
2152 s->ethtxq_rover = i;
2153
2154
2155
2156
2157
2158
2159 mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
2160}
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
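/*
 * Return the BAR2 user doorbell address for a queue, or NULL if BAR2 SGE
 * queue registers aren't available, along with the queue ID to use in
 * doorbell writes (via *pbar2_qid).
 */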
2175static void __iomem *bar2_address(struct adapter *adapter,
2176 unsigned int qid,
2177 enum t4_bar2_qtype qtype,
2178 unsigned int *pbar2_qid)
2179{
2180 u64 bar2_qoffset;
2181 int ret;
2182
2183 ret = t4vf_bar2_sge_qregs(adapter, qid, qtype,
2184 &bar2_qoffset, pbar2_qid);
2185 if (ret)
2186 return NULL;
2187
2188 return adapter->bar2 + bar2_qoffset;
2189}
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
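/**
 *	t4vf_sge_alloc_rxq - allocate an SGE RX (response) queue
 *	@adapter: the adapter
 *	@rspq: the new queue's response queue state
 *	@iqasynch: whether this is an asynchronous (firmware event) queue
 *	@dev: the net device associated with the queue
 *	@intr_dest: interrupt destination (MSI-X vector or forwarding queue)
 *	@fl: optional free list to attach to the response queue
 *	@hnd: the response handler for the queue
 *
 *	Allocate the host descriptor rings, issue FW_IQ_CMD to create the
 *	ingress queue (and free list) in the hardware, and initialise the
 *	software state, including an initial free-list fill.
 */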
2201int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
2202 bool iqasynch, struct net_device *dev,
2203 int intr_dest,
2204 struct sge_fl *fl, rspq_handler_t hnd)
2205{
2206 struct sge *s = &adapter->sge;
2207 struct port_info *pi = netdev_priv(dev);
2208 struct fw_iq_cmd cmd, rpl;
2209 int ret, iqandst, flsz = 0;
2210 int relaxed = !(adapter->flags & CXGB4VF_ROOT_NO_RELAXED_ORDERING);
2211
2212
2213
2214
2215
2216
2217
2218
2219 if ((adapter->flags & CXGB4VF_USING_MSI) &&
2220 rspq != &adapter->sge.intrq) {
2221 iqandst = SGE_INTRDST_IQ;
2222 intr_dest = adapter->sge.intrq.abs_id;
2223 } else
2224 iqandst = SGE_INTRDST_PCI;
2225
2226
2227
2228
2229
2230
2231
2232 rspq->size = roundup(rspq->size, 16);
2233 rspq->desc = alloc_ring(adapter->pdev_dev, rspq->size, rspq->iqe_len,
2234 0, &rspq->phys_addr, NULL, 0);
2235 if (!rspq->desc)
2236 return -ENOMEM;
2237
2238
2239
2240
2241
2242
2243
2244
2245 memset(&cmd, 0, sizeof(cmd));
2246 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) |
2247 FW_CMD_REQUEST_F |
2248 FW_CMD_WRITE_F |
2249 FW_CMD_EXEC_F);
2250 cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC_F |
2251 FW_IQ_CMD_IQSTART_F |
2252 FW_LEN16(cmd));
2253 cmd.type_to_iqandstindex =
2254 cpu_to_be32(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
2255 FW_IQ_CMD_IQASYNCH_V(iqasynch) |
2256 FW_IQ_CMD_VIID_V(pi->viid) |
2257 FW_IQ_CMD_IQANDST_V(iqandst) |
2258 FW_IQ_CMD_IQANUS_V(1) |
2259 FW_IQ_CMD_IQANUD_V(SGE_UPDATEDEL_INTR) |
2260 FW_IQ_CMD_IQANDSTINDEX_V(intr_dest));
2261 cmd.iqdroprss_to_iqesize =
2262 cpu_to_be16(FW_IQ_CMD_IQPCIECH_V(pi->port_id) |
2263 FW_IQ_CMD_IQGTSMODE_F |
2264 FW_IQ_CMD_IQINTCNTTHRESH_V(rspq->pktcnt_idx) |
2265 FW_IQ_CMD_IQESIZE_V(ilog2(rspq->iqe_len) - 4));
2266 cmd.iqsize = cpu_to_be16(rspq->size);
2267 cmd.iqaddr = cpu_to_be64(rspq->phys_addr);
2268
2269 if (fl) {
2270 unsigned int chip_ver =
2271 CHELSIO_CHIP_VERSION(adapter->params.chip);
2272
2273
2274
2275
2276
2277
2278
2279
2280 if (fl->size < s->fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT)
2281 fl->size = s->fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT;
2282 fl->size = roundup(fl->size, FL_PER_EQ_UNIT);
2283 fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
2284 sizeof(__be64), sizeof(struct rx_sw_desc),
2285 &fl->addr, &fl->sdesc, s->stat_len);
2286 if (!fl->desc) {
2287 ret = -ENOMEM;
2288 goto err;
2289 }
2290
2291
2292
2293
2294
2295
2296 flsz = (fl->size / FL_PER_EQ_UNIT +
2297 s->stat_len / EQ_UNIT);
2298
2299
2300
2301
2302
2303 cmd.iqns_to_fl0congen =
2304 cpu_to_be32(
2305 FW_IQ_CMD_FL0HOSTFCMODE_V(SGE_HOSTFCMODE_NONE) |
2306 FW_IQ_CMD_FL0PACKEN_F |
2307 FW_IQ_CMD_FL0FETCHRO_V(relaxed) |
2308 FW_IQ_CMD_FL0DATARO_V(relaxed) |
2309 FW_IQ_CMD_FL0PADEN_F);
2310
2311
2312
2313
2314
2315
2316
2317
2318
2319 cmd.fl0dcaen_to_fl0cidxfthresh =
2320 cpu_to_be16(
2321 FW_IQ_CMD_FL0FBMIN_V(chip_ver <= CHELSIO_T5
2322 ? FETCHBURSTMIN_128B_X
2323 : FETCHBURSTMIN_64B_T6_X) |
2324 FW_IQ_CMD_FL0FBMAX_V((chip_ver <= CHELSIO_T5) ?
2325 FETCHBURSTMAX_512B_X :
2326 FETCHBURSTMAX_256B_X));
2327 cmd.fl0size = cpu_to_be16(flsz);
2328 cmd.fl0addr = cpu_to_be64(fl->addr);
2329 }
2330
2331
2332
2333
2334
2335 ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
2336 if (ret)
2337 goto err;
2338
2339 netif_napi_add(dev, &rspq->napi, napi_rx_handler, 64);
2340 rspq->cur_desc = rspq->desc;
2341 rspq->cidx = 0;
2342 rspq->gen = 1;
2343 rspq->next_intr_params = rspq->intr_params;
2344 rspq->cntxt_id = be16_to_cpu(rpl.iqid);
2345 rspq->bar2_addr = bar2_address(adapter,
2346 rspq->cntxt_id,
2347 T4_BAR2_QTYPE_INGRESS,
2348 &rspq->bar2_qid);
2349 rspq->abs_id = be16_to_cpu(rpl.physiqid);
2350 rspq->size--;
2351 rspq->adapter = adapter;
2352 rspq->netdev = dev;
2353 rspq->handler = hnd;
2354
2355
2356 rspq->offset = fl ? 0 : -1;
2357
2358 if (fl) {
2359 fl->cntxt_id = be16_to_cpu(rpl.fl0id);
2360 fl->avail = 0;
2361 fl->pend_cred = 0;
2362 fl->pidx = 0;
2363 fl->cidx = 0;
2364 fl->alloc_failed = 0;
2365 fl->large_alloc_failed = 0;
2366 fl->starving = 0;
2367
2368
2369
2370
2371 fl->bar2_addr = bar2_address(adapter,
2372 fl->cntxt_id,
2373 T4_BAR2_QTYPE_EGRESS,
2374 &fl->bar2_qid);
2375
2376 refill_fl(adapter, fl, fl_cap(fl), GFP_KERNEL);
2377 }
2378
2379 return 0;
2380
2381err:
2382
2383
2384
2385
2386 if (rspq->desc) {
2387 dma_free_coherent(adapter->pdev_dev, rspq->size * rspq->iqe_len,
2388 rspq->desc, rspq->phys_addr);
2389 rspq->desc = NULL;
2390 }
2391 if (fl && fl->desc) {
2392 kfree(fl->sdesc);
2393 fl->sdesc = NULL;
2394 dma_free_coherent(adapter->pdev_dev, flsz * EQ_UNIT,
2395 fl->desc, fl->addr);
2396 fl->desc = NULL;
2397 }
2398 return ret;
2399}
2400
2401
2402
2403
2404
2405
2406
2407
2408
2409
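/**
 *	t4vf_sge_alloc_eth_txq - allocate an SGE Ethernet TX queue
 *	@adapter: the adapter
 *	@txq: the new queue's TX queue state
 *	@dev: the net device associated with the queue
 *	@devq: the network TX queue associated with the new TX queue
 *	@iqid: the ingress queue to which completions are directed
 *
 *	Allocate the descriptor ring, issue FW_EQ_ETH_CMD to create the
 *	egress queue in the hardware, and initialise the software state.  On
 *	firmware failure the host resources are released again.
 */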
2410int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
2411 struct net_device *dev, struct netdev_queue *devq,
2412 unsigned int iqid)
2413{
2414 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
2415 struct port_info *pi = netdev_priv(dev);
2416 struct fw_eq_eth_cmd cmd, rpl;
2417 struct sge *s = &adapter->sge;
2418 int ret, nentries;
2419
2420
2421
2422
2423
2424 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2425
2426
2427
2428
2429
2430 txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
2431 sizeof(struct tx_desc),
2432 sizeof(struct tx_sw_desc),
2433 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len);
2434 if (!txq->q.desc)
2435 return -ENOMEM;
2436
2437
2438
2439
2440
2441
2442
2443
2444 memset(&cmd, 0, sizeof(cmd));
2445 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
2446 FW_CMD_REQUEST_F |
2447 FW_CMD_WRITE_F |
2448 FW_CMD_EXEC_F);
2449 cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC_F |
2450 FW_EQ_ETH_CMD_EQSTART_F |
2451 FW_LEN16(cmd));
2452 cmd.autoequiqe_to_viid = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
2453 FW_EQ_ETH_CMD_VIID_V(pi->viid));
2454 cmd.fetchszm_to_iqid =
2455 cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE_V(SGE_HOSTFCMODE_STPG) |
2456 FW_EQ_ETH_CMD_PCIECHN_V(pi->port_id) |
2457 FW_EQ_ETH_CMD_IQID_V(iqid));
2458 cmd.dcaen_to_eqsize =
2459 cpu_to_be32(FW_EQ_ETH_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
2460 ? FETCHBURSTMIN_64B_X
2461 : FETCHBURSTMIN_64B_T6_X) |
2462 FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
2463 FW_EQ_ETH_CMD_CIDXFTHRESH_V(
2464 CIDXFLUSHTHRESH_32_X) |
2465 FW_EQ_ETH_CMD_EQSIZE_V(nentries));
2466 cmd.eqaddr = cpu_to_be64(txq->q.phys_addr);
2467
2468
2469
2470
2471
2472 ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
2473 if (ret) {
2474
2475
2476
2477
2478 kfree(txq->q.sdesc);
2479 txq->q.sdesc = NULL;
2480 dma_free_coherent(adapter->pdev_dev,
2481 nentries * sizeof(struct tx_desc),
2482 txq->q.desc, txq->q.phys_addr);
2483 txq->q.desc = NULL;
2484 return ret;
2485 }
2486
2487 txq->q.in_use = 0;
2488 txq->q.cidx = 0;
2489 txq->q.pidx = 0;
2490 txq->q.stat = (void *)&txq->q.desc[txq->q.size];
2491 txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_G(be32_to_cpu(rpl.eqid_pkd));
2492 txq->q.bar2_addr = bar2_address(adapter,
2493 txq->q.cntxt_id,
2494 T4_BAR2_QTYPE_EGRESS,
2495 &txq->q.bar2_qid);
2496 txq->q.abs_id =
2497 FW_EQ_ETH_CMD_PHYSEQID_G(be32_to_cpu(rpl.physeqid_pkd));
2498 txq->txq = devq;
2499 txq->tso = 0;
2500 txq->tx_cso = 0;
2501 txq->vlan_ins = 0;
2502 txq->q.stops = 0;
2503 txq->q.restarts = 0;
2504 txq->mapping_err = 0;
2505 return 0;
2506}
2507
2508
2509
2510
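/*
 * Release the host resources (descriptor ring) of a TX queue.
 */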
2511static void free_txq(struct adapter *adapter, struct sge_txq *tq)
2512{
2513 struct sge *s = &adapter->sge;
2514
2515 dma_free_coherent(adapter->pdev_dev,
2516 tq->size * sizeof(*tq->desc) + s->stat_len,
2517 tq->desc, tq->phys_addr);
2518 tq->cntxt_id = 0;
2519 tq->sdesc = NULL;
2520 tq->desc = NULL;
2521}
2522
2523
2524
2525
2526
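/*
 * Free a response queue and, if present, its associated free list: tell the
 * firmware to release the ingress queue, free any remaining RX buffers, and
 * release the host descriptor rings and software state.
 */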
2527static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
2528 struct sge_fl *fl)
2529{
2530 struct sge *s = &adapter->sge;
2531 unsigned int flid = fl ? fl->cntxt_id : 0xffff;
2532
2533 t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP,
2534 rspq->cntxt_id, flid, 0xffff);
2535 dma_free_coherent(adapter->pdev_dev, (rspq->size + 1) * rspq->iqe_len,
2536 rspq->desc, rspq->phys_addr);
2537 netif_napi_del(&rspq->napi);
2538 rspq->netdev = NULL;
2539 rspq->cntxt_id = 0;
2540 rspq->abs_id = 0;
2541 rspq->desc = NULL;
2542
2543 if (fl) {
2544 free_rx_bufs(adapter, fl, fl->avail);
2545 dma_free_coherent(adapter->pdev_dev,
2546 fl->size * sizeof(*fl->desc) + s->stat_len,
2547 fl->desc, fl->addr);
2548 kfree(fl->sdesc);
2549 fl->sdesc = NULL;
2550 fl->cntxt_id = 0;
2551 fl->desc = NULL;
2552 }
2553}
2554
2555
2556
2557
2558
2559
2560
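/**
 *	t4vf_free_sge_resources - free the adapter's SGE queue resources
 *	@adapter: the adapter
 *
 *	Frees all Ethernet RX and TX queues, the firmware event queue and the
 *	forwarded-interrupt queue.
 */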
2561void t4vf_free_sge_resources(struct adapter *adapter)
2562{
2563 struct sge *s = &adapter->sge;
2564 struct sge_eth_rxq *rxq = s->ethrxq;
2565 struct sge_eth_txq *txq = s->ethtxq;
2566 struct sge_rspq *evtq = &s->fw_evtq;
2567 struct sge_rspq *intrq = &s->intrq;
2568 int qs;
2569
2570 for (qs = 0; qs < adapter->sge.ethqsets; qs++, rxq++, txq++) {
2571 if (rxq->rspq.desc)
2572 free_rspq_fl(adapter, &rxq->rspq, &rxq->fl);
2573 if (txq->q.desc) {
2574 t4vf_eth_eq_free(adapter, txq->q.cntxt_id);
2575 free_tx_desc(adapter, &txq->q, txq->q.in_use, true);
2576 kfree(txq->q.sdesc);
2577 free_txq(adapter, &txq->q);
2578 }
2579 }
2580 if (evtq->desc)
2581 free_rspq_fl(adapter, evtq, NULL);
2582 if (intrq->desc)
2583 free_rspq_fl(adapter, intrq, NULL);
2584}
2585
2586
2587
2588
2589
2590
2591
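/**
 *	t4vf_sge_start - enable SGE operation
 *	@adapter: the adapter
 *
 *	Start the RX and TX queue check timers.
 */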
2592void t4vf_sge_start(struct adapter *adapter)
2593{
2594 adapter->sge.ethtxq_rover = 0;
2595 mod_timer(&adapter->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
2596 mod_timer(&adapter->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
2597}
2598
2599
2600
2601
2602
2603
2604
2605
2606
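/**
 *	t4vf_sge_stop - disable SGE operation
 *	@adapter: the adapter
 *
 *	Stop the RX and TX queue check timers.  Does not free the queues
 *	themselves.
 */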
2607void t4vf_sge_stop(struct adapter *adapter)
2608{
2609 struct sge *s = &adapter->sge;
2610
2611 if (s->rx_timer.function)
2612 del_timer_sync(&s->rx_timer);
2613 if (s->tx_timer.function)
2614 del_timer_sync(&s->tx_timer);
2615}
2616
2617
2618
2619
2620
2621
2622
2623
2624
2625
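/**
 *	t4vf_sge_init - initialize the adapter's SGE software state
 *	@adapter: the adapter
 *
 *	Derive the SGE software parameters (free-list buffer sizes and page
 *	order, status-page length, packet shift, alignment, free-list
 *	starvation threshold) from the values programmed into the hardware by
 *	the Physical Function driver, and set up the RX/TX timers and the
 *	interrupt-queue lock.
 */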
2626int t4vf_sge_init(struct adapter *adapter)
2627{
2628 struct sge_params *sge_params = &adapter->params.sge;
2629 u32 fl_small_pg = sge_params->sge_fl_buffer_size[0];
2630 u32 fl_large_pg = sge_params->sge_fl_buffer_size[1];
2631 struct sge *s = &adapter->sge;
2632
2633
2634
2635
2636
2637
2638
2639
2640
2641
2642 if (fl_large_pg <= fl_small_pg)
2643 fl_large_pg = 0;
2644
2645
2646
2647
2648 if (fl_small_pg != PAGE_SIZE ||
2649 (fl_large_pg & (fl_large_pg - 1)) != 0) {
2650 dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
2651 fl_small_pg, fl_large_pg);
2652 return -EINVAL;
2653 }
2654 if ((sge_params->sge_control & RXPKTCPLMODE_F) !=
2655 RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
2656 dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
2657 return -EINVAL;
2658 }
2659
2660
2661
2662
2663 if (fl_large_pg)
2664 s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
2665 s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
2666 ? 128 : 64);
2667 s->pktshift = PKTSHIFT_G(sge_params->sge_control);
2668 s->fl_align = t4vf_fl_pkt_align(adapter);
2669
2670
2671
2672
2673
2674
2675
2676
2677 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
2678 case CHELSIO_T4:
2679 s->fl_starve_thres =
2680 EGRTHRESHOLD_G(sge_params->sge_congestion_control);
2681 break;
2682 case CHELSIO_T5:
2683 s->fl_starve_thres =
2684 EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control);
2685 break;
2686 case CHELSIO_T6:
2687 default:
2688 s->fl_starve_thres =
2689 T6_EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control);
2690 break;
2691 }
2692 s->fl_starve_thres = s->fl_starve_thres * 2 + 1;
2693
2694
2695
2696
2697 timer_setup(&s->rx_timer, sge_rx_timer_cb, 0);
2698 timer_setup(&s->tx_timer, sge_tx_timer_cb, 0);
2699
2700
2701
2702
2703 spin_lock_init(&s->intrq_lock);
2704
2705 return 0;
2706}
2707