/*
 * This file is part of the Chelsio T4/T5/T6 Virtual Function (cxgb4vf)
 * Ethernet driver for Linux: SGE (Scatter Gather Engine) support code
 * covering Free Lists, ingress (response) queues and egress (TX) queues.
 */

36#include <linux/skbuff.h>
37#include <linux/netdevice.h>
38#include <linux/etherdevice.h>
39#include <linux/if_vlan.h>
40#include <linux/ip.h>
41#include <net/ipv6.h>
42#include <net/tcp.h>
43#include <linux/dma-mapping.h>
44#include <linux/prefetch.h>
45
46#include "t4vf_common.h"
47#include "t4vf_defs.h"
48
49#include "../cxgb4/t4_regs.h"
50#include "../cxgb4/t4_values.h"
51#include "../cxgb4/t4fw_api.h"
52#include "../cxgb4/t4_msg.h"
53
54
55
56
57enum {
58
59
60
61
62
63
64
65 EQ_UNIT = SGE_EQ_IDXSIZE,
66 FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
67 TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
68
69
70
71
72
73
74
75 MAX_TX_RECLAIM = 16,
76
77
78
79
80
81 MAX_RX_REFILL = 16,
82
83
84
85
86
87
88 RX_QCHECK_PERIOD = (HZ / 2),
89
90
91
92
93
94 TX_QCHECK_PERIOD = (HZ / 2),
95 MAX_TIMER_TX_RECLAIM = 100,
96
97
98
99
100
101
102
103
104
105 ETHTXQ_MAX_FRAGS = MAX_SKB_FRAGS + 1,
106 ETHTXQ_MAX_SGL_LEN = ((3 * (ETHTXQ_MAX_FRAGS-1))/2 +
107 ((ETHTXQ_MAX_FRAGS-1) & 1) +
108 2),
109 ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
110 sizeof(struct cpl_tx_pkt_lso_core) +
111 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
112 ETHTXQ_MAX_FLITS = ETHTXQ_MAX_SGL_LEN + ETHTXQ_MAX_HDR,
113
114 ETHTXQ_STOP_THRES = 1 + DIV_ROUND_UP(ETHTXQ_MAX_FLITS, TXD_PER_EQ_UNIT),
115
116
117
118
119
120
121
122 MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_M,
123
124
125
126
127 MAX_CTRL_WR_LEN = 256,
128
129
130
131
132
133 MAX_IMM_TX_LEN = (MAX_IMM_TX_PKT_LEN > MAX_CTRL_WR_LEN
134 ? MAX_IMM_TX_PKT_LEN
135 : MAX_CTRL_WR_LEN),
136
137
138
139
140
141
142
143 RX_COPY_THRES = 256,
144 RX_PULL_LEN = 128,
145
146
147
148
149
150
151 RX_SKB_LEN = 512,
152};
153
154
155
156
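/*
 * Software state per TX descriptor: the associated socket buffer (if any)
 * and a pointer to its Scatter/Gather list within the TX ring so the DMA
 * mappings can be unwound when the descriptor is reclaimed.
 */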
157struct tx_sw_desc {
158 struct sk_buff *skb;
159 struct ulptx_sgl *sgl;
160};
161
162
163
164
165
166
167
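/*
 * Software state per RX Free List descriptor: the page backing the buffer
 * and its DMA address, with buffer-state flags folded into the low bits.
 */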
168struct rx_sw_desc {
169 struct page *page;
170 dma_addr_t dma_addr;
171
172};
173
174
175
176
177
178
179
180
181
182
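/*
 * The DMA addresses of the Free List page buffers are page-aligned, so the
 * low-order bits of rx_sw_desc.dma_addr are free to record per-buffer state:
 * whether the buffer is a "large" (compound page) buffer and whether its
 * DMA mapping has already been torn down.
 */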
183enum {
184 RX_LARGE_BUF = 1 << 0,
185 RX_UNMAPPED_BUF = 1 << 1,
186};
187
188
189
190
191
192
193
194
195static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *sdesc)
196{
197 return sdesc->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF);
198}
199
200
201
202
203
204
205
206
207static inline bool is_buf_mapped(const struct rx_sw_desc *sdesc)
208{
209 return !(sdesc->dma_addr & RX_UNMAPPED_BUF);
210}
211
212
213
214
215
216
217
218static inline int need_skb_unmap(void)
219{
220#ifdef CONFIG_NEED_DMA_MAP_STATE
221 return 1;
222#else
223 return 0;
224#endif
225}
226
227
228
229
230
231
232
233static inline unsigned int txq_avail(const struct sge_txq *tq)
234{
235 return tq->size - 1 - tq->in_use;
236}
237
238
239
240
241
242
243
244
245
246
247static inline unsigned int fl_cap(const struct sge_fl *fl)
248{
249 return fl->size - FL_PER_EQ_UNIT;
250}
251
252
253
254
255
256
257
258
259
260
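/**
 *	fl_starving - return whether a Free List is starving
 *	@adapter: the adapter
 *	@fl: the Free List
 *
 *	Tests whether a Free List is starving, i.e. whether the number of
 *	buffers actually available to the hardware has fallen below our
 *	"starvation" threshold.
 */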
261static inline bool fl_starving(const struct adapter *adapter,
262 const struct sge_fl *fl)
263{
264 const struct sge *s = &adapter->sge;
265
266 return fl->avail - fl->pend_cred <= s->fl_starve_thres;
267}
268
269
270
271
272
273
274
275
276
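/**
 *	map_skb - map an skb for DMA to the device
 *	@dev: the egress net device
 *	@skb: the packet to map
 *	@addr: the base of the array to receive the DMA bus addresses
 *
 *	Map an skb for DMA: the linear portion first, then each page fragment.
 *	On failure, any mappings already established are unwound and -ENOMEM
 *	is returned.
 */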
277static int map_skb(struct device *dev, const struct sk_buff *skb,
278 dma_addr_t *addr)
279{
280 const skb_frag_t *fp, *end;
281 const struct skb_shared_info *si;
282
283 *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
284 if (dma_mapping_error(dev, *addr))
285 goto out_err;
286
287 si = skb_shinfo(skb);
288 end = &si->frags[si->nr_frags];
289 for (fp = si->frags; fp < end; fp++) {
290 *++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
291 DMA_TO_DEVICE);
292 if (dma_mapping_error(dev, *addr))
293 goto unwind;
294 }
295 return 0;
296
297unwind:
298 while (fp-- > si->frags)
299 dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
300 dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
301
302out_err:
303 return -ENOMEM;
304}
305
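/*
 * Unmap the DMA mappings described by an ULP TX Scatter/Gather list that was
 * written into the TX ring, handling the case where the list wraps past the
 * ring's Status Page back to the start of the ring.
 */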
306static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
307 const struct ulptx_sgl *sgl, const struct sge_txq *tq)
308{
309 const struct ulptx_sge_pair *p;
310 unsigned int nfrags = skb_shinfo(skb)->nr_frags;
311
312 if (likely(skb_headlen(skb)))
313 dma_unmap_single(dev, be64_to_cpu(sgl->addr0),
314 be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
315 else {
316 dma_unmap_page(dev, be64_to_cpu(sgl->addr0),
317 be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
318 nfrags--;
319 }
320
	/*
	 * The complexity below arises because the Scatter/Gather list may
	 * wrap around the end of the TX descriptor ring (at the Status Page)
	 * and continue at the start of the ring.
	 */
325 for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
326 if (likely((u8 *)(p + 1) <= (u8 *)tq->stat)) {
327unmap:
328 dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
329 be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
330 dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
331 be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
332 p++;
333 } else if ((u8 *)p == (u8 *)tq->stat) {
334 p = (const struct ulptx_sge_pair *)tq->desc;
335 goto unmap;
336 } else if ((u8 *)p + 8 == (u8 *)tq->stat) {
337 const __be64 *addr = (const __be64 *)tq->desc;
338
339 dma_unmap_page(dev, be64_to_cpu(addr[0]),
340 be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
341 dma_unmap_page(dev, be64_to_cpu(addr[1]),
342 be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
343 p = (const struct ulptx_sge_pair *)&addr[2];
344 } else {
345 const __be64 *addr = (const __be64 *)tq->desc;
346
347 dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
348 be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
349 dma_unmap_page(dev, be64_to_cpu(addr[0]),
350 be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
351 p = (const struct ulptx_sge_pair *)&addr[1];
352 }
353 }
354 if (nfrags) {
355 __be64 addr;
356
357 if ((u8 *)p == (u8 *)tq->stat)
358 p = (const struct ulptx_sge_pair *)tq->desc;
359 addr = ((u8 *)p + 16 <= (u8 *)tq->stat
360 ? p->addr[0]
361 : *(const __be64 *)tq->desc);
362 dma_unmap_page(dev, be64_to_cpu(addr), be32_to_cpu(p->len[0]),
363 DMA_TO_DEVICE);
364 }
365}
366
367
368
369
370
371
372
373
374
375
376
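/**
 *	free_tx_desc - reclaims TX descriptors and their buffers
 *	@adapter: the adapter
 *	@tq: the TX queue to reclaim descriptors from
 *	@n: the number of descriptors to reclaim
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims TX descriptors from an SGE TX queue and frees the associated
 *	TX buffers.  Called with the TX queue locked.
 */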
377static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq,
378 unsigned int n, bool unmap)
379{
380 struct tx_sw_desc *sdesc;
381 unsigned int cidx = tq->cidx;
382 struct device *dev = adapter->pdev_dev;
383
384 const int need_unmap = need_skb_unmap() && unmap;
385
386 sdesc = &tq->sdesc[cidx];
387 while (n--) {
388
389
390
391
392 if (sdesc->skb) {
393 if (need_unmap)
394 unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq);
395 dev_consume_skb_any(sdesc->skb);
396 sdesc->skb = NULL;
397 }
398
399 sdesc++;
400 if (++cidx == tq->size) {
401 cidx = 0;
402 sdesc = tq->sdesc;
403 }
404 }
405 tq->cidx = cidx;
406}
407
408
409
410
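/*
 * Return the number of TX descriptors which the hardware has completed and
 * which can therefore be reclaimed by the software.
 */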
411static inline int reclaimable(const struct sge_txq *tq)
412{
413 int hw_cidx = be16_to_cpu(tq->stat->cidx);
414 int reclaimable = hw_cidx - tq->cidx;
415 if (reclaimable < 0)
416 reclaimable += tq->size;
417 return reclaimable;
418}
419
420
421
422
423
424
425
426
427
428
429
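/**
 *	reclaim_completed_tx - reclaims completed TX descriptors
 *	@adapter: the adapter
 *	@tq: the TX queue to reclaim completed descriptors from
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims descriptors which the hardware has finished with, limiting
 *	the amount of work done per call so it stays cheap in the TX path.
 */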
430static inline void reclaim_completed_tx(struct adapter *adapter,
431 struct sge_txq *tq,
432 bool unmap)
433{
434 int avail = reclaimable(tq);
435
436 if (avail) {
437
438
439
440
441 if (avail > MAX_TX_RECLAIM)
442 avail = MAX_TX_RECLAIM;
443
444 free_tx_desc(adapter, tq, avail, unmap);
445 tq->in_use -= avail;
446 }
447}
448
449
450
451
452
453
454static inline int get_buf_size(const struct adapter *adapter,
455 const struct rx_sw_desc *sdesc)
456{
457 const struct sge *s = &adapter->sge;
458
459 return (s->fl_pg_order > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
460 ? (PAGE_SIZE << s->fl_pg_order) : PAGE_SIZE);
461}
462
463
464
465
466
467
468
469
470
471
472
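/**
 *	free_rx_bufs - free RX buffers on an SGE Free List
 *	@adapter: the adapter
 *	@fl: the SGE Free List to free buffers from
 *	@n: how many buffers to free
 *
 *	Release the next @n buffers on an SGE Free List RX queue, unmapping
 *	them for DMA if needed and dropping the page references.
 */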
473static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n)
474{
475 while (n--) {
476 struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];
477
478 if (is_buf_mapped(sdesc))
			dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
				       get_buf_size(adapter, sdesc),
				       DMA_FROM_DEVICE);
482 put_page(sdesc->page);
483 sdesc->page = NULL;
484 if (++fl->cidx == fl->size)
485 fl->cidx = 0;
486 fl->avail--;
487 }
488}
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
504{
505 struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];
506
507 if (is_buf_mapped(sdesc))
		dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
			       get_buf_size(adapter, sdesc),
			       DMA_FROM_DEVICE);
511 sdesc->page = NULL;
512 if (++fl->cidx == fl->size)
513 fl->cidx = 0;
514 fl->avail--;
515}
516
517
518
519
520
521
522
523
524
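/*
 * Tell the hardware about newly posted Free List buffers: once at least an
 * entire Egress Queue Unit's worth of new buffers is pending, make sure the
 * descriptor writes are visible and then ring the Free List doorbell, either
 * through the BAR2 user doorbell or the kernel doorbell register.
 */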
525static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
526{
527 u32 val = adapter->params.arch.sge_fl_db;
528
529
530
531
532
533 if (fl->pend_cred >= FL_PER_EQ_UNIT) {
534 if (is_t4(adapter->params.chip))
535 val |= PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT);
536 else
537 val |= PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT);
538
539
540
541
542 wmb();
543
544
545
546
547
548 if (unlikely(fl->bar2_addr == NULL)) {
549 t4_write_reg(adapter,
550 T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
551 QID_V(fl->cntxt_id) | val);
552 } else {
553 writel(val | QID_V(fl->bar2_qid),
554 fl->bar2_addr + SGE_UDB_KDOORBELL);
555
556
557
558
559 wmb();
560 }
561 fl->pend_cred %= FL_PER_EQ_UNIT;
562 }
563}
564
565
566
567
568
569
570
571static inline void set_rx_sw_desc(struct rx_sw_desc *sdesc, struct page *page,
572 dma_addr_t dma_addr)
573{
574 sdesc->page = page;
575 sdesc->dma_addr = dma_addr;
576}
577
578
579
580
581#define POISON_BUF_VAL -1
582
583static inline void poison_buf(struct page *page, size_t sz)
584{
585#if POISON_BUF_VAL >= 0
586 memset(page_address(page), POISON_BUF_VAL, sz);
587#endif
588}
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
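/**
 *	refill_fl - refill an SGE RX buffer ring
 *	@adapter: the adapter
 *	@fl: the Free List ring to refill
 *	@n: the number of new buffers to allocate
 *	@gfp: the gfp flags for the allocations
 *
 *	(Re)populate an SGE free-buffer queue with up to @n new packet
 *	buffers, allocated with the supplied gfp flags.  The caller must
 *	ensure @n does not exceed the queue's capacity.  Returns the number
 *	of buffers actually added.
 */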
604static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
605 int n, gfp_t gfp)
606{
607 struct sge *s = &adapter->sge;
608 struct page *page;
609 dma_addr_t dma_addr;
610 unsigned int cred = fl->avail;
611 __be64 *d = &fl->desc[fl->pidx];
612 struct rx_sw_desc *sdesc = &fl->sdesc[fl->pidx];
613
614
615
616
617
618
619 BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT);
620
621 gfp |= __GFP_NOWARN;
622
623
624
625
626
627
628
629 if (s->fl_pg_order == 0)
630 goto alloc_small_pages;
631
632 while (n) {
633 page = __dev_alloc_pages(gfp, s->fl_pg_order);
634 if (unlikely(!page)) {
635
636
637
638
639
640 fl->large_alloc_failed++;
641 break;
642 }
643 poison_buf(page, PAGE_SIZE << s->fl_pg_order);
644
		dma_addr = dma_map_page(adapter->pdev_dev, page, 0,
					PAGE_SIZE << s->fl_pg_order,
					DMA_FROM_DEVICE);
648 if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
649
650
651
652
653
654
655
656
657 __free_pages(page, s->fl_pg_order);
658 goto out;
659 }
660 dma_addr |= RX_LARGE_BUF;
661 *d++ = cpu_to_be64(dma_addr);
662
663 set_rx_sw_desc(sdesc, page, dma_addr);
664 sdesc++;
665
666 fl->avail++;
667 if (++fl->pidx == fl->size) {
668 fl->pidx = 0;
669 sdesc = fl->sdesc;
670 d = fl->desc;
671 }
672 n--;
673 }
674
675alloc_small_pages:
676 while (n--) {
677 page = __dev_alloc_page(gfp);
678 if (unlikely(!page)) {
679 fl->alloc_failed++;
680 break;
681 }
682 poison_buf(page, PAGE_SIZE);
683
		dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE,
					DMA_FROM_DEVICE);
686 if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
687 put_page(page);
688 break;
689 }
690 *d++ = cpu_to_be64(dma_addr);
691
692 set_rx_sw_desc(sdesc, page, dma_addr);
693 sdesc++;
694
695 fl->avail++;
696 if (++fl->pidx == fl->size) {
697 fl->pidx = 0;
698 sdesc = fl->sdesc;
699 d = fl->desc;
700 }
701 }
702
703out:
704
705
706
707
708
709 cred = fl->avail - cred;
710 fl->pend_cred += cred;
711 ring_fl_db(adapter, fl);
712
713 if (unlikely(fl_starving(adapter, fl))) {
714 smp_wmb();
715 set_bit(fl->cntxt_id, adapter->sge.starving_fl);
716 }
717
718 return cred;
719}
720
721
722
723
724
725static inline void __refill_fl(struct adapter *adapter, struct sge_fl *fl)
726{
727 refill_fl(adapter, fl,
728 min((unsigned int)MAX_RX_REFILL, fl_cap(fl) - fl->avail),
729 GFP_ATOMIC);
730}
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
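/**
 *	alloc_ring - allocate resources for an SGE descriptor ring
 *	@dev: the PCI device's core device
 *	@nelem: the number of descriptors
 *	@hwsize: the size of each hardware descriptor
 *	@swsize: the size of each software descriptor (0 for none)
 *	@busaddrp: returns the physical (bus) address of the hardware ring
 *	@swringp: returns the address of the software ring (may be NULL)
 *	@stat_size: extra space in the hardware ring for status information
 *
 *	Allocates the hardware descriptor ring and, optionally, a parallel
 *	software state ring.  Returns the virtual address of the hardware
 *	ring or NULL on failure.
 */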
751static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
752 size_t swsize, dma_addr_t *busaddrp, void *swringp,
753 size_t stat_size)
754{
755
756
757
758 size_t hwlen = nelem * hwsize + stat_size;
759 void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL);
760
761 if (!hwring)
762 return NULL;
763
764
765
766
767
768 BUG_ON((swsize != 0) != (swringp != NULL));
769 if (swsize) {
770 void *swring = kcalloc(nelem, swsize, GFP_KERNEL);
771
772 if (!swring) {
773 dma_free_coherent(dev, hwlen, hwring, *busaddrp);
774 return NULL;
775 }
776 *(void **)swringp = swring;
777 }
778
779 return hwring;
780}
781
782
783
784
785
786
787
788
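/**
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *
 *	Calculates the number of flits (8-byte words) needed for a Direct
 *	Scatter/Gather List that can hold the given number of entries.
 */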
789static inline unsigned int sgl_len(unsigned int n)
790{
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808 n--;
809 return (3 * n) / 2 + (n & 1) + 2;
810}
811
812
813
814
815
816
817
818
819static inline unsigned int flits_to_desc(unsigned int flits)
820{
821 BUG_ON(flits > SGE_MAX_WR_LEN / sizeof(__be64));
822 return DIV_ROUND_UP(flits, TXD_PER_EQ_UNIT);
823}
824
825
826
827
828
829
830
831
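/**
 *	is_eth_imm - can an Ethernet packet be sent as immediate data?
 *	@skb: the packet
 *
 *	Returns whether an Ethernet packet is small enough to fit completely
 *	as immediate data within the Work Request.
 */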
832static inline int is_eth_imm(const struct sk_buff *skb)
833{
	/*
	 * The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request,
	 * which does not accommodate immediate data, so every packet is sent
	 * via a DMA Scatter/Gather List and this always returns false.
	 */
841 return false;
842}
843
844
845
846
847
848
849
850
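/**
 *	calc_tx_flits - calculate the number of flits for a packet TX WR
 *	@skb: the packet
 *
 *	Returns the number of flits needed for a TX Work Request for the
 *	given Ethernet packet, including the firmware Work Request header,
 *	the CPL headers (with or without LSO) and the Scatter/Gather List.
 */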
851static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
852{
853 unsigned int flits;
854
855
856
857
858
859
860 if (is_eth_imm(skb))
861 return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
862 sizeof(__be64));
863
864
865
866
867
868
869
870
871
872
873 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
874 if (skb_shinfo(skb)->gso_size)
875 flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
876 sizeof(struct cpl_tx_pkt_lso_core) +
877 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
878 else
879 flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
880 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
881 return flits;
882}
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
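/**
 *	write_sgl - populate a Scatter/Gather List for a packet
 *	@skb: the packet
 *	@tq: the TX queue we are writing into
 *	@sgl: starting location for writing the SGL
 *	@end: points right after the end of the SGL
 *	@start: start offset into skb main-body data to include in the SGL
 *	@addr: the list of DMA bus addresses for the SGL elements
 *
 *	Generates a Scatter/Gather List for the buffers that make up a
 *	packet.  The SGL includes all of the packet's page fragments and the
 *	data in its main body except for the first @start bytes.  The caller
 *	must provide adequate space; the SGL may wrap around the end of the
 *	TX descriptor ring, in which case the tail is written at the start
 *	of the ring.
 */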
901static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
902 struct ulptx_sgl *sgl, u64 *end, unsigned int start,
903 const dma_addr_t *addr)
904{
905 unsigned int i, len;
906 struct ulptx_sge_pair *to;
907 const struct skb_shared_info *si = skb_shinfo(skb);
908 unsigned int nfrags = si->nr_frags;
909 struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
910
911 len = skb_headlen(skb) - start;
912 if (likely(len)) {
913 sgl->len0 = htonl(len);
914 sgl->addr0 = cpu_to_be64(addr[0] + start);
915 nfrags++;
916 } else {
917 sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
918 sgl->addr0 = cpu_to_be64(addr[1]);
919 }
920
921 sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
922 ULPTX_NSGE_V(nfrags));
923 if (likely(--nfrags == 0))
924 return;
925
926
927
928
929
930 to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge;
931
932 for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
933 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
934 to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
935 to->addr[0] = cpu_to_be64(addr[i]);
936 to->addr[1] = cpu_to_be64(addr[++i]);
937 }
938 if (nfrags) {
939 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
940 to->len[1] = cpu_to_be32(0);
941 to->addr[0] = cpu_to_be64(addr[i + 1]);
942 }
943 if (unlikely((u8 *)end > (u8 *)tq->stat)) {
944 unsigned int part0 = (u8 *)tq->stat - (u8 *)sgl->sge, part1;
945
946 if (likely(part0))
947 memcpy(sgl->sge, buf, part0);
948 part1 = (u8 *)end - (u8 *)tq->stat;
949 memcpy(tq->desc, (u8 *)buf + part0, part1);
950 end = (void *)tq->desc + part1;
951 }
952 if ((uintptr_t)end & 8)
953 *end = 0;
954}
955
956
957
958
959
960
961
962
963
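/**
 *	ring_tx_db - ring a TX queue's doorbell
 *	@adapter: the adapter
 *	@tq: the TX queue
 *	@n: the number of new descriptors to give to the hardware
 *
 *	Ring the doorbell for a TX queue, using the BAR2 user doorbell (and
 *	the Write Combined region for single-descriptor updates when
 *	available) or falling back to the kernel-mode doorbell register.
 */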
964static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
965 int n)
966{
967
968
969
970 wmb();
971
972
973
974
975 if (unlikely(tq->bar2_addr == NULL)) {
976 u32 val = PIDX_V(n);
977
978 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
979 QID_V(tq->cntxt_id) | val);
980 } else {
981 u32 val = PIDX_T5_V(n);
982
983
984
985
986
987
988
989 WARN_ON(val & DBPRIO_F);
990
991
992
993
994
995 if (n == 1 && tq->bar2_qid == 0) {
996 unsigned int index = (tq->pidx
997 ? (tq->pidx - 1)
998 : (tq->size - 1));
999 __be64 *src = (__be64 *)&tq->desc[index];
1000 __be64 __iomem *dst = (__be64 __iomem *)(tq->bar2_addr +
1001 SGE_UDB_WCDOORBELL);
1002 unsigned int count = EQ_UNIT / sizeof(__be64);
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013 while (count) {
1014
1015
1016
1017
1018 writeq((__force u64)*src, dst);
1019 src++;
1020 dst++;
1021 count--;
1022 }
1023 } else
1024 writel(val | QID_V(tq->bar2_qid),
1025 tq->bar2_addr + SGE_UDB_KDOORBELL);
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037 wmb();
1038 }
1039}
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
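/**
 *	inline_tx_skb - inline a packet's data into TX descriptors
 *	@skb: the packet
 *	@tq: the TX queue where the packet will be inlined
 *	@pos: starting position in the TX queue to inline the packet
 *
 *	Inline a packet's contents directly into TX descriptors, starting at
 *	the given position within the TX DMA ring.  Most of the complexity is
 *	in handling the case where the packet wraps around the end of the ring.
 */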
1052static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq,
1053 void *pos)
1054{
1055 u64 *p;
1056 int left = (void *)tq->stat - pos;
1057
1058 if (likely(skb->len <= left)) {
1059 if (likely(!skb->data_len))
1060 skb_copy_from_linear_data(skb, pos, skb->len);
1061 else
1062 skb_copy_bits(skb, 0, pos, skb->len);
1063 pos += skb->len;
1064 } else {
1065 skb_copy_bits(skb, 0, pos, left);
1066 skb_copy_bits(skb, left, tq->desc, skb->len - left);
1067 pos = (void *)tq->desc + (skb->len - left);
1068 }
1069
1070
1071 p = PTR_ALIGN(pos, 8);
1072 if ((uintptr_t)p & 8)
1073 *p = 0;
1074}
1075
1076
1077
1078
1079
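/*
 * Figure out what hardware checksum offload a packet wants and return the
 * appropriate checksum-control bits for the TX Packet CPL message.
 */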
1080static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
1081{
1082 int csum_type;
1083 const struct iphdr *iph = ip_hdr(skb);
1084
1085 if (iph->version == 4) {
1086 if (iph->protocol == IPPROTO_TCP)
1087 csum_type = TX_CSUM_TCPIP;
1088 else if (iph->protocol == IPPROTO_UDP)
1089 csum_type = TX_CSUM_UDPIP;
1090 else {
1091nocsum:
			/*
			 * Unknown protocol: disable HW checksumming and hope
			 * a bad packet is detected.
			 */
1096 return TXPKT_L4CSUM_DIS_F;
1097 }
1098 } else {
1099
1100
1101
1102 const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
1103
1104 if (ip6h->nexthdr == IPPROTO_TCP)
1105 csum_type = TX_CSUM_TCPIP6;
1106 else if (ip6h->nexthdr == IPPROTO_UDP)
1107 csum_type = TX_CSUM_UDPIP6;
1108 else
1109 goto nocsum;
1110 }
1111
1112 if (likely(csum_type >= TX_CSUM_TCPIP)) {
1113 u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb));
1114 int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
1115
1116 if (chip <= CHELSIO_T5)
1117 hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
1118 else
1119 hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
1120 return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
1121 } else {
1122 int start = skb_transport_offset(skb);
1123
1124 return TXPKT_CSUM_TYPE_V(csum_type) |
1125 TXPKT_CSUM_START_V(start) |
1126 TXPKT_CSUM_LOC_V(start + skb->csum_offset);
1127 }
1128}
1129
1130
1131
1132
1133static void txq_stop(struct sge_eth_txq *txq)
1134{
1135 netif_tx_stop_queue(txq->txq);
1136 txq->q.stops++;
1137}
1138
1139
1140
1141
1142static inline void txq_advance(struct sge_txq *tq, unsigned int n)
1143{
1144 tq->in_use += n;
1145 tq->pidx += n;
1146 if (tq->pidx >= tq->size)
1147 tq->pidx -= tq->size;
1148}
1149
1150
1151
1152
1153
1154
1155
1156
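/**
 *	t4vf_eth_xmit - add a packet to an Ethernet TX queue
 *	@skb: the packet
 *	@dev: the egress net device
 *
 *	Add a packet to an SGE Ethernet TX queue.  Runs with softirqs
 *	disabled.
 */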
1157netdev_tx_t t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1158{
1159 u32 wr_mid;
1160 u64 cntrl, *end;
1161 int qidx, credits, max_pkt_len;
1162 unsigned int flits, ndesc;
1163 struct adapter *adapter;
1164 struct sge_eth_txq *txq;
1165 const struct port_info *pi;
1166 struct fw_eth_tx_pkt_vm_wr *wr;
1167 struct cpl_tx_pkt_core *cpl;
1168 const struct skb_shared_info *ssi;
1169 dma_addr_t addr[MAX_SKB_FRAGS + 1];
1170 const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) +
1171 sizeof(wr->ethmacsrc) +
1172 sizeof(wr->ethtype) +
1173 sizeof(wr->vlantci));
1174
1175
1176
1177
1178
1179
1180
1181 if (unlikely(skb->len < fw_hdr_copy_len))
1182 goto out_free;
1183
1184
1185 max_pkt_len = ETH_HLEN + dev->mtu;
1186 if (skb_vlan_tagged(skb))
1187 max_pkt_len += VLAN_HLEN;
1188 if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
1189 goto out_free;
1190
1191
1192
1193
1194 pi = netdev_priv(dev);
1195 adapter = pi->adapter;
1196 qidx = skb_get_queue_mapping(skb);
1197 BUG_ON(qidx >= pi->nqsets);
1198 txq = &adapter->sge.ethtxq[pi->first_qset + qidx];
1199
1200 if (pi->vlan_id && !skb_vlan_tag_present(skb))
1201 __vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
1202 pi->vlan_id);
1203
1204
1205
1206
1207
1208 reclaim_completed_tx(adapter, &txq->q, true);
1209
1210
1211
1212
1213
1214
1215 flits = calc_tx_flits(skb);
1216 ndesc = flits_to_desc(flits);
1217 credits = txq_avail(&txq->q) - ndesc;
1218
1219 if (unlikely(credits < 0)) {
1220
1221
1222
1223
1224
1225
1226 txq_stop(txq);
1227 dev_err(adapter->pdev_dev,
1228 "%s: TX ring %u full while queue awake!\n",
1229 dev->name, qidx);
1230 return NETDEV_TX_BUSY;
1231 }
1232
1233 if (!is_eth_imm(skb) &&
1234 unlikely(map_skb(adapter->pdev_dev, skb, addr) < 0)) {
1235
1236
1237
1238
1239
1240 txq->mapping_err++;
1241 goto out_free;
1242 }
1243
1244 wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
1245 if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255 txq_stop(txq);
1256 wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
1257 }
1258
1259
1260
1261
1262
1263
1264
1265 BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
1266 wr = (void *)&txq->q.desc[txq->q.pidx];
1267 wr->equiq_to_len16 = cpu_to_be32(wr_mid);
1268 wr->r3[0] = cpu_to_be32(0);
1269 wr->r3[1] = cpu_to_be32(0);
1270 skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
1271 end = (u64 *)wr + flits;
1272
1273
1274
1275
1276
1277
1278 ssi = skb_shinfo(skb);
1279 if (ssi->gso_size) {
1280 struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
1281 bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
1282 int l3hdr_len = skb_network_header_len(skb);
1283 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1284
1285 wr->op_immdlen =
1286 cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
1287 FW_WR_IMMDLEN_V(sizeof(*lso) +
1288 sizeof(*cpl)));
1289
1290
1291
1292 lso->lso_ctrl =
1293 cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
1294 LSO_FIRST_SLICE_F |
1295 LSO_LAST_SLICE_F |
1296 LSO_IPV6_V(v6) |
1297 LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
1298 LSO_IPHDR_LEN_V(l3hdr_len / 4) |
1299 LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
1300 lso->ipid_ofst = cpu_to_be16(0);
1301 lso->mss = cpu_to_be16(ssi->gso_size);
1302 lso->seqno_offset = cpu_to_be32(0);
1303 if (is_t4(adapter->params.chip))
1304 lso->len = cpu_to_be32(skb->len);
1305 else
1306 lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len));
1307
1308
1309
1310
1311
1312 cpl = (void *)(lso + 1);
1313
1314 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
1315 cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
1316 else
1317 cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
1318
1319 cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
1320 TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
1321 TXPKT_IPHDR_LEN_V(l3hdr_len);
1322 txq->tso++;
1323 txq->tx_cso += ssi->gso_segs;
1324 } else {
1325 int len;
1326
1327 len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
1328 wr->op_immdlen =
1329 cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
1330 FW_WR_IMMDLEN_V(len));
1331
1332
1333
1334
1335
1336 cpl = (void *)(wr + 1);
1337 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1338 cntrl = hwcsum(adapter->params.chip, skb) |
1339 TXPKT_IPCSUM_DIS_F;
1340 txq->tx_cso++;
1341 } else
1342 cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
1343 }
1344
1345
1346
1347
1348
1349 if (skb_vlan_tag_present(skb)) {
1350 txq->vlan_ins++;
1351 cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
1352 }
1353
1354
1355
1356
1357 cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
1358 TXPKT_INTF_V(pi->port_id) |
1359 TXPKT_PF_V(0));
1360 cpl->pack = cpu_to_be16(0);
1361 cpl->len = cpu_to_be16(skb->len);
1362 cpl->ctrl1 = cpu_to_be64(cntrl);
1363
1364#ifdef T4_TRACE
1365 T4_TRACE5(adapter->tb[txq->q.cntxt_id & 7],
1366 "eth_xmit: ndesc %u, credits %u, pidx %u, len %u, frags %u",
1367 ndesc, credits, txq->q.pidx, skb->len, ssi->nr_frags);
1368#endif
1369
1370
1371
1372
1373
1374 if (is_eth_imm(skb)) {
1375
1376
1377
1378
1379 inline_tx_skb(skb, &txq->q, cpl + 1);
1380 dev_consume_skb_any(skb);
1381 } else {
1382
		/*
		 * Write the packet's Scatter/Gather list into the TX ring,
		 * then record the skb in the software descriptor of the
		 * *last* hardware descriptor it occupies so the skb (and its
		 * DMA mappings) are released only after the hardware has
		 * finished with it.  Because the skb may sit on the ring for
		 * a while, call skb_orphan() now so the owning socket isn't
		 * charged for the skb's memory in the meantime.
		 */
1418
1419 struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
1420 struct sge_txq *tq = &txq->q;
1421 int last_desc;
1422
1423
1424
1425
1426
1427
1428
1429
1430 if (unlikely((void *)sgl == (void *)tq->stat)) {
1431 sgl = (void *)tq->desc;
1432 end = ((void *)tq->desc + ((void *)end - (void *)tq->stat));
1433 }
1434
1435 write_sgl(skb, tq, sgl, end, 0, addr);
1436 skb_orphan(skb);
1437
1438 last_desc = tq->pidx + ndesc - 1;
1439 if (last_desc >= tq->size)
1440 last_desc -= tq->size;
1441 tq->sdesc[last_desc].skb = skb;
1442 tq->sdesc[last_desc].sgl = sgl;
1443 }
1444
1445
1446
1447
1448
1449 txq_advance(&txq->q, ndesc);
1450 netif_trans_update(dev);
1451 ring_tx_db(adapter, &txq->q, ndesc);
1452 return NETDEV_TX_OK;
1453
1454out_free:
1455
1456
1457
1458
1459 dev_kfree_skb_any(skb);
1460 return NETDEV_TX_OK;
1461}
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472static inline void copy_frags(struct sk_buff *skb,
1473 const struct pkt_gl *gl,
1474 unsigned int offset)
1475{
1476 int i;
1477
1478
1479 __skb_fill_page_desc(skb, 0, gl->frags[0].page,
1480 gl->frags[0].offset + offset,
1481 gl->frags[0].size - offset);
1482 skb_shinfo(skb)->nr_frags = gl->nfrags;
1483 for (i = 1; i < gl->nfrags; i++)
1484 __skb_fill_page_desc(skb, i, gl->frags[i].page,
1485 gl->frags[i].offset,
1486 gl->frags[i].size);
1487
1488
1489 get_page(gl->frags[gl->nfrags - 1].page);
1490}
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
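/**
 *	t4vf_pktgl_to_skb - build an sk_buff from a packet gather list
 *	@gl: the gather list
 *	@skb_len: size of the sk_buff main body if it carries fragments
 *	@pull_len: amount of data to move to the sk_buff's main body
 *
 *	Builds an sk_buff from the given packet gather list.  Small packets
 *	are copied entirely into the sk_buff's main body; larger packets keep
 *	their page fragments.  Returns the sk_buff or %NULL if allocation
 *	failed.
 */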
1501static struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
1502 unsigned int skb_len,
1503 unsigned int pull_len)
1504{
1505 struct sk_buff *skb;
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518 if (gl->tot_len <= RX_COPY_THRES) {
1519
1520 skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
1521 if (unlikely(!skb))
1522 goto out;
1523 __skb_put(skb, gl->tot_len);
1524 skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
1525 } else {
1526 skb = alloc_skb(skb_len, GFP_ATOMIC);
1527 if (unlikely(!skb))
1528 goto out;
1529 __skb_put(skb, pull_len);
1530 skb_copy_to_linear_data(skb, gl->va, pull_len);
1531
1532 copy_frags(skb, gl, pull_len);
1533 skb->len = gl->tot_len;
1534 skb->data_len = skb->len - pull_len;
1535 skb->truesize += skb->data_len;
1536 }
1537
1538out:
1539 return skb;
1540}
1541
1542
1543
1544
1545
1546
1547
1548
1549static void t4vf_pktgl_free(const struct pkt_gl *gl)
1550{
1551 int frag;
1552
1553 frag = gl->nfrags - 1;
1554 while (frag--)
1555 put_page(gl->frags[frag].page);
1556}
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
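/**
 *	do_gro - perform Generic Receive Offload ingress packet processing
 *	@rxq: ingress RX Ethernet queue
 *	@gl: gather list for ingress packet
 *	@pkt: CPL header for last packet fragment
 *
 *	Perform Generic Receive Offload (GRO) ingress packet processing via
 *	the NAPI GRO fragment interface.
 */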
1567static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1568 const struct cpl_rx_pkt *pkt)
1569{
1570 struct adapter *adapter = rxq->rspq.adapter;
1571 struct sge *s = &adapter->sge;
1572 struct port_info *pi;
1573 int ret;
1574 struct sk_buff *skb;
1575
1576 skb = napi_get_frags(&rxq->rspq.napi);
1577 if (unlikely(!skb)) {
1578 t4vf_pktgl_free(gl);
1579 rxq->stats.rx_drops++;
1580 return;
1581 }
1582
1583 copy_frags(skb, gl, s->pktshift);
1584 skb->len = gl->tot_len - s->pktshift;
1585 skb->data_len = skb->len;
1586 skb->truesize += skb->data_len;
1587 skb->ip_summed = CHECKSUM_UNNECESSARY;
1588 skb_record_rx_queue(skb, rxq->rspq.idx);
1589 pi = netdev_priv(skb->dev);
1590
1591 if (pkt->vlan_ex && !pi->vlan_id) {
1592 __vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
1593 be16_to_cpu(pkt->vlan));
1594 rxq->stats.vlan_ex++;
1595 }
1596 ret = napi_gro_frags(&rxq->rspq.napi);
1597
1598 if (ret == GRO_HELD)
1599 rxq->stats.lro_pkts++;
1600 else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
1601 rxq->stats.lro_merged++;
1602 rxq->stats.pkts++;
1603 rxq->stats.rx_cso++;
1604}
1605
1606
1607
1608
1609
1610
1611
1612
1613
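/**
 *	t4vf_ethrx_handler - process an ingress Ethernet packet
 *	@rspq: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the RX_PKT message
 *	@gl: the gather list of packet fragments
 *
 *	Process an ingress Ethernet packet and deliver it to the stack,
 *	using GRO when possible and recording checksum and VLAN state.
 */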
1614int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1615 const struct pkt_gl *gl)
1616{
1617 struct sk_buff *skb;
1618 const struct cpl_rx_pkt *pkt = (void *)rsp;
1619 bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
1620 (rspq->netdev->features & NETIF_F_RXCSUM);
1621 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
1622 struct adapter *adapter = rspq->adapter;
1623 struct sge *s = &adapter->sge;
1624 struct port_info *pi;
1625
1626
1627
1628
1629
1630 if ((pkt->l2info & cpu_to_be32(RXF_TCP_F)) &&
1631 (rspq->netdev->features & NETIF_F_GRO) && csum_ok &&
1632 !pkt->ip_frag) {
1633 do_gro(rxq, gl, pkt);
1634 return 0;
1635 }
1636
1637
1638
1639
1640 skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
1641 if (unlikely(!skb)) {
1642 t4vf_pktgl_free(gl);
1643 rxq->stats.rx_drops++;
1644 return 0;
1645 }
1646 __skb_pull(skb, s->pktshift);
1647 skb->protocol = eth_type_trans(skb, rspq->netdev);
1648 skb_record_rx_queue(skb, rspq->idx);
1649 pi = netdev_priv(skb->dev);
1650 rxq->stats.pkts++;
1651
1652 if (csum_ok && !pkt->err_vec &&
1653 (be32_to_cpu(pkt->l2info) & (RXF_UDP_F | RXF_TCP_F))) {
1654 if (!pkt->ip_frag) {
1655 skb->ip_summed = CHECKSUM_UNNECESSARY;
1656 rxq->stats.rx_cso++;
1657 } else if (pkt->l2info & htonl(RXF_IP_F)) {
1658 __sum16 c = (__force __sum16)pkt->csum;
1659 skb->csum = csum_unfold(c);
1660 skb->ip_summed = CHECKSUM_COMPLETE;
1661 rxq->stats.rx_cso++;
1662 }
1663 } else
1664 skb_checksum_none_assert(skb);
1665
1666 if (pkt->vlan_ex && !pi->vlan_id) {
1667 rxq->stats.vlan_ex++;
1668 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1669 be16_to_cpu(pkt->vlan));
1670 }
1671
1672 netif_receive_skb(skb);
1673
1674 return 0;
1675}
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685static inline bool is_new_response(const struct rsp_ctrl *rc,
1686 const struct sge_rspq *rspq)
1687{
1688 return ((rc->type_gen >> RSPD_GEN_S) & 0x1) == rspq->gen;
1689}
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
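/**
 *	restore_rx_bufs - put back a packet's RX buffers
 *	@gl: the packet gather list
 *	@fl: the SGE Free List
 *	@frags: how many fragments in @gl
 *
 *	Called when the response handler couldn't process the packet right
 *	away (e.g. it ran out of memory).  The RX buffers already consumed
 *	for the packet are put back on the Free List so they can be reused;
 *	they are marked unmapped because their DMA mappings have already been
 *	torn down.
 */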
1711static void restore_rx_bufs(const struct pkt_gl *gl, struct sge_fl *fl,
1712 int frags)
1713{
1714 struct rx_sw_desc *sdesc;
1715
1716 while (frags--) {
1717 if (fl->cidx == 0)
1718 fl->cidx = fl->size - 1;
1719 else
1720 fl->cidx--;
1721 sdesc = &fl->sdesc[fl->cidx];
1722 sdesc->page = gl->frags[frags].page;
1723 sdesc->dma_addr |= RX_UNMAPPED_BUF;
1724 fl->avail++;
1725 }
1726}
1727
1728
1729
1730
1731
1732
1733
1734static inline void rspq_next(struct sge_rspq *rspq)
1735{
1736 rspq->cur_desc = (void *)rspq->cur_desc + rspq->iqe_len;
1737 if (unlikely(++rspq->cidx == rspq->size)) {
1738 rspq->cidx = 0;
1739 rspq->gen ^= 1;
1740 rspq->cur_desc = rspq->desc;
1741 }
1742}
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
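/**
 *	process_responses - process responses from an SGE response queue
 *	@rspq: the ingress response queue to process
 *	@budget: how many responses can be processed in this round
 *
 *	Process responses from an SGE response queue up to the supplied
 *	budget.  Responses include received packets as well as control
 *	messages from firmware or hardware.  Returns the number of responses
 *	processed.
 */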
1757static int process_responses(struct sge_rspq *rspq, int budget)
1758{
1759 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
1760 struct adapter *adapter = rspq->adapter;
1761 struct sge *s = &adapter->sge;
1762 int budget_left = budget;
1763
1764 while (likely(budget_left)) {
1765 int ret, rsp_type;
1766 const struct rsp_ctrl *rc;
1767
1768 rc = (void *)rspq->cur_desc + (rspq->iqe_len - sizeof(*rc));
1769 if (!is_new_response(rc, rspq))
1770 break;
1771
1772
1773
1774
1775
1776 dma_rmb();
1777 rsp_type = RSPD_TYPE_G(rc->type_gen);
1778 if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
1779 struct page_frag *fp;
1780 struct pkt_gl gl;
1781 const struct rx_sw_desc *sdesc;
1782 u32 bufsz, frag;
1783 u32 len = be32_to_cpu(rc->pldbuflen_qid);
1784
1785
1786
1787
1788
1789 if (len & RSPD_NEWBUF_F) {
1790
1791
1792
1793
1794
1795 if (likely(rspq->offset > 0)) {
1796 free_rx_bufs(rspq->adapter, &rxq->fl,
1797 1);
1798 rspq->offset = 0;
1799 }
1800 len = RSPD_LEN_G(len);
1801 }
1802 gl.tot_len = len;
1803
1804
1805
1806
1807 for (frag = 0, fp = gl.frags; ; frag++, fp++) {
1808 BUG_ON(frag >= MAX_SKB_FRAGS);
1809 BUG_ON(rxq->fl.avail == 0);
1810 sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
1811 bufsz = get_buf_size(adapter, sdesc);
1812 fp->page = sdesc->page;
1813 fp->offset = rspq->offset;
1814 fp->size = min(bufsz, len);
1815 len -= fp->size;
1816 if (!len)
1817 break;
1818 unmap_rx_buf(rspq->adapter, &rxq->fl);
1819 }
1820 gl.nfrags = frag+1;
1821
1822
1823
1824
1825
1826
1827 dma_sync_single_for_cpu(rspq->adapter->pdev_dev,
1828 get_buf_addr(sdesc),
1829 fp->size, DMA_FROM_DEVICE);
1830 gl.va = (page_address(gl.frags[0].page) +
1831 gl.frags[0].offset);
1832 prefetch(gl.va);
1833
1834
1835
1836
1837
1838 ret = rspq->handler(rspq, rspq->cur_desc, &gl);
1839 if (likely(ret == 0))
1840 rspq->offset += ALIGN(fp->size, s->fl_align);
1841 else
1842 restore_rx_bufs(&gl, &rxq->fl, frag);
1843 } else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
1844 ret = rspq->handler(rspq, rspq->cur_desc, NULL);
1845 } else {
1846 WARN_ON(rsp_type > RSPD_TYPE_CPL_X);
1847 ret = 0;
1848 }
1849
1850 if (unlikely(ret)) {
1851
1852
1853
1854
1855
1856 const int NOMEM_TIMER_IDX = SGE_NTIMERS-1;
1857 rspq->next_intr_params =
1858 QINTR_TIMER_IDX_V(NOMEM_TIMER_IDX);
1859 break;
1860 }
1861
1862 rspq_next(rspq);
1863 budget_left--;
1864 }
1865
1866
1867
1868
1869
1870
1871 if (rspq->offset >= 0 &&
1872 fl_cap(&rxq->fl) - rxq->fl.avail >= 2*FL_PER_EQ_UNIT)
1873 __refill_fl(rspq->adapter, &rxq->fl);
1874 return budget - budget_left;
1875}
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
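/**
 *	napi_rx_handler - the NAPI handler for RX processing
 *	@napi: the napi instance
 *	@budget: how many packets we can process in this round
 *
 *	Handler for new data events when using NAPI.  This does not need any
 *	locking or protection from interrupts as data interrupts are off at
 *	this point and other adapter interrupts do not interfere.
 */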
1888static int napi_rx_handler(struct napi_struct *napi, int budget)
1889{
1890 unsigned int intr_params;
1891 struct sge_rspq *rspq = container_of(napi, struct sge_rspq, napi);
1892 int work_done = process_responses(rspq, budget);
1893 u32 val;
1894
1895 if (likely(work_done < budget)) {
1896 napi_complete_done(napi, work_done);
1897 intr_params = rspq->next_intr_params;
1898 rspq->next_intr_params = rspq->intr_params;
1899 } else
1900 intr_params = QINTR_TIMER_IDX_V(SGE_TIMER_UPD_CIDX);
1901
1902 if (unlikely(work_done == 0))
1903 rspq->unhandled_irqs++;
1904
1905 val = CIDXINC_V(work_done) | SEINTARM_V(intr_params);
1906
1907
1908
1909 if (unlikely(!rspq->bar2_addr)) {
1910 t4_write_reg(rspq->adapter,
1911 T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
1912 val | INGRESSQID_V((u32)rspq->cntxt_id));
1913 } else {
1914 writel(val | INGRESSQID_V(rspq->bar2_qid),
1915 rspq->bar2_addr + SGE_UDB_GTS);
1916 wmb();
1917 }
1918 return work_done;
1919}
1920
1921
1922
1923
1924
1925irqreturn_t t4vf_sge_intr_msix(int irq, void *cookie)
1926{
1927 struct sge_rspq *rspq = cookie;
1928
1929 napi_schedule(&rspq->napi);
1930 return IRQ_HANDLED;
1931}
1932
1933
1934
1935
1936
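/*
 * Process the indirect interrupt entries in the interrupt queue and kick off
 * NAPI for each queue that has generated an entry.  Used when the adapter is
 * operating in MSI interrupt mode (forwarded interrupts).
 */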
1937static unsigned int process_intrq(struct adapter *adapter)
1938{
1939 struct sge *s = &adapter->sge;
1940 struct sge_rspq *intrq = &s->intrq;
1941 unsigned int work_done;
1942 u32 val;
1943
1944 spin_lock(&adapter->sge.intrq_lock);
1945 for (work_done = 0; ; work_done++) {
1946 const struct rsp_ctrl *rc;
1947 unsigned int qid, iq_idx;
1948 struct sge_rspq *rspq;
1949
1950
1951
1952
1953
1954 rc = (void *)intrq->cur_desc + (intrq->iqe_len - sizeof(*rc));
1955 if (!is_new_response(rc, intrq))
1956 break;
1957
1958
1959
1960
1961
1962
1963 dma_rmb();
1964 if (unlikely(RSPD_TYPE_G(rc->type_gen) != RSPD_TYPE_INTR_X)) {
1965 dev_err(adapter->pdev_dev,
1966 "Unexpected INTRQ response type %d\n",
1967 RSPD_TYPE_G(rc->type_gen));
1968 continue;
1969 }
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979 qid = RSPD_QID_G(be32_to_cpu(rc->pldbuflen_qid));
1980 iq_idx = IQ_IDX(s, qid);
1981 if (unlikely(iq_idx >= MAX_INGQ)) {
1982 dev_err(adapter->pdev_dev,
1983 "Ingress QID %d out of range\n", qid);
1984 continue;
1985 }
1986 rspq = s->ingr_map[iq_idx];
1987 if (unlikely(rspq == NULL)) {
1988 dev_err(adapter->pdev_dev,
1989 "Ingress QID %d RSPQ=NULL\n", qid);
1990 continue;
1991 }
1992 if (unlikely(rspq->abs_id != qid)) {
1993 dev_err(adapter->pdev_dev,
1994 "Ingress QID %d refers to RSPQ %d\n",
1995 qid, rspq->abs_id);
1996 continue;
1997 }
1998
1999
2000
2001
2002
2003
2004 napi_schedule(&rspq->napi);
2005 rspq_next(intrq);
2006 }
2007
2008 val = CIDXINC_V(work_done) | SEINTARM_V(intrq->intr_params);
2009
2010
2011
2012 if (unlikely(!intrq->bar2_addr)) {
2013 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
2014 val | INGRESSQID_V(intrq->cntxt_id));
2015 } else {
2016 writel(val | INGRESSQID_V(intrq->bar2_qid),
2017 intrq->bar2_addr + SGE_UDB_GTS);
2018 wmb();
2019 }
2020
2021 spin_unlock(&adapter->sge.intrq_lock);
2022
2023 return work_done;
2024}
2025
2026
2027
2028
2029
2030static irqreturn_t t4vf_intr_msi(int irq, void *cookie)
2031{
2032 struct adapter *adapter = cookie;
2033
2034 process_intrq(adapter);
2035 return IRQ_HANDLED;
2036}
2037
2038
2039
2040
2041
2042
2043
2044
2045irq_handler_t t4vf_intr_handler(struct adapter *adapter)
2046{
2047 BUG_ON((adapter->flags &
2048 (CXGB4VF_USING_MSIX | CXGB4VF_USING_MSI)) == 0);
2049 if (adapter->flags & CXGB4VF_USING_MSIX)
2050 return t4vf_sge_intr_msix;
2051 else
2052 return t4vf_intr_msi;
2053}
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
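/**
 *	sge_rx_timer_cb - perform periodic maintenance of SGE RX queues
 *	@t: the RX queue service timer
 *
 *	Runs periodically from a timer to perform maintenance of SGE RX
 *	queues: in particular, it rekicks any Free Lists which were marked
 *	"starving" because new buffers couldn't be allocated at the time.
 */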
2066static void sge_rx_timer_cb(struct timer_list *t)
2067{
2068 struct adapter *adapter = from_timer(adapter, t, sge.rx_timer);
2069 struct sge *s = &adapter->sge;
2070 unsigned int i;
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080 for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) {
2081 unsigned long m;
2082
2083 for (m = s->starving_fl[i]; m; m &= m - 1) {
2084 unsigned int id = __ffs(m) + i * BITS_PER_LONG;
2085 struct sge_fl *fl = s->egr_map[id];
2086
2087 clear_bit(id, s->starving_fl);
2088 smp_mb__after_atomic();
2089
2090
2091
2092
2093
2094
2095
2096 if (fl_starving(adapter, fl)) {
2097 struct sge_eth_rxq *rxq;
2098
2099 rxq = container_of(fl, struct sge_eth_rxq, fl);
2100 if (napi_reschedule(&rxq->rspq.napi))
2101 fl->starving++;
2102 else
2103 set_bit(id, s->starving_fl);
2104 }
2105 }
2106 }
2107
2108
2109
2110
2111 mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
2112}
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
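/**
 *	sge_tx_timer_cb - perform periodic maintenance of SGE TX queues
 *	@t: the TX queue service timer
 *
 *	Runs periodically from a timer to reclaim completed TX descriptors
 *	from queues which haven't been serviced by the normal TX path for a
 *	while, spreading a bounded amount of reclaim work across the queues.
 */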
2125static void sge_tx_timer_cb(struct timer_list *t)
2126{
2127 struct adapter *adapter = from_timer(adapter, t, sge.tx_timer);
2128 struct sge *s = &adapter->sge;
2129 unsigned int i, budget;
2130
2131 budget = MAX_TIMER_TX_RECLAIM;
2132 i = s->ethtxq_rover;
2133 do {
2134 struct sge_eth_txq *txq = &s->ethtxq[i];
2135
2136 if (reclaimable(&txq->q) && __netif_tx_trylock(txq->txq)) {
2137 int avail = reclaimable(&txq->q);
2138
2139 if (avail > budget)
2140 avail = budget;
2141
2142 free_tx_desc(adapter, &txq->q, avail, true);
2143 txq->q.in_use -= avail;
2144 __netif_tx_unlock(txq->txq);
2145
2146 budget -= avail;
2147 if (!budget)
2148 break;
2149 }
2150
2151 i++;
2152 if (i >= s->ethqsets)
2153 i = 0;
2154 } while (i != s->ethtxq_rover);
2155 s->ethtxq_rover = i;
2156
2157
2158
2159
2160
2161
2162 mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
2163}
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
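/**
 *	bar2_address - return the BAR2 address for an SGE Queue's Registers
 *	@adapter: the adapter
 *	@qid: the SGE Queue ID
 *	@qtype: the SGE Queue Type (Egress or Ingress)
 *	@pbar2_qid: BAR2 Queue ID or 0 for Queue-ID-inferred SGE Queues
 *
 *	Returns the BAR2 address for the SGE Queue Registers associated with
 *	@qid, or NULL if BAR2 SGE Registers aren't available.
 */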
2178static void __iomem *bar2_address(struct adapter *adapter,
2179 unsigned int qid,
2180 enum t4_bar2_qtype qtype,
2181 unsigned int *pbar2_qid)
2182{
2183 u64 bar2_qoffset;
2184 int ret;
2185
2186 ret = t4vf_bar2_sge_qregs(adapter, qid, qtype,
2187 &bar2_qoffset, pbar2_qid);
2188 if (ret)
2189 return NULL;
2190
2191 return adapter->bar2 + bar2_qoffset;
2192}
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
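/**
 *	t4vf_sge_alloc_rxq - allocate an SGE RX Queue
 *	@adapter: the adapter
 *	@rspq: pointer to the new rxq's Response Queue to be filled in
 *	@iqasynch: if 0, a normal rspq; if 1, an asynchronous event queue
 *	@dev: the network device associated with the new rspq
 *	@intr_dest: interrupt destination index (overridden in MSI mode)
 *	@fl: pointer to the new rxq's Free List to be filled in, or NULL
 *	@hnd: the response handler to invoke for the rspq
 */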
2204int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
2205 bool iqasynch, struct net_device *dev,
2206 int intr_dest,
2207 struct sge_fl *fl, rspq_handler_t hnd)
2208{
2209 struct sge *s = &adapter->sge;
2210 struct port_info *pi = netdev_priv(dev);
2211 struct fw_iq_cmd cmd, rpl;
2212 int ret, iqandst, flsz = 0;
2213 int relaxed = !(adapter->flags & CXGB4VF_ROOT_NO_RELAXED_ORDERING);
2214
2215
2216
2217
2218
2219
2220
2221
2222 if ((adapter->flags & CXGB4VF_USING_MSI) &&
2223 rspq != &adapter->sge.intrq) {
2224 iqandst = SGE_INTRDST_IQ;
2225 intr_dest = adapter->sge.intrq.abs_id;
2226 } else
2227 iqandst = SGE_INTRDST_PCI;
2228
2229
2230
2231
2232
2233
2234
2235 rspq->size = roundup(rspq->size, 16);
2236 rspq->desc = alloc_ring(adapter->pdev_dev, rspq->size, rspq->iqe_len,
2237 0, &rspq->phys_addr, NULL, 0);
2238 if (!rspq->desc)
2239 return -ENOMEM;
2240
2241
2242
2243
2244
2245
2246
2247
2248 memset(&cmd, 0, sizeof(cmd));
2249 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) |
2250 FW_CMD_REQUEST_F |
2251 FW_CMD_WRITE_F |
2252 FW_CMD_EXEC_F);
2253 cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC_F |
2254 FW_IQ_CMD_IQSTART_F |
2255 FW_LEN16(cmd));
2256 cmd.type_to_iqandstindex =
2257 cpu_to_be32(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
2258 FW_IQ_CMD_IQASYNCH_V(iqasynch) |
2259 FW_IQ_CMD_VIID_V(pi->viid) |
2260 FW_IQ_CMD_IQANDST_V(iqandst) |
2261 FW_IQ_CMD_IQANUS_V(1) |
2262 FW_IQ_CMD_IQANUD_V(SGE_UPDATEDEL_INTR) |
2263 FW_IQ_CMD_IQANDSTINDEX_V(intr_dest));
2264 cmd.iqdroprss_to_iqesize =
2265 cpu_to_be16(FW_IQ_CMD_IQPCIECH_V(pi->port_id) |
2266 FW_IQ_CMD_IQGTSMODE_F |
2267 FW_IQ_CMD_IQINTCNTTHRESH_V(rspq->pktcnt_idx) |
2268 FW_IQ_CMD_IQESIZE_V(ilog2(rspq->iqe_len) - 4));
2269 cmd.iqsize = cpu_to_be16(rspq->size);
2270 cmd.iqaddr = cpu_to_be64(rspq->phys_addr);
2271
2272 if (fl) {
2273 unsigned int chip_ver =
2274 CHELSIO_CHIP_VERSION(adapter->params.chip);
2275
2276
2277
2278
2279
2280
2281
2282
2283 if (fl->size < s->fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT)
2284 fl->size = s->fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT;
2285 fl->size = roundup(fl->size, FL_PER_EQ_UNIT);
2286 fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
2287 sizeof(__be64), sizeof(struct rx_sw_desc),
2288 &fl->addr, &fl->sdesc, s->stat_len);
2289 if (!fl->desc) {
2290 ret = -ENOMEM;
2291 goto err;
2292 }
2293
2294
2295
2296
2297
2298
2299 flsz = (fl->size / FL_PER_EQ_UNIT +
2300 s->stat_len / EQ_UNIT);
2301
2302
2303
2304
2305
2306 cmd.iqns_to_fl0congen =
2307 cpu_to_be32(
2308 FW_IQ_CMD_FL0HOSTFCMODE_V(SGE_HOSTFCMODE_NONE) |
2309 FW_IQ_CMD_FL0PACKEN_F |
2310 FW_IQ_CMD_FL0FETCHRO_V(relaxed) |
2311 FW_IQ_CMD_FL0DATARO_V(relaxed) |
2312 FW_IQ_CMD_FL0PADEN_F);
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322 cmd.fl0dcaen_to_fl0cidxfthresh =
2323 cpu_to_be16(
2324 FW_IQ_CMD_FL0FBMIN_V(chip_ver <= CHELSIO_T5
2325 ? FETCHBURSTMIN_128B_X
2326 : FETCHBURSTMIN_64B_T6_X) |
2327 FW_IQ_CMD_FL0FBMAX_V((chip_ver <= CHELSIO_T5) ?
2328 FETCHBURSTMAX_512B_X :
2329 FETCHBURSTMAX_256B_X));
2330 cmd.fl0size = cpu_to_be16(flsz);
2331 cmd.fl0addr = cpu_to_be64(fl->addr);
2332 }
2333
2334
2335
2336
2337
2338 ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
2339 if (ret)
2340 goto err;
2341
2342 netif_napi_add(dev, &rspq->napi, napi_rx_handler, 64);
2343 rspq->cur_desc = rspq->desc;
2344 rspq->cidx = 0;
2345 rspq->gen = 1;
2346 rspq->next_intr_params = rspq->intr_params;
2347 rspq->cntxt_id = be16_to_cpu(rpl.iqid);
2348 rspq->bar2_addr = bar2_address(adapter,
2349 rspq->cntxt_id,
2350 T4_BAR2_QTYPE_INGRESS,
2351 &rspq->bar2_qid);
2352 rspq->abs_id = be16_to_cpu(rpl.physiqid);
2353 rspq->size--;
2354 rspq->adapter = adapter;
2355 rspq->netdev = dev;
2356 rspq->handler = hnd;
2357
2358
2359 rspq->offset = fl ? 0 : -1;
2360
2361 if (fl) {
2362 fl->cntxt_id = be16_to_cpu(rpl.fl0id);
2363 fl->avail = 0;
2364 fl->pend_cred = 0;
2365 fl->pidx = 0;
2366 fl->cidx = 0;
2367 fl->alloc_failed = 0;
2368 fl->large_alloc_failed = 0;
2369 fl->starving = 0;
2370
2371
2372
2373
2374 fl->bar2_addr = bar2_address(adapter,
2375 fl->cntxt_id,
2376 T4_BAR2_QTYPE_EGRESS,
2377 &fl->bar2_qid);
2378
2379 refill_fl(adapter, fl, fl_cap(fl), GFP_KERNEL);
2380 }
2381
2382 return 0;
2383
2384err:
2385
2386
2387
2388
2389 if (rspq->desc) {
2390 dma_free_coherent(adapter->pdev_dev, rspq->size * rspq->iqe_len,
2391 rspq->desc, rspq->phys_addr);
2392 rspq->desc = NULL;
2393 }
2394 if (fl && fl->desc) {
2395 kfree(fl->sdesc);
2396 fl->sdesc = NULL;
2397 dma_free_coherent(adapter->pdev_dev, flsz * EQ_UNIT,
2398 fl->desc, fl->addr);
2399 fl->desc = NULL;
2400 }
2401 return ret;
2402}
2403
2404
2405
2406
2407
2408
2409
2410
2411
2412
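/**
 *	t4vf_sge_alloc_eth_txq - allocate an SGE Ethernet TX Queue
 *	@adapter: the adapter
 *	@txq: pointer to the new txq to be filled in
 *	@dev: the network device
 *	@devq: the network TX queue associated with the new txq
 *	@iqid: the ingress queue ID to which events relating to the new txq
 *		should be directed
 */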
2413int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
2414 struct net_device *dev, struct netdev_queue *devq,
2415 unsigned int iqid)
2416{
2417 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
2418 struct port_info *pi = netdev_priv(dev);
2419 struct fw_eq_eth_cmd cmd, rpl;
2420 struct sge *s = &adapter->sge;
2421 int ret, nentries;
2422
2423
2424
2425
2426
2427 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2428
2429
2430
2431
2432
2433 txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
2434 sizeof(struct tx_desc),
2435 sizeof(struct tx_sw_desc),
2436 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len);
2437 if (!txq->q.desc)
2438 return -ENOMEM;
2439
2440
2441
2442
2443
2444
2445
2446
2447 memset(&cmd, 0, sizeof(cmd));
2448 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
2449 FW_CMD_REQUEST_F |
2450 FW_CMD_WRITE_F |
2451 FW_CMD_EXEC_F);
2452 cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC_F |
2453 FW_EQ_ETH_CMD_EQSTART_F |
2454 FW_LEN16(cmd));
2455 cmd.autoequiqe_to_viid = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
2456 FW_EQ_ETH_CMD_VIID_V(pi->viid));
2457 cmd.fetchszm_to_iqid =
2458 cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE_V(SGE_HOSTFCMODE_STPG) |
2459 FW_EQ_ETH_CMD_PCIECHN_V(pi->port_id) |
2460 FW_EQ_ETH_CMD_IQID_V(iqid));
2461 cmd.dcaen_to_eqsize =
2462 cpu_to_be32(FW_EQ_ETH_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
2463 ? FETCHBURSTMIN_64B_X
2464 : FETCHBURSTMIN_64B_T6_X) |
2465 FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
2466 FW_EQ_ETH_CMD_CIDXFTHRESH_V(
2467 CIDXFLUSHTHRESH_32_X) |
2468 FW_EQ_ETH_CMD_EQSIZE_V(nentries));
2469 cmd.eqaddr = cpu_to_be64(txq->q.phys_addr);
2470
2471
2472
2473
2474
2475 ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
2476 if (ret) {
2477
2478
2479
2480
2481 kfree(txq->q.sdesc);
2482 txq->q.sdesc = NULL;
2483 dma_free_coherent(adapter->pdev_dev,
2484 nentries * sizeof(struct tx_desc),
2485 txq->q.desc, txq->q.phys_addr);
2486 txq->q.desc = NULL;
2487 return ret;
2488 }
2489
2490 txq->q.in_use = 0;
2491 txq->q.cidx = 0;
2492 txq->q.pidx = 0;
2493 txq->q.stat = (void *)&txq->q.desc[txq->q.size];
2494 txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_G(be32_to_cpu(rpl.eqid_pkd));
2495 txq->q.bar2_addr = bar2_address(adapter,
2496 txq->q.cntxt_id,
2497 T4_BAR2_QTYPE_EGRESS,
2498 &txq->q.bar2_qid);
2499 txq->q.abs_id =
2500 FW_EQ_ETH_CMD_PHYSEQID_G(be32_to_cpu(rpl.physeqid_pkd));
2501 txq->txq = devq;
2502 txq->tso = 0;
2503 txq->tx_cso = 0;
2504 txq->vlan_ins = 0;
2505 txq->q.stops = 0;
2506 txq->q.restarts = 0;
2507 txq->mapping_err = 0;
2508 return 0;
2509}
2510
2511
2512
2513
2514static void free_txq(struct adapter *adapter, struct sge_txq *tq)
2515{
2516 struct sge *s = &adapter->sge;
2517
2518 dma_free_coherent(adapter->pdev_dev,
2519 tq->size * sizeof(*tq->desc) + s->stat_len,
2520 tq->desc, tq->phys_addr);
2521 tq->cntxt_id = 0;
2522 tq->sdesc = NULL;
2523 tq->desc = NULL;
2524}
2525
2526
2527
2528
2529
2530static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
2531 struct sge_fl *fl)
2532{
2533 struct sge *s = &adapter->sge;
2534 unsigned int flid = fl ? fl->cntxt_id : 0xffff;
2535
2536 t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP,
2537 rspq->cntxt_id, flid, 0xffff);
2538 dma_free_coherent(adapter->pdev_dev, (rspq->size + 1) * rspq->iqe_len,
2539 rspq->desc, rspq->phys_addr);
2540 netif_napi_del(&rspq->napi);
2541 rspq->netdev = NULL;
2542 rspq->cntxt_id = 0;
2543 rspq->abs_id = 0;
2544 rspq->desc = NULL;
2545
2546 if (fl) {
2547 free_rx_bufs(adapter, fl, fl->avail);
2548 dma_free_coherent(adapter->pdev_dev,
2549 fl->size * sizeof(*fl->desc) + s->stat_len,
2550 fl->desc, fl->addr);
2551 kfree(fl->sdesc);
2552 fl->sdesc = NULL;
2553 fl->cntxt_id = 0;
2554 fl->desc = NULL;
2555 }
2556}
2557
2558
2559
2560
2561
2562
2563
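/**
 *	t4vf_free_sge_resources - free SGE resources
 *	@adapter: the adapter
 *
 *	Frees resources used by the SGE queue sets, the firmware event queue
 *	and the forwarded interrupt queue.
 */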
2564void t4vf_free_sge_resources(struct adapter *adapter)
2565{
2566 struct sge *s = &adapter->sge;
2567 struct sge_eth_rxq *rxq = s->ethrxq;
2568 struct sge_eth_txq *txq = s->ethtxq;
2569 struct sge_rspq *evtq = &s->fw_evtq;
2570 struct sge_rspq *intrq = &s->intrq;
2571 int qs;
2572
2573 for (qs = 0; qs < adapter->sge.ethqsets; qs++, rxq++, txq++) {
2574 if (rxq->rspq.desc)
2575 free_rspq_fl(adapter, &rxq->rspq, &rxq->fl);
2576 if (txq->q.desc) {
2577 t4vf_eth_eq_free(adapter, txq->q.cntxt_id);
2578 free_tx_desc(adapter, &txq->q, txq->q.in_use, true);
2579 kfree(txq->q.sdesc);
2580 free_txq(adapter, &txq->q);
2581 }
2582 }
2583 if (evtq->desc)
2584 free_rspq_fl(adapter, evtq, NULL);
2585 if (intrq->desc)
2586 free_rspq_fl(adapter, intrq, NULL);
2587}
2588
2589
2590
2591
2592
2593
2594
2595void t4vf_sge_start(struct adapter *adapter)
2596{
2597 adapter->sge.ethtxq_rover = 0;
2598 mod_timer(&adapter->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
2599 mod_timer(&adapter->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
2600}
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610void t4vf_sge_stop(struct adapter *adapter)
2611{
2612 struct sge *s = &adapter->sge;
2613
2614 if (s->rx_timer.function)
2615 del_timer_sync(&s->rx_timer);
2616 if (s->tx_timer.function)
2617 del_timer_sync(&s->tx_timer);
2618}
2619
2620
2621
2622
2623
2624
2625
2626
2627
2628
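/**
 *	t4vf_sge_init - initialize SGE
 *	@adapter: the adapter
 *
 *	Performs SGE initialization needed every time after a chip reset.
 *	We do not initialize any of the queue sets here; the driver top-level
 *	must request those individually.  We also do not enable DMA here;
 *	that should be done after the queues have been set up.
 */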
2629int t4vf_sge_init(struct adapter *adapter)
2630{
2631 struct sge_params *sge_params = &adapter->params.sge;
2632 u32 fl_small_pg = sge_params->sge_fl_buffer_size[0];
2633 u32 fl_large_pg = sge_params->sge_fl_buffer_size[1];
2634 struct sge *s = &adapter->sge;
2635
2636
2637
2638
2639
2640
2641
2642
2643
2644
2645 if (fl_large_pg <= fl_small_pg)
2646 fl_large_pg = 0;
2647
2648
2649
2650
2651 if (fl_small_pg != PAGE_SIZE ||
2652 (fl_large_pg & (fl_large_pg - 1)) != 0) {
2653 dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
2654 fl_small_pg, fl_large_pg);
2655 return -EINVAL;
2656 }
2657 if ((sge_params->sge_control & RXPKTCPLMODE_F) !=
2658 RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
2659 dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
2660 return -EINVAL;
2661 }
2662
2663
2664
2665
2666 if (fl_large_pg)
2667 s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
2668 s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
2669 ? 128 : 64);
2670 s->pktshift = PKTSHIFT_G(sge_params->sge_control);
2671 s->fl_align = t4vf_fl_pkt_align(adapter);
2672
2673
2674
2675
2676
2677
2678
2679
2680 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
2681 case CHELSIO_T4:
2682 s->fl_starve_thres =
2683 EGRTHRESHOLD_G(sge_params->sge_congestion_control);
2684 break;
2685 case CHELSIO_T5:
2686 s->fl_starve_thres =
2687 EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control);
2688 break;
2689 case CHELSIO_T6:
2690 default:
2691 s->fl_starve_thres =
2692 T6_EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control);
2693 break;
2694 }
2695 s->fl_starve_thres = s->fl_starve_thres * 2 + 1;
2696
2697
2698
2699
2700 timer_setup(&s->rx_timer, sge_rx_timer_cb, 0);
2701 timer_setup(&s->tx_timer, sge_tx_timer_cb, 0);
2702
2703
2704
2705
2706 spin_lock_init(&s->intrq_lock);
2707
2708 return 0;
2709}
2710