/*
 * cxgb4 Ethernet driver: SGE (Scatter-Gather Engine) Tx/Rx queue handling.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/dma-mapping.h>
#include <linux/jiffies.h>
#include <linux/prefetch.h>
#include <linux/export.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/busy_poll.h>
#ifdef CONFIG_CHELSIO_T4_FCOE
#include <scsi/fc/fc_fcoe.h>
#endif
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "cxgb4_ptp.h"

/*
 * Rx free-list buffers are allocated as pages.  Use a page order large
 * enough to give 64KB buffers unless PAGE_SIZE is already at least that big.
 */
#if PAGE_SHIFT >= 16
# define FL_PG_ORDER 0
#else
# define FL_PG_ORDER (16 - PAGE_SHIFT)
#endif

/* RX_PULL_LEN should be <= RX_COPY_THRES */
#define RX_COPY_THRES    256
#define RX_PULL_LEN      128

/*
 * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
 * Should be at least RX_PULL_LEN.
 */
#define RX_PKT_SKB_LEN   512

/*
 * Max number of Tx descriptors we clean up at a time.  Keep this modest
 * since freeing skbs isn't cheap and it happens while holding locks.
 */
#define MAX_TX_RECLAIM 16

/*
 * Max number of Rx buffers we replenish at a time.  Again keep this modest,
 * allocating buffers isn't cheap either.
 */
#define MAX_RX_REFILL 16U

/*
 * Period of the Rx queue check timer.  This timer is infrequent as it has
 * something to do only when the system experiences severe memory shortage.
 */
#define RX_QCHECK_PERIOD (HZ / 2)

/*
 * Period of the Tx queue check timer.
 */
#define TX_QCHECK_PERIOD (HZ / 2)

/*
 * Max number of Tx descriptors to be reclaimed by the Tx timer.
 */
#define MAX_TIMER_TX_RECLAIM 100

/*
 * Timer index used when backing off due to memory shortage.
 */
#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)

/*
 * Suspend an Ethernet Tx queue with fewer available descriptors than this.
 * This is roughly the number of descriptors needed for a maximally
 * fragmented TSO packet (nr_frags == MAX_SKB_FRAGS).
 */
#define ETHTXQ_STOP_THRES \
	(1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
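
/*
 * Illustrative arithmetic (values assumed, e.g. MAX_SKB_FRAGS == 17 with 4KB
 * pages): the SGL for 17 fragments needs (3 * 17) / 2 + (17 & 1) = 26 flits,
 * i.e. DIV_ROUND_UP(26, 8) = 4 descriptors of 8 flits each, plus one more
 * descriptor for the WR header and CPLs, giving ETHTXQ_STOP_THRES == 5.
 */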

/*
 * Suspension threshold for non-Ethernet Tx queues.  We require enough room
 * for a full sized WR.
 */
#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))

/*
 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
 * into a WR.
 */
#define MAX_IMM_TX_PKT_LEN 256

/*
 * Max size of a WR sent through a control Tx queue.
 */
#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN

struct tx_sw_desc {		/* SW state per Tx descriptor */
	struct sk_buff *skb;
	struct ulptx_sgl *sgl;
};

struct rx_sw_desc {		/* SW state per Rx descriptor */
	struct page *page;
	dma_addr_t dma_addr;
};

/*
 * Rx Free List buffer sizes keyed off the two standard MTUs.  The returned
 * buffer size includes the packet-shift prefix and Ethernet/VLAN headers and
 * is rounded up to the SGE's Free List alignment.
 */
#define FL_MTU_SMALL 1500
#define FL_MTU_LARGE 9000

static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
					  unsigned int mtu)
{
	struct sge *s = &adapter->sge;

	return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
}

#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
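
/*
 * Illustrative arithmetic (typical values assumed, not taken from this
 * source): with pktshift == 2 and fl_align == 64, FL_MTU_SMALL_BUFSIZE()
 * works out to ALIGN(2 + 14 + 4 + 1500, 64) = 1536 bytes.
 */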

/*
 * The low bits of rx_sw_desc.dma_addr carry per-buffer state: the bottom
 * four bits index the SGE Free List buffer-size register array, and bit 4
 * marks a buffer that is no longer DMA-mapped (it is never handed to the
 * hardware and only guards against double unmaps).  This relies on Free
 * List buffers being at least 32-byte aligned, which the SGE guarantees.
 */
enum {
	RX_BUF_FLAGS     = 0x1f,   /* bottom five bits are special */
	RX_BUF_SIZE      = 0x0f,   /* bottom four bits are buffer-size index */
	RX_UNMAPPED_BUF  = 0x10,   /* buffer is not mapped */

	/*
	 * Buffer-size register indices used for the four buffer types below.
	 */
	RX_SMALL_PG_BUF  = 0x0,   /* small (PAGE_SIZE) page buffer */
	RX_LARGE_PG_BUF  = 0x1,   /* large (FL_PG_ORDER) page buffer */

	RX_SMALL_MTU_BUF = 0x2,   /* small (FL_MTU_SMALL) MTU buffer */
	RX_LARGE_MTU_BUF = 0x3,   /* large (FL_MTU_LARGE) MTU buffer */
};

static int timer_pkt_quota[] = {1, 1, 2, 3, 4, 5};
#define MIN_NAPI_WORK  1

static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
{
	return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
}

static inline bool is_buf_mapped(const struct rx_sw_desc *d)
{
	return !(d->dma_addr & RX_UNMAPPED_BUF);
}

/**
 *	txq_avail - return the number of available slots in a Tx queue
 *	@q: the Tx queue
 *
 *	Returns the number of descriptors in a Tx queue available to write
 *	new packets.
 */
static inline unsigned int txq_avail(const struct sge_txq *q)
{
	return q->size - 1 - q->in_use;
}

/**
 *	fl_cap - return the capacity of a free-buffer list
 *	@fl: the FL
 *
 *	Returns the capacity of a free buffer list, which is a bit less than
 *	its size because one Egress Queue Unit's worth of buffer pointers (8)
 *	is held back so the hardware never sees a completely full ring.
 */
static inline unsigned int fl_cap(const struct sge_fl *fl)
{
	return fl->size - 8;
}

/**
 *	fl_starving - return whether a Free List is starving.
 *	@adapter: pointer to the adapter
 *	@fl: the Free List
 *
 *	Tests specified Free List to see whether the number of buffers
 *	available to the hardware has fallen below our "starvation"
 *	threshold.
 */
static inline bool fl_starving(const struct adapter *adapter,
			       const struct sge_fl *fl)
{
	const struct sge *s = &adapter->sge;

	return fl->avail - fl->pend_cred <= s->fl_starve_thres;
}
250
251static int map_skb(struct device *dev, const struct sk_buff *skb,
252 dma_addr_t *addr)
253{
254 const skb_frag_t *fp, *end;
255 const struct skb_shared_info *si;
256
257 *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
258 if (dma_mapping_error(dev, *addr))
259 goto out_err;
260
261 si = skb_shinfo(skb);
262 end = &si->frags[si->nr_frags];
263
264 for (fp = si->frags; fp < end; fp++) {
265 *++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
266 DMA_TO_DEVICE);
267 if (dma_mapping_error(dev, *addr))
268 goto unwind;
269 }
270 return 0;
271
272unwind:
273 while (fp-- > si->frags)
274 dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
275
276 dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
277out_err:
278 return -ENOMEM;
279}
280
281#ifdef CONFIG_NEED_DMA_MAP_STATE
282static void unmap_skb(struct device *dev, const struct sk_buff *skb,
283 const dma_addr_t *addr)
284{
285 const skb_frag_t *fp, *end;
286 const struct skb_shared_info *si;
287
288 dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);
289
290 si = skb_shinfo(skb);
291 end = &si->frags[si->nr_frags];
292 for (fp = si->frags; fp < end; fp++)
293 dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
294}

/**
 *	deferred_unmap_destructor - unmap a packet when it is freed
 *	@skb: the packet
 *
 *	This is the packet destructor used for Tx packets that need to remain
 *	mapped until they are freed rather than until their Tx descriptors are
 *	freed.
 */
304static void deferred_unmap_destructor(struct sk_buff *skb)
305{
306 unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
307}
308#endif
309
310static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
311 const struct ulptx_sgl *sgl, const struct sge_txq *q)
312{
313 const struct ulptx_sge_pair *p;
314 unsigned int nfrags = skb_shinfo(skb)->nr_frags;
315
316 if (likely(skb_headlen(skb)))
317 dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
318 DMA_TO_DEVICE);
319 else {
320 dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
321 DMA_TO_DEVICE);
322 nfrags--;
323 }

	/*
	 * The loop below deals with wrapping: the SGL may cross the end of
	 * the descriptor ring, in which case the remaining addresses continue
	 * at the start of the ring (q->desc).
	 */
329 for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
330 if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
331unmap: dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
332 ntohl(p->len[0]), DMA_TO_DEVICE);
333 dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
334 ntohl(p->len[1]), DMA_TO_DEVICE);
335 p++;
336 } else if ((u8 *)p == (u8 *)q->stat) {
337 p = (const struct ulptx_sge_pair *)q->desc;
338 goto unmap;
339 } else if ((u8 *)p + 8 == (u8 *)q->stat) {
340 const __be64 *addr = (const __be64 *)q->desc;
341
342 dma_unmap_page(dev, be64_to_cpu(addr[0]),
343 ntohl(p->len[0]), DMA_TO_DEVICE);
344 dma_unmap_page(dev, be64_to_cpu(addr[1]),
345 ntohl(p->len[1]), DMA_TO_DEVICE);
346 p = (const struct ulptx_sge_pair *)&addr[2];
347 } else {
348 const __be64 *addr = (const __be64 *)q->desc;
349
350 dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
351 ntohl(p->len[0]), DMA_TO_DEVICE);
352 dma_unmap_page(dev, be64_to_cpu(addr[0]),
353 ntohl(p->len[1]), DMA_TO_DEVICE);
354 p = (const struct ulptx_sge_pair *)&addr[1];
355 }
356 }
357 if (nfrags) {
358 __be64 addr;
359
360 if ((u8 *)p == (u8 *)q->stat)
361 p = (const struct ulptx_sge_pair *)q->desc;
362 addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
363 *(const __be64 *)q->desc;
364 dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
365 DMA_TO_DEVICE);
366 }
367}

/**
 *	free_tx_desc - reclaims Tx descriptors and their buffers
 *	@adap: the adapter
 *	@q: the Tx queue to reclaim descriptors from
 *	@n: the number of descriptors to reclaim
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 *	Tx buffers.  Called with the Tx queue lock held.
 */
379void free_tx_desc(struct adapter *adap, struct sge_txq *q,
380 unsigned int n, bool unmap)
381{
382 struct tx_sw_desc *d;
383 unsigned int cidx = q->cidx;
384 struct device *dev = adap->pdev_dev;
385
386 d = &q->sdesc[cidx];
387 while (n--) {
388 if (d->skb) {
389 if (unmap)
390 unmap_sgl(dev, d->skb, d->sgl, q);
391 dev_consume_skb_any(d->skb);
392 d->skb = NULL;
393 }
394 ++d;
395 if (++cidx == q->size) {
396 cidx = 0;
397 d = q->sdesc;
398 }
399 }
400 q->cidx = cidx;
401}

/*
 * Return the number of reclaimable descriptors in a Tx queue.
 */
406static inline int reclaimable(const struct sge_txq *q)
407{
408 int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx));
409 hw_cidx -= q->cidx;
410 return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
411}

/**
 *	reclaim_completed_tx - reclaims completed Tx descriptors
 *	@adap: the adapter
 *	@q: the Tx queue to reclaim completed descriptors from
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims Tx descriptors that the SGE has indicated it has processed,
 *	and frees the associated buffers if possible.  Called with the Tx
 *	queue locked.
 */
423static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
424 bool unmap)
425{
426 int avail = reclaimable(q);
427
428 if (avail) {
		/*
		 * Limit the amount of clean up work we do at a time to keep
		 * the Tx lock hold time O(1).
		 */
433 if (avail > MAX_TX_RECLAIM)
434 avail = MAX_TX_RECLAIM;
435
436 free_tx_desc(adap, q, avail, unmap);
437 q->in_use -= avail;
438 }
439}
440
441static inline int get_buf_size(struct adapter *adapter,
442 const struct rx_sw_desc *d)
443{
444 struct sge *s = &adapter->sge;
445 unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
446 int buf_size;
447
448 switch (rx_buf_size_idx) {
449 case RX_SMALL_PG_BUF:
450 buf_size = PAGE_SIZE;
451 break;
452
453 case RX_LARGE_PG_BUF:
454 buf_size = PAGE_SIZE << s->fl_pg_order;
455 break;
456
457 case RX_SMALL_MTU_BUF:
458 buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
459 break;
460
461 case RX_LARGE_MTU_BUF:
462 buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
463 break;
464
465 default:
466 BUG_ON(1);
467 }
468
469 return buf_size;
470}

/**
 *	free_rx_bufs - free the Rx buffers on an SGE free list
 *	@adap: the adapter
 *	@q: the SGE free list to free buffers from
 *	@n: how many buffers to free
 *
 *	Release the next @n buffers on an SGE free-buffer Rx queue.  The
 *	buffers must be made inaccessible to HW before calling this function.
 */
481static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
482{
483 while (n--) {
484 struct rx_sw_desc *d = &q->sdesc[q->cidx];
485
486 if (is_buf_mapped(d))
487 dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
488 get_buf_size(adap, d),
489 PCI_DMA_FROMDEVICE);
490 put_page(d->page);
491 d->page = NULL;
492 if (++q->cidx == q->size)
493 q->cidx = 0;
494 q->avail--;
495 }
496}

/**
 *	unmap_rx_buf - unmap the current Rx buffer on an SGE free list
 *	@adap: the adapter
 *	@q: the SGE free list
 *
 *	Unmap the current buffer on an SGE free-buffer Rx queue.  The buffer
 *	must be made inaccessible to HW before calling this function.
 *
 *	This is similar to @free_rx_bufs above but does not free the buffer.
 *	Do note that the FL still loses any further access to the buffer.
 */
509static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
510{
511 struct rx_sw_desc *d = &q->sdesc[q->cidx];
512
513 if (is_buf_mapped(d))
514 dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
515 get_buf_size(adap, d), PCI_DMA_FROMDEVICE);
516 d->page = NULL;
517 if (++q->cidx == q->size)
518 q->cidx = 0;
519 q->avail--;
520}
521
522static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
523{
524 if (q->pend_cred >= 8) {
525 u32 val = adap->params.arch.sge_fl_db;
526
527 if (is_t4(adap->params.chip))
528 val |= PIDX_V(q->pend_cred / 8);
529 else
530 val |= PIDX_T5_V(q->pend_cred / 8);

		/* Make sure all memory writes to the Free List queue are
		 * committed before we tell the hardware about them.
		 */
		wmb();

		/* If we don't have access to the new User Doorbell (T5 and
		 * later), use the old doorbell mechanism; otherwise use the
		 * new BAR2 mechanism.
		 */
541 if (unlikely(q->bar2_addr == NULL)) {
542 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
543 val | QID_V(q->cntxt_id));
544 } else {
545 writel(val | QID_V(q->bar2_qid),
546 q->bar2_addr + SGE_UDB_KDOORBELL);

			/* This Write memory Barrier will force the write to
			 * the User Doorbell area to be flushed.
			 */
			wmb();
552 }
553 q->pend_cred &= 7;
554 }
555}
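
/*
 * Illustrative example (values assumed): with pend_cred == 20, ring_fl_db()
 * advances the producer index by 20 / 8 = 2 hardware units (16 buffers) and
 * keeps the remaining 20 & 7 == 4 credits pending for a later doorbell.
 */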
556
557static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
558 dma_addr_t mapping)
559{
560 sd->page = pg;
561 sd->dma_addr = mapping;
562}

/**
 *	refill_fl - refill an SGE Rx buffer ring
 *	@adap: the adapter
 *	@q: the ring to refill
 *	@n: the number of new buffers to allocate
 *	@gfp: the gfp flags for the allocations
 *
 *	(Re)populate an SGE free-buffer queue with up to @n new packet buffers,
 *	allocated with the supplied gfp flags.  The caller must assure that
 *	@n does not exceed the queue's capacity.  If afterwards the queue is
 *	found critically low mark it as starving in the bitmap of starving FLs.
 *
 *	Returns the number of buffers allocated.
 */
578static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
579 gfp_t gfp)
580{
581 struct sge *s = &adap->sge;
582 struct page *pg;
583 dma_addr_t mapping;
584 unsigned int cred = q->avail;
585 __be64 *d = &q->desc[q->pidx];
586 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
587 int node;
588
589#ifdef CONFIG_DEBUG_FS
590 if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl))
591 goto out;
592#endif
593
594 gfp |= __GFP_NOWARN;
595 node = dev_to_node(adap->pdev_dev);
596
597 if (s->fl_pg_order == 0)
598 goto alloc_small_pages;

	/*
	 * Prefer large buffers
	 */
603 while (n) {
604 pg = alloc_pages_node(node, gfp | __GFP_COMP, s->fl_pg_order);
605 if (unlikely(!pg)) {
606 q->large_alloc_failed++;
607 break;
608 }
609
610 mapping = dma_map_page(adap->pdev_dev, pg, 0,
611 PAGE_SIZE << s->fl_pg_order,
612 PCI_DMA_FROMDEVICE);
613 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
614 __free_pages(pg, s->fl_pg_order);
615 q->mapping_err++;
616 goto out;
617 }
618 mapping |= RX_LARGE_PG_BUF;
619 *d++ = cpu_to_be64(mapping);
620
621 set_rx_sw_desc(sd, pg, mapping);
622 sd++;
623
624 q->avail++;
625 if (++q->pidx == q->size) {
626 q->pidx = 0;
627 sd = q->sdesc;
628 d = q->desc;
629 }
630 n--;
631 }
632
633alloc_small_pages:
634 while (n--) {
635 pg = alloc_pages_node(node, gfp, 0);
636 if (unlikely(!pg)) {
637 q->alloc_failed++;
638 break;
639 }
640
641 mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
642 PCI_DMA_FROMDEVICE);
643 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
644 put_page(pg);
645 q->mapping_err++;
646 goto out;
647 }
648 *d++ = cpu_to_be64(mapping);
649
650 set_rx_sw_desc(sd, pg, mapping);
651 sd++;
652
653 q->avail++;
654 if (++q->pidx == q->size) {
655 q->pidx = 0;
656 sd = q->sdesc;
657 d = q->desc;
658 }
659 }
660
661out: cred = q->avail - cred;
662 q->pend_cred += cred;
663 ring_fl_db(adap, q);
664
665 if (unlikely(fl_starving(adap, q))) {
666 smp_wmb();
667 q->low++;
668 set_bit(q->cntxt_id - adap->sge.egr_start,
669 adap->sge.starving_fl);
670 }
671
672 return cred;
673}
674
675static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
676{
677 refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
678 GFP_ATOMIC);
679}

/**
 *	alloc_ring - allocate resources for an SGE descriptor ring
 *	@dev: the PCI device's core device
 *	@nelem: the number of descriptors
 *	@elem_size: the size of each descriptor
 *	@sw_size: the size of the SW state associated with each ring element
 *	@phys: the physical address of the allocated ring
 *	@metadata: address of the array holding the SW state for the ring
 *	@stat_size: extra space in HW ring for status information
 *	@node: preferred node for memory allocations
 *
 *	Allocates resources for an SGE descriptor ring, such as Tx queues,
 *	free buffer lists, or response queues.  Each SGE ring requires
 *	space for its HW descriptors plus, optionally, space for the SW state
 *	associated with each HW entry (the metadata).  The function returns
 *	the virtual address of the HW ring starting at address @phys.  The
 *	metadata address is returned in @metadata, if it is non-NULL.
 */
700static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
701 size_t sw_size, dma_addr_t *phys, void *metadata,
702 size_t stat_size, int node)
703{
704 size_t len = nelem * elem_size + stat_size;
705 void *s = NULL;
706 void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
707
708 if (!p)
709 return NULL;
710 if (sw_size) {
711 s = kzalloc_node(nelem * sw_size, GFP_KERNEL, node);
712
713 if (!s) {
714 dma_free_coherent(dev, len, p, *phys);
715 return NULL;
716 }
717 }
718 if (metadata)
719 *(void **)metadata = s;
720 memset(p, 0, len);
721 return p;
722}

/**
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *
 *	Calculates the number of flits (8-byte units) needed for a
 *	scatter/gather list that can hold the given number of entries.
 */
731static inline unsigned int sgl_len(unsigned int n)
732{
	/*
	 * A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
	 * addresses.  The ULP TX SGL starts with one flit holding the command
	 * and the first length, a second flit with the first address, and
	 * then the remaining entries packed as pairs of three flits each (two
	 * 64-bit addresses plus one flit holding their two 32-bit lengths).
	 * Hence the formula below: 2 flits of fixed overhead plus 3 flits per
	 * 2 remaining entries, with one extra flit if that count is odd.
	 */
749 n--;
750 return (3 * n) / 2 + (n & 1) + 2;
751}
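
/*
 * Worked example (illustrative): for n == 3 SGL entries, sgl_len() computes
 * (3 * 2) / 2 + (2 & 1) + 2 = 5 flits -- one flit for the command/len0, one
 * for addr0, and three for the remaining address pair and its lengths.
 */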

/**
 *	flits_to_desc - returns the num of Tx descriptors for the given flits
 *	@n: the number of flits
 *
 *	Returns the number of Tx descriptors needed for the supplied number
 *	of flits.
 */
760static inline unsigned int flits_to_desc(unsigned int n)
761{
762 BUG_ON(n > SGE_MAX_WR_LEN / 8);
763 return DIV_ROUND_UP(n, 8);
764}

/**
 *	is_eth_imm - can an Ethernet packet be sent as immediate data?
 *	@skb: the packet
 *
 *	Returns whether an Ethernet packet is small enough to fit as
 *	immediate data.  The return value corresponds to the headroom needed
 *	for the CPL headers, or 0 if the packet can't be sent inline.
 */
773static inline int is_eth_imm(const struct sk_buff *skb)
774{
775 int hdrlen = skb_shinfo(skb)->gso_size ?
776 sizeof(struct cpl_tx_pkt_lso_core) : 0;
777
778 hdrlen += sizeof(struct cpl_tx_pkt);
779 if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
780 return hdrlen;
781 return 0;
782}
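
/*
 * For example (illustrative): a 100-byte non-GSO packet needs only a
 * struct cpl_tx_pkt of headroom, so 100 <= MAX_IMM_TX_PKT_LEN - hdrlen and
 * the packet data is copied directly into the work request instead of being
 * DMA-mapped and described by a gather list.
 */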

/**
 *	calc_tx_flits - calculate the number of flits for a packet Tx WR
 *	@skb: the packet
 *
 *	Returns the number of flits needed for a Tx WR for the given Ethernet
 *	packet, including the needed WR and CPL headers.
 */
791static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
792{
793 unsigned int flits;
794 int hdrlen = is_eth_imm(skb);

	/* If the skb is small enough, we can pump it out as a work request
	 * with only immediate data.  In that case we just have to have the
	 * TX Packet header plus the skb data in the Work Request.
	 */
801 if (hdrlen)
802 return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));

	/* Otherwise, we're going to have to construct a Scatter gather list
	 * of the skb body and fragments.  We also include the firmware
	 * Work Request header, the Ethernet packet CPL, and possibly the
	 * LSO CPL if the packet is being segmented.
	 */
812 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
813 if (skb_shinfo(skb)->gso_size)
814 flits += (sizeof(struct fw_eth_tx_pkt_wr) +
815 sizeof(struct cpl_tx_pkt_lso_core) +
816 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
817 else
818 flits += (sizeof(struct fw_eth_tx_pkt_wr) +
819 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
820 return flits;
821}

/**
 *	calc_tx_descs - calculate the number of Tx descriptors for a packet
 *	@skb: the packet
 *
 *	Returns the number of Tx descriptors needed for the given Ethernet
 *	packet, including the needed WR and CPL headers.
 */
830static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
831{
832 return flits_to_desc(calc_tx_flits(skb));
833}

/**
 *	write_sgl - populate a scatter/gather list for a packet
 *	@skb: the packet
 *	@q: the Tx queue we are writing into
 *	@sgl: starting location for writing the SGL
 *	@end: points right after the end of the SGL
 *	@start: start offset into skb main-body data to include in the SGL
 *	@addr: the list of bus addresses for the SGL elements
 *
 *	Generates a gather list for the buffers that make up a packet.
 *	The caller must provide adequate space for the SGL that will be
 *	written.  The SGL includes all of the packet's page fragments and the
 *	data in its main body except for the first @start bytes.  @sgl must be
 *	16-byte aligned and within a Tx descriptor with available space.
 *	@end points right after the end of the SGL but does not account for
 *	any potential wrap around, i.e., @end > @sgl.
 */
852static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
853 struct ulptx_sgl *sgl, u64 *end, unsigned int start,
854 const dma_addr_t *addr)
855{
856 unsigned int i, len;
857 struct ulptx_sge_pair *to;
858 const struct skb_shared_info *si = skb_shinfo(skb);
859 unsigned int nfrags = si->nr_frags;
860 struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
861
862 len = skb_headlen(skb) - start;
863 if (likely(len)) {
864 sgl->len0 = htonl(len);
865 sgl->addr0 = cpu_to_be64(addr[0] + start);
866 nfrags++;
867 } else {
868 sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
869 sgl->addr0 = cpu_to_be64(addr[1]);
870 }
871
872 sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
873 ULPTX_NSGE_V(nfrags));
874 if (likely(--nfrags == 0))
875 return;

	/*
	 * Deal with the possibility that the SGL wraps past the end of the
	 * descriptor ring: build it in a bounce buffer and copy the pieces
	 * into place afterwards.
	 */
881 to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
882
883 for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
884 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
885 to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
886 to->addr[0] = cpu_to_be64(addr[i]);
887 to->addr[1] = cpu_to_be64(addr[++i]);
888 }
889 if (nfrags) {
890 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
891 to->len[1] = cpu_to_be32(0);
892 to->addr[0] = cpu_to_be64(addr[i + 1]);
893 }
894 if (unlikely((u8 *)end > (u8 *)q->stat)) {
895 unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
896
897 if (likely(part0))
898 memcpy(sgl->sge, buf, part0);
899 part1 = (u8 *)end - (u8 *)q->stat;
900 memcpy(q->desc, (u8 *)buf + part0, part1);
901 end = (void *)q->desc + part1;
902 }
903 if ((uintptr_t)end & 8)
904 *end = 0;
905}

/* This function copies a 64-byte coalesced work request to memory mapped
 * BAR2 space.  For coalesced WRs the SGE fetches data from this FIFO
 * instead of from host memory.
 */
911static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
912{
913 int count = 8;
914
915 while (count) {
916 writeq(*src, dst);
917 src++;
918 dst++;
919 count--;
920 }
921}

/**
 *	ring_tx_db - check and potentially ring a Tx queue's doorbell
 *	@adap: the adapter
 *	@q: the Tx queue
 *	@n: number of new descriptors to give to HW
 *
 *	Ring the doorbell for a Tx queue.
 */
931static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
932{
	/* Make sure that all writes to the TX Descriptors are committed
	 * before we tell the hardware about them.
	 */
936 wmb();

	/* If we don't have access to the new User Doorbell (T5 and later),
	 * use the old doorbell mechanism; otherwise use the new BAR2
	 * mechanism.
	 */
941 if (unlikely(q->bar2_addr == NULL)) {
942 u32 val = PIDX_V(n);
943 unsigned long flags;

		/* For T4 we need to participate in the Doorbell Recovery
		 * mechanism, so writes go through the queue's doorbell lock
		 * and are deferred while doorbells are disabled.
		 */
948 spin_lock_irqsave(&q->db_lock, flags);
949 if (!q->db_disabled)
950 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
951 QID_V(q->cntxt_id) | val);
952 else
953 q->db_pidx_inc += n;
954 q->db_pidx = q->pidx;
955 spin_unlock_irqrestore(&q->db_lock, flags);
956 } else {
957 u32 val = PIDX_T5_V(n);

		/* The PIDX field is narrower on T5 and later (a bit was taken
		 * for Doorbell Priority), so warn if the requested increment
		 * would spill into the DBPRIO bit.
		 */
965 WARN_ON(val & DBPRIO_F);

		/* If we're only writing a single TX Descriptor and we can use
		 * Inferred QID registers, we can use the Write Combining
		 * Doorbell Engine.
		 */
971 if (n == 1 && q->bar2_qid == 0) {
972 int index = (q->pidx
973 ? (q->pidx - 1)
974 : (q->size - 1));
975 u64 *wr = (u64 *)&q->desc[index];
976
977 cxgb_pio_copy((u64 __iomem *)
978 (q->bar2_addr + SGE_UDB_WCDOORBELL),
979 wr);
980 } else {
981 writel(val | QID_V(q->bar2_qid),
982 q->bar2_addr + SGE_UDB_KDOORBELL);
983 }

		/* This Write Memory Barrier will force the write to the User
		 * Doorbell area to be flushed.  This is needed to prevent
		 * writes on different CPUs for the same queue from hitting
		 * the adapter out of order.
		 */
995 wmb();
996 }
997}

/**
 *	inline_tx_skb - inline a packet's data into Tx descriptors
 *	@skb: the packet
 *	@q: the Tx queue where the packet will be inlined
 *	@pos: starting position in the Tx queue where to inline the packet
 *
 *	Inline a packet's contents directly into Tx descriptors, starting at
 *	the given position within the Tx DMA ring.  Most of the complexity of
 *	this operation is dealing with wrap arounds in the middle of the
 *	packet we want to inline.
 */
1010static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
1011 void *pos)
1012{
1013 u64 *p;
1014 int left = (void *)q->stat - pos;
1015
1016 if (likely(skb->len <= left)) {
1017 if (likely(!skb->data_len))
1018 skb_copy_from_linear_data(skb, pos, skb->len);
1019 else
1020 skb_copy_bits(skb, 0, pos, skb->len);
1021 pos += skb->len;
1022 } else {
1023 skb_copy_bits(skb, 0, pos, left);
1024 skb_copy_bits(skb, left, q->desc, skb->len - left);
1025 pos = (void *)q->desc + (skb->len - left);
1026 }

	/* 0-pad to multiple of 16 */
1029 p = PTR_ALIGN(pos, 8);
1030 if ((uintptr_t)p & 8)
1031 *p = 0;
1032}
1033
1034static void *inline_tx_skb_header(const struct sk_buff *skb,
1035 const struct sge_txq *q, void *pos,
1036 int length)
1037{
1038 u64 *p;
1039 int left = (void *)q->stat - pos;
1040
1041 if (likely(length <= left)) {
1042 memcpy(pos, skb->data, length);
1043 pos += length;
1044 } else {
1045 memcpy(pos, skb->data, left);
1046 memcpy(q->desc, skb->data + left, length - left);
1047 pos = (void *)q->desc + (length - left);
1048 }
1049
1050 p = PTR_ALIGN(pos, 8);
1051 if ((uintptr_t)p & 8) {
1052 *p = 0;
1053 return p + 1;
1054 }
1055 return p;
1056}

/*
 * Figure out what HW csum a packet wants and return the appropriate control
 * bits.
 */
1062static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
1063{
1064 int csum_type;
1065 const struct iphdr *iph = ip_hdr(skb);
1066
1067 if (iph->version == 4) {
1068 if (iph->protocol == IPPROTO_TCP)
1069 csum_type = TX_CSUM_TCPIP;
1070 else if (iph->protocol == IPPROTO_UDP)
1071 csum_type = TX_CSUM_UDPIP;
1072 else {
nocsum:			/*
			 * unknown protocol, disable HW csum
			 * and hope a bad packet is detected
			 */
1077 return TXPKT_L4CSUM_DIS_F;
1078 }
1079 } else {
		/* this covers TCP/UDP directly over IPv6 without extension
		 * headers
		 */
1083 const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
1084
1085 if (ip6h->nexthdr == IPPROTO_TCP)
1086 csum_type = TX_CSUM_TCPIP6;
1087 else if (ip6h->nexthdr == IPPROTO_UDP)
1088 csum_type = TX_CSUM_UDPIP6;
1089 else
1090 goto nocsum;
1091 }
1092
1093 if (likely(csum_type >= TX_CSUM_TCPIP)) {
1094 u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb));
1095 int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
1096
1097 if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
1098 hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
1099 else
1100 hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
1101 return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
1102 } else {
1103 int start = skb_transport_offset(skb);
1104
1105 return TXPKT_CSUM_TYPE_V(csum_type) |
1106 TXPKT_CSUM_START_V(start) |
1107 TXPKT_CSUM_LOC_V(start + skb->csum_offset);
1108 }
1109}

static void eth_txq_stop(struct sge_eth_txq *q)
{
	netif_tx_stop_queue(q->txq);
	q->q.stops++;
}

static inline void txq_advance(struct sge_txq *q, unsigned int n)
{
	q->in_use += n;
	q->pidx += n;
	if (q->pidx >= q->size)
		q->pidx -= q->size;
}
1124
1125#ifdef CONFIG_CHELSIO_T4_FCOE
1126static inline int
1127cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
1128 const struct port_info *pi, u64 *cntrl)
1129{
1130 const struct cxgb_fcoe *fcoe = &pi->fcoe;
1131
1132 if (!(fcoe->flags & CXGB_FCOE_ENABLED))
1133 return 0;
1134
1135 if (skb->protocol != htons(ETH_P_FCOE))
1136 return 0;
1137
1138 skb_reset_mac_header(skb);
1139 skb->mac_len = sizeof(struct ethhdr);
1140
1141 skb_set_network_header(skb, skb->mac_len);
1142 skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr));
1143
1144 if (!cxgb_fcoe_sof_eof_supported(adap, skb))
1145 return -ENOTSUPP;

	/* FC CRC offload */
1148 *cntrl = TXPKT_CSUM_TYPE_V(TX_CSUM_FCOE) |
1149 TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F |
1150 TXPKT_CSUM_START_V(CXGB_FCOE_TXPKT_CSUM_START) |
1151 TXPKT_CSUM_END_V(CXGB_FCOE_TXPKT_CSUM_END) |
1152 TXPKT_CSUM_LOC_V(CXGB_FCOE_TXPKT_CSUM_END);
1153 return 0;
1154}
1155#endif

/**
 *	t4_eth_xmit - add a packet to an Ethernet Tx queue
 *	@skb: the packet
 *	@dev: the egress net device
 *
 *	Add a packet to an SGE Ethernet Tx queue.  Runs with softirqs disabled.
 */
1164netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1165{
1166 u32 wr_mid, ctrl0, op;
1167 u64 cntrl, *end;
1168 int qidx, credits;
1169 unsigned int flits, ndesc;
1170 struct adapter *adap;
1171 struct sge_eth_txq *q;
1172 const struct port_info *pi;
1173 struct fw_eth_tx_pkt_wr *wr;
1174 struct cpl_tx_pkt_core *cpl;
1175 const struct skb_shared_info *ssi;
1176 dma_addr_t addr[MAX_SKB_FRAGS + 1];
1177 bool immediate = false;
1178 int len, max_pkt_len;
1179 bool ptp_enabled = is_ptp_enabled(skb, dev);
1180#ifdef CONFIG_CHELSIO_T4_FCOE
1181 int err;
1182#endif

	/*
	 * The chip min packet length is 10 octets but play safe and reject
	 * anything shorter than an Ethernet header.
	 */
1188 if (unlikely(skb->len < ETH_HLEN)) {
1189out_free: dev_kfree_skb_any(skb);
1190 return NETDEV_TX_OK;
1191 }

	/* Discard the packet if the length is greater than mtu */
1194 max_pkt_len = ETH_HLEN + dev->mtu;
1195 if (skb_vlan_tagged(skb))
1196 max_pkt_len += VLAN_HLEN;
1197 if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
1198 goto out_free;
1199
1200 pi = netdev_priv(dev);
1201 adap = pi->adapter;
1202 qidx = skb_get_queue_mapping(skb);
1203 if (ptp_enabled) {
1204 spin_lock(&adap->ptp_lock);
1205 if (!(adap->ptp_tx_skb)) {
1206 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1207 adap->ptp_tx_skb = skb_get(skb);
1208 } else {
1209 spin_unlock(&adap->ptp_lock);
1210 goto out_free;
1211 }
1212 q = &adap->sge.ptptxq;
1213 } else {
1214 q = &adap->sge.ethtxq[qidx + pi->first_qset];
1215 }
1216 skb_tx_timestamp(skb);
1217
1218 reclaim_completed_tx(adap, &q->q, true);
1219 cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
1220
1221#ifdef CONFIG_CHELSIO_T4_FCOE
1222 err = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
1223 if (unlikely(err == -ENOTSUPP)) {
1224 if (ptp_enabled)
1225 spin_unlock(&adap->ptp_lock);
1226 goto out_free;
1227 }
1228#endif
1229
1230 flits = calc_tx_flits(skb);
1231 ndesc = flits_to_desc(flits);
1232 credits = txq_avail(&q->q) - ndesc;
1233
1234 if (unlikely(credits < 0)) {
1235 eth_txq_stop(q);
1236 dev_err(adap->pdev_dev,
1237 "%s: Tx ring %u full while queue awake!\n",
1238 dev->name, qidx);
1239 if (ptp_enabled)
1240 spin_unlock(&adap->ptp_lock);
1241 return NETDEV_TX_BUSY;
1242 }
1243
1244 if (is_eth_imm(skb))
1245 immediate = true;
1246
1247 if (!immediate &&
1248 unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
1249 q->mapping_err++;
1250 if (ptp_enabled)
1251 spin_unlock(&adap->ptp_lock);
1252 goto out_free;
1253 }
1254
1255 wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
1256 if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1257 eth_txq_stop(q);
1258 wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
1259 }
1260
1261 wr = (void *)&q->q.desc[q->q.pidx];
1262 wr->equiq_to_len16 = htonl(wr_mid);
1263 wr->r3 = cpu_to_be64(0);
1264 end = (u64 *)wr + flits;
1265
1266 len = immediate ? skb->len : 0;
1267 ssi = skb_shinfo(skb);
1268 if (ssi->gso_size) {
1269 struct cpl_tx_pkt_lso *lso = (void *)wr;
1270 bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
1271 int l3hdr_len = skb_network_header_len(skb);
1272 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1273
1274 len += sizeof(*lso);
1275 wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
1276 FW_WR_IMMDLEN_V(len));
1277 lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
1278 LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
1279 LSO_IPV6_V(v6) |
1280 LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
1281 LSO_IPHDR_LEN_V(l3hdr_len / 4) |
1282 LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
1283 lso->c.ipid_ofst = htons(0);
1284 lso->c.mss = htons(ssi->gso_size);
1285 lso->c.seqno_offset = htonl(0);
1286 if (is_t4(adap->params.chip))
1287 lso->c.len = htonl(skb->len);
1288 else
1289 lso->c.len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
1290 cpl = (void *)(lso + 1);
1291
1292 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
1293 cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
1294 else
1295 cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
1296
1297 cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
1298 TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
1299 TXPKT_IPHDR_LEN_V(l3hdr_len);
1300 q->tso++;
1301 q->tx_cso += ssi->gso_segs;
1302 } else {
1303 len += sizeof(*cpl);
1304 if (ptp_enabled)
1305 op = FW_PTP_TX_PKT_WR;
1306 else
1307 op = FW_ETH_TX_PKT_WR;
1308 wr->op_immdlen = htonl(FW_WR_OP_V(op) |
1309 FW_WR_IMMDLEN_V(len));
1310 cpl = (void *)(wr + 1);
1311 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1312 cntrl = hwcsum(adap->params.chip, skb) |
1313 TXPKT_IPCSUM_DIS_F;
1314 q->tx_cso++;
1315 }
1316 }
1317
1318 if (skb_vlan_tag_present(skb)) {
1319 q->vlan_ins++;
1320 cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
1321#ifdef CONFIG_CHELSIO_T4_FCOE
1322 if (skb->protocol == htons(ETH_P_FCOE))
1323 cntrl |= TXPKT_VLAN_V(
1324 ((skb->priority & 0x7) << VLAN_PRIO_SHIFT));
1325#endif
1326 }
1327
1328 ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
1329 TXPKT_PF_V(adap->pf);
1330 if (ptp_enabled)
1331 ctrl0 |= TXPKT_TSTAMP_F;
1332#ifdef CONFIG_CHELSIO_T4_DCB
1333 if (is_t4(adap->params.chip))
1334 ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio);
1335 else
1336 ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio);
1337#endif
1338 cpl->ctrl0 = htonl(ctrl0);
1339 cpl->pack = htons(0);
1340 cpl->len = htons(skb->len);
1341 cpl->ctrl1 = cpu_to_be64(cntrl);
1342
1343 if (immediate) {
1344 inline_tx_skb(skb, &q->q, cpl + 1);
1345 dev_consume_skb_any(skb);
1346 } else {
1347 int last_desc;
1348
1349 write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
1350 addr);
1351 skb_orphan(skb);
1352
1353 last_desc = q->q.pidx + ndesc - 1;
1354 if (last_desc >= q->q.size)
1355 last_desc -= q->q.size;
1356 q->q.sdesc[last_desc].skb = skb;
1357 q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
1358 }
1359
1360 txq_advance(&q->q, ndesc);
1361
1362 ring_tx_db(adap, &q->q, ndesc);
1363 if (ptp_enabled)
1364 spin_unlock(&adap->ptp_lock);
1365 return NETDEV_TX_OK;
1366}

/**
 *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
 *	@q: the queue to reclaim
 *
 *	Reclaims Tx descriptors that the SGE has indicated it has processed.
 *	The queues that use only immediate data have no associated Tx buffers,
 *	so the reclaim is purely an index and accounting update.
 */
1376static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1377{
1378 int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx));
1379 int reclaim = hw_cidx - q->cidx;
1380
1381 if (reclaim < 0)
1382 reclaim += q->size;
1383
1384 q->in_use -= reclaim;
1385 q->cidx = hw_cidx;
1386}

/**
 *	is_imm - check whether a packet can be sent as immediate data
 *	@skb: the packet
 *
 *	Returns true if a packet can be sent as a WR with immediate data.
 */
1394static inline int is_imm(const struct sk_buff *skb)
1395{
1396 return skb->len <= MAX_CTRL_WR_LEN;
1397}

/**
 *	ctrlq_check_stop - check if a control queue is full and should stop
 *	@q: the queue
 *	@wr: most recent WR written to the queue
 *
 *	Check if a control queue has become full and should be stopped.
 *	Control queue descriptors are cleaned up lazily; if the queue is still
 *	full after reclaiming completed descriptors we suspend it and have the
 *	last WR request a wakeup.
 */
1409static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
1410{
1411 reclaim_completed_tx_imm(&q->q);
1412 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
1413 wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
1414 q->q.stops++;
1415 q->full = 1;
1416 }
1417}

/**
 *	ctrl_xmit - send a packet through an SGE control Tx queue
 *	@q: the control queue
 *	@skb: the packet
 *
 *	Send a packet through an SGE control Tx queue.  Packets sent through
 *	a control queue must fit entirely as immediate data.
 */
1427static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
1428{
1429 unsigned int ndesc;
1430 struct fw_wr_hdr *wr;
1431
1432 if (unlikely(!is_imm(skb))) {
1433 WARN_ON(1);
1434 dev_kfree_skb(skb);
1435 return NET_XMIT_DROP;
1436 }
1437
1438 ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
1439 spin_lock(&q->sendq.lock);
1440
1441 if (unlikely(q->full)) {
1442 skb->priority = ndesc;
1443 __skb_queue_tail(&q->sendq, skb);
1444 spin_unlock(&q->sendq.lock);
1445 return NET_XMIT_CN;
1446 }
1447
1448 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
1449 inline_tx_skb(skb, &q->q, wr);
1450
1451 txq_advance(&q->q, ndesc);
1452 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
1453 ctrlq_check_stop(q, wr);
1454
1455 ring_tx_db(q->adap, &q->q, ndesc);
1456 spin_unlock(&q->sendq.lock);
1457
1458 kfree_skb(skb);
1459 return NET_XMIT_SUCCESS;
1460}

/**
 *	restart_ctrlq - restart a suspended control queue
 *	@data: the control queue to restart
 *
 *	Resumes transmission on a suspended Tx control queue.
 */
1468static void restart_ctrlq(unsigned long data)
1469{
1470 struct sk_buff *skb;
1471 unsigned int written = 0;
1472 struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;
1473
1474 spin_lock(&q->sendq.lock);
1475 reclaim_completed_tx_imm(&q->q);
1476 BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES);
1477
1478 while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
1479 struct fw_wr_hdr *wr;
1480 unsigned int ndesc = skb->priority;
1481
1482 written += ndesc;

		/* Write descriptors and free skbs outside the lock to limit
		 * wait times.  q->full is still set so new skbs will be
		 * queued.
		 */
1486 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
1487 txq_advance(&q->q, ndesc);
1488 spin_unlock(&q->sendq.lock);
1489
1490 inline_tx_skb(skb, &q->q, wr);
1491 kfree_skb(skb);
1492
1493 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
1494 unsigned long old = q->q.stops;
1495
1496 ctrlq_check_stop(q, wr);
1497 if (q->q.stops != old) {
1498 spin_lock(&q->sendq.lock);
1499 goto ringdb;
1500 }
1501 }
1502 if (written > 16) {
1503 ring_tx_db(q->adap, &q->q, written);
1504 written = 0;
1505 }
1506 spin_lock(&q->sendq.lock);
1507 }
1508 q->full = 0;
1509ringdb: if (written)
1510 ring_tx_db(q->adap, &q->q, written);
1511 spin_unlock(&q->sendq.lock);
1512}

/**
 *	t4_mgmt_tx - send a management message
 *	@adap: the adapter
 *	@skb: the packet containing the management message
 *
 *	Send a management message through control queue 0.
 */
1521int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1522{
1523 int ret;
1524
1525 local_bh_disable();
1526 ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
1527 local_bh_enable();
1528 return ret;
1529}

/**
 *	is_ofld_imm - check whether a packet can be sent as immediate data
 *	@skb: the packet
 *
 *	Returns true if a packet can be sent as an offload WR with immediate
 *	data.  We currently use the same limit as for Ethernet packets.
 */
1538static inline int is_ofld_imm(const struct sk_buff *skb)
1539{
1540 return skb->len <= MAX_IMM_TX_PKT_LEN;
1541}

/**
 *	calc_tx_flits_ofld - calculate # of flits for an offload packet
 *	@skb: the packet
 *
 *	Returns the number of flits needed for the given offload packet.
 *	These packets are already fully constructed and no additional headers
 *	will be added.
 */
1551static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
1552{
1553 unsigned int flits, cnt;
1554
1555 if (is_ofld_imm(skb))
1556 return DIV_ROUND_UP(skb->len, 8);
1557
1558 flits = skb_transport_offset(skb) / 8U;
1559 cnt = skb_shinfo(skb)->nr_frags;
1560 if (skb_tail_pointer(skb) != skb_transport_header(skb))
1561 cnt++;
1562 return flits + sgl_len(cnt);
1563}

/**
 *	txq_stop_maperr - stop a Tx queue due to a DMA mapping error
 *	@q: the queue to stop
 *
 *	Mark a Tx queue stopped because a WR's SGL could not be DMA-mapped,
 *	and record the queue in the bitmap of queues needing a restart.
 */
1574static void txq_stop_maperr(struct sge_uld_txq *q)
1575{
1576 q->mapping_err++;
1577 q->q.stops++;
1578 set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
1579 q->adap->sge.txq_maperr);
1580}

/**
 *	ofldtxq_stop - stop an offload Tx queue that has become full
 *	@q: the queue to stop
 *	@skb: the packet causing the queue to become full
 *
 *	Stops an offload Tx queue that has become full and modifies the packet
 *	being written to request a wakeup.
 */
1590static void ofldtxq_stop(struct sge_uld_txq *q, struct sk_buff *skb)
1591{
1592 struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data;
1593
1594 wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
1595 q->q.stops++;
1596 q->full = 1;
1597}

/**
 *	service_ofldq - service/restart a suspended offload queue
 *	@q: the offload queue
 *
 *	Services an offload Tx queue by moving packets from its Pending Send
 *	Queue to the Hardware Tx Ring.
 *
 *	This function must be called with the Pending Send Queue Lock held.
 *	If the routine is already running on another CPU, a new caller only
 *	needs to add a packet to the Pending Send Queue; the running service
 *	loop will pick it up.
 */
1617static void service_ofldq(struct sge_uld_txq *q)
1618{
1619 u64 *pos, *before, *end;
1620 int credits;
1621 struct sk_buff *skb;
1622 struct sge_txq *txq;
1623 unsigned int left;
1624 unsigned int written = 0;
1625 unsigned int flits, ndesc;

	/* We need to avoid re-entering this routine: if another thread is
	 * already servicing the queue, simply return; the packet that was
	 * just queued will be handled by that thread's loop below.
	 */
1633 if (q->service_ofldq_running)
1634 return;
1635 q->service_ofldq_running = true;
1636
1637 while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {

		/* We drop the Pending Send Queue lock while we work on a
		 * packet so new packets can be queued without blocking; the
		 * skb stays on the sendq until it is completely written, so
		 * a concurrent restart will find it again.
		 */
1645 spin_unlock(&q->sendq.lock);
1646
1647 reclaim_completed_tx(q->adap, &q->q, false);
1648
1649 flits = skb->priority;
1650 ndesc = flits_to_desc(flits);
1651 credits = txq_avail(&q->q) - ndesc;
1652 BUG_ON(credits < 0);
1653 if (unlikely(credits < TXQ_STOP_THRES))
1654 ofldtxq_stop(q, skb);
1655
1656 pos = (u64 *)&q->q.desc[q->q.pidx];
1657 if (is_ofld_imm(skb))
1658 inline_tx_skb(skb, &q->q, pos);
1659 else if (map_skb(q->adap->pdev_dev, skb,
1660 (dma_addr_t *)skb->head)) {
1661 txq_stop_maperr(q);
1662 spin_lock(&q->sendq.lock);
1663 break;
1664 } else {
1665 int last_desc, hdr_len = skb_transport_offset(skb);

			/* The WR headers (up to hdr_len) are copied inline;
			 * the payload that follows is handed to the hardware
			 * via a gather list.
			 */
1670 before = (u64 *)pos;
1671 end = (u64 *)pos + flits;
1672 txq = &q->q;
1673 pos = (void *)inline_tx_skb_header(skb, &q->q,
1674 (void *)pos,
1675 hdr_len);
1676 if (before > (u64 *)pos) {
1677 left = (u8 *)end - (u8 *)txq->stat;
1678 end = (void *)txq->desc + left;
1679 }

			/* If the inline header ends exactly at the Status
			 * Page, start the SGL at the beginning of the ring
			 * and rebase the end pointer accordingly.
			 */
1685 if (pos == (u64 *)txq->stat) {
1686 left = (u8 *)end - (u8 *)txq->stat;
1687 end = (void *)txq->desc + left;
1688 pos = (void *)txq->desc;
1689 }
1690
1691 write_sgl(skb, &q->q, (void *)pos,
1692 end, hdr_len,
1693 (dma_addr_t *)skb->head);
1694#ifdef CONFIG_NEED_DMA_MAP_STATE
1695 skb->dev = q->adap->port[0];
1696 skb->destructor = deferred_unmap_destructor;
1697#endif
1698 last_desc = q->q.pidx + ndesc - 1;
1699 if (last_desc >= q->q.size)
1700 last_desc -= q->q.size;
1701 q->q.sdesc[last_desc].skb = skb;
1702 }
1703
1704 txq_advance(&q->q, ndesc);
1705 written += ndesc;
1706 if (unlikely(written > 32)) {
1707 ring_tx_db(q->adap, &q->q, written);
1708 written = 0;
1709 }

		/* Reacquire the Pending Send Queue lock before unlinking the
		 * packet we've just written; immediate-data skbs can be freed
		 * now, mapped ones are freed when their descriptors are
		 * reclaimed.
		 */
1716 spin_lock(&q->sendq.lock);
1717 __skb_unlink(skb, &q->sendq);
1718 if (is_ofld_imm(skb))
1719 kfree_skb(skb);
1720 }
1721 if (likely(written))
1722 ring_tx_db(q->adap, &q->q, written);

	/* Indicate that no thread is servicing the Pending Send Queue
	 * any longer.
	 */
1727 q->service_ofldq_running = false;
1728}

/**
 *	ofld_xmit - send a packet through an offload queue
 *	@q: the Tx offload queue
 *	@skb: the packet
 *
 *	Send an offload packet through an SGE offload queue.
 */
1737static int ofld_xmit(struct sge_uld_txq *q, struct sk_buff *skb)
1738{
1739 skb->priority = calc_tx_flits_ofld(skb);
1740 spin_lock(&q->sendq.lock);

	/* Queue the new skb onto the queue's Pending Send Queue.  If there's
	 * already another thread servicing the queue it will pick this packet
	 * up; otherwise kick off the service loop ourselves.
	 */
1750 __skb_queue_tail(&q->sendq, skb);
1751 if (q->sendq.qlen == 1)
1752 service_ofldq(q);
1753
1754 spin_unlock(&q->sendq.lock);
1755 return NET_XMIT_SUCCESS;
1756}

/**
 *	restart_ofldq - restart a suspended offload queue
 *	@data: the offload queue to restart
 *
 *	Resumes transmission on a suspended Tx offload queue.
 */
1764static void restart_ofldq(unsigned long data)
1765{
1766 struct sge_uld_txq *q = (struct sge_uld_txq *)data;
1767
1768 spin_lock(&q->sendq.lock);
1769 q->full = 0;
1770 service_ofldq(q);
1771 spin_unlock(&q->sendq.lock);
1772}

/**
 *	skb_txq - return the Tx queue an offload packet should use
 *	@skb: the packet
 *
 *	Returns the Tx queue an offload packet should use as indicated by bits
 *	1-15 in the packet's queue_mapping.
 */
1781static inline unsigned int skb_txq(const struct sk_buff *skb)
1782{
1783 return skb->queue_mapping >> 1;
1784}

/**
 *	is_ctrl_pkt - return whether an offload packet is a control packet
 *	@skb: the packet
 *
 *	Returns whether an offload packet should use an OFLD or a CTRL
 *	Tx queue as indicated by bit 0 in the packet's queue_mapping.
 */
1793static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
1794{
1795 return skb->queue_mapping & 1;
1796}
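
/*
 * Illustrative example of the queue_mapping encoding assumed above: a value
 * of 7 (binary 111) selects queue index 3 (7 >> 1) and, because bit 0 is
 * set, routes the packet to a control queue rather than an OFLD queue.
 */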
1797
1798static inline int uld_send(struct adapter *adap, struct sk_buff *skb,
1799 unsigned int tx_uld_type)
1800{
1801 struct sge_uld_txq_info *txq_info;
1802 struct sge_uld_txq *txq;
1803 unsigned int idx = skb_txq(skb);
1804
1805 if (unlikely(is_ctrl_pkt(skb))) {
		/* Single ctrl queue is a requirement for LE workaround path */
1807 if (adap->tids.nsftids)
1808 idx = 0;
1809 return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
1810 }
1811
1812 txq_info = adap->sge.uld_txq_info[tx_uld_type];
1813 if (unlikely(!txq_info)) {
1814 WARN_ON(true);
1815 return NET_XMIT_DROP;
1816 }
1817
1818 txq = &txq_info->uldtxq[idx];
1819 return ofld_xmit(txq, skb);
1820}

/**
 *	t4_ofld_send - send an offload packet
 *	@adap: the adapter
 *	@skb: the packet
 *
 *	Sends an offload packet.  We use the packet queue_mapping to select
 *	the appropriate Tx queue as follows: bit 0 indicates whether the
 *	packet should be sent as regular or control, bits 1-15 select the
 *	queue.
 */
1831int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
1832{
1833 int ret;
1834
1835 local_bh_disable();
1836 ret = uld_send(adap, skb, CXGB4_TX_OFLD);
1837 local_bh_enable();
1838 return ret;
1839}

/**
 *	cxgb4_ofld_send - send an offload packet
 *	@dev: the net device
 *	@skb: the packet
 *
 *	Sends an offload packet.  This is an exported version of @t4_ofld_send,
 *	intended for ULDs.
 */
1849int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
1850{
1851 return t4_ofld_send(netdev2adap(dev), skb);
1852}
1853EXPORT_SYMBOL(cxgb4_ofld_send);

/**
 *	t4_crypto_send - send a crypto packet
 *	@adap: the adapter
 *	@skb: the packet
 *
 *	Sends a crypto packet through the CXGB4_TX_CRYPTO Tx queue set.  We
 *	use the packet queue_mapping to select the appropriate Tx queue.
 */
1864static int t4_crypto_send(struct adapter *adap, struct sk_buff *skb)
1865{
1866 int ret;
1867
1868 local_bh_disable();
1869 ret = uld_send(adap, skb, CXGB4_TX_CRYPTO);
1870 local_bh_enable();
1871 return ret;
1872}

/**
 *	cxgb4_crypto_send - send a crypto packet
 *	@dev: the net device
 *	@skb: the packet
 *
 *	Sends a crypto packet.  This is an exported version of
 *	@t4_crypto_send, intended for ULDs.
 */
1882int cxgb4_crypto_send(struct net_device *dev, struct sk_buff *skb)
1883{
1884 return t4_crypto_send(netdev2adap(dev), skb);
1885}
1886EXPORT_SYMBOL(cxgb4_crypto_send);
1887
1888static inline void copy_frags(struct sk_buff *skb,
1889 const struct pkt_gl *gl, unsigned int offset)
1890{
1891 int i;

	/* usually there's just one frag */
1894 __skb_fill_page_desc(skb, 0, gl->frags[0].page,
1895 gl->frags[0].offset + offset,
1896 gl->frags[0].size - offset);
1897 skb_shinfo(skb)->nr_frags = gl->nfrags;
1898 for (i = 1; i < gl->nfrags; i++)
1899 __skb_fill_page_desc(skb, i, gl->frags[i].page,
1900 gl->frags[i].offset,
1901 gl->frags[i].size);

	/* get a reference to the last page, we don't own it */
1904 get_page(gl->frags[gl->nfrags - 1].page);
1905}

/**
 *	cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
 *	@gl: the gather list
 *	@skb_len: size of sk_buff main body if it carries fragments
 *	@pull_len: amount of data to move to the sk_buff's main body
 *
 *	Builds an sk_buff from the given packet gather list.  Returns the
 *	sk_buff or %NULL if sk_buff allocation failed.
 */
1916struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
1917 unsigned int skb_len, unsigned int pull_len)
1918{
1919 struct sk_buff *skb;

	/*
	 * Below we rely on RX_COPY_THRES being less than the smallest Rx
	 * buffer size, which is expected since buffers are at least
	 * PAGE_SIZEd.  In this case packets up to RX_COPY_THRES have only
	 * one fragment.
	 */
1926 if (gl->tot_len <= RX_COPY_THRES) {
1927 skb = dev_alloc_skb(gl->tot_len);
1928 if (unlikely(!skb))
1929 goto out;
1930 __skb_put(skb, gl->tot_len);
1931 skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
1932 } else {
1933 skb = dev_alloc_skb(skb_len);
1934 if (unlikely(!skb))
1935 goto out;
1936 __skb_put(skb, pull_len);
1937 skb_copy_to_linear_data(skb, gl->va, pull_len);
1938
1939 copy_frags(skb, gl, pull_len);
1940 skb->len = gl->tot_len;
1941 skb->data_len = skb->len - pull_len;
1942 skb->truesize += skb->data_len;
1943 }
1944out: return skb;
1945}
1946EXPORT_SYMBOL(cxgb4_pktgl_to_skb);

/**
 *	t4_pktgl_free - free a packet gather list
 *	@gl: the gather list
 *
 *	Releases the pages of a packet gather list.  We do not own the last
 *	page on the list and do not free it.
 */
1955static void t4_pktgl_free(const struct pkt_gl *gl)
1956{
1957 int n;
1958 const struct page_frag *p;
1959
1960 for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
1961 put_page(p->page);
1962}

/*
 * Process an MPS trace packet.  Give it an unused protocol number so it
 * won't be delivered to anyone and send it up the netif receive path.
 */
1968static noinline int handle_trace_pkt(struct adapter *adap,
1969 const struct pkt_gl *gl)
1970{
1971 struct sk_buff *skb;
1972
1973 skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
1974 if (unlikely(!skb)) {
1975 t4_pktgl_free(gl);
1976 return 0;
1977 }
1978
1979 if (is_t4(adap->params.chip))
1980 __skb_pull(skb, sizeof(struct cpl_trace_pkt));
1981 else
1982 __skb_pull(skb, sizeof(struct cpl_t5_trace_pkt));
1983
1984 skb_reset_mac_header(skb);
1985 skb->protocol = htons(0xffff);
1986 skb->dev = adap->port[0];
1987 netif_receive_skb(skb);
1988 return 0;
1989}

/**
 *	cxgb4_sgetim_to_hwtstamp - convert sge time stamp to hw time stamp
 *	@adap: the adapter
 *	@hwtstamps: time stamp structure to update
 *	@sgetstamp: 60bit iqe timestamp
 *
 *	Every ingress queue entry has the 60-bit timestamp; convert it to a
 *	real time stamp using the core clock frequency (in kHz).
 */
2000static void cxgb4_sgetim_to_hwtstamp(struct adapter *adap,
2001 struct skb_shared_hwtstamps *hwtstamps,
2002 u64 sgetstamp)
2003{
2004 u64 ns;
2005 u64 tmp = (sgetstamp * 1000 * 1000 + adap->params.vpd.cclk / 2);
2006
2007 ns = div_u64(tmp, adap->params.vpd.cclk);
2008
2009 memset(hwtstamps, 0, sizeof(*hwtstamps));
2010 hwtstamps->hwtstamp = ns_to_ktime(ns);
2011}
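
/*
 * Illustrative arithmetic (values assumed, not from this source): if the
 * core clock is 250,000 kHz, each SGE timestamp tick is 1,000,000 / 250,000
 * = 4 ns, so an sgetstamp of 1,000 ticks converts to roughly 4,000 ns.
 */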
2012
2013static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
2014 const struct cpl_rx_pkt *pkt)
2015{
2016 struct adapter *adapter = rxq->rspq.adap;
2017 struct sge *s = &adapter->sge;
2018 struct port_info *pi;
2019 int ret;
2020 struct sk_buff *skb;
2021
2022 skb = napi_get_frags(&rxq->rspq.napi);
2023 if (unlikely(!skb)) {
2024 t4_pktgl_free(gl);
2025 rxq->stats.rx_drops++;
2026 return;
2027 }
2028
2029 copy_frags(skb, gl, s->pktshift);
2030 skb->len = gl->tot_len - s->pktshift;
2031 skb->data_len = skb->len;
2032 skb->truesize += skb->data_len;
2033 skb->ip_summed = CHECKSUM_UNNECESSARY;
2034 skb_record_rx_queue(skb, rxq->rspq.idx);
2035 pi = netdev_priv(skb->dev);
2036 if (pi->rxtstamp)
2037 cxgb4_sgetim_to_hwtstamp(adapter, skb_hwtstamps(skb),
2038 gl->sgetstamp);
2039 if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
2040 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
2041 PKT_HASH_TYPE_L3);
2042
2043 if (unlikely(pkt->vlan_ex)) {
2044 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
2045 rxq->stats.vlan_ex++;
2046 }
2047 ret = napi_gro_frags(&rxq->rspq.napi);
2048 if (ret == GRO_HELD)
2049 rxq->stats.lro_pkts++;
2050 else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
2051 rxq->stats.lro_merged++;
2052 rxq->stats.pkts++;
2053 rxq->stats.rx_cso++;
2054}
2055
2056enum {
2057 RX_NON_PTP_PKT = 0,
2058 RX_PTP_PKT_SUC = 1,
2059 RX_PTP_PKT_ERR = 2
2060};

/**
 *	t4_systim_to_hwstamp - read hardware time stamp
 *	@adapter: the adapter
 *	@skb: the packet
 *
 *	Read the hardware time stamp from an MPS packet and store it in the
 *	skb, which is then forwarded to the PTP application.
 */
2070static noinline int t4_systim_to_hwstamp(struct adapter *adapter,
2071 struct sk_buff *skb)
2072{
2073 struct skb_shared_hwtstamps *hwtstamps;
2074 struct cpl_rx_mps_pkt *cpl = NULL;
2075 unsigned char *data;
2076 int offset;
2077
2078 cpl = (struct cpl_rx_mps_pkt *)skb->data;
2079 if (!(CPL_RX_MPS_PKT_TYPE_G(ntohl(cpl->op_to_r1_hi)) &
2080 X_CPL_RX_MPS_PKT_TYPE_PTP))
2081 return RX_PTP_PKT_ERR;
2082
2083 data = skb->data + sizeof(*cpl);
2084 skb_pull(skb, 2 * sizeof(u64) + sizeof(struct cpl_rx_mps_pkt));
2085 offset = ETH_HLEN + IPV4_HLEN(skb->data) + UDP_HLEN;
2086 if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(short))
2087 return RX_PTP_PKT_ERR;
2088
2089 hwtstamps = skb_hwtstamps(skb);
2090 memset(hwtstamps, 0, sizeof(*hwtstamps));
2091 hwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*((u64 *)data)));
2092
2093 return RX_PTP_PKT_SUC;
2094}

/**
 *	t4_rx_hststamp - Recv PTP Event Message
 *	@adapter: the adapter
 *	@rsp: the response queue descriptor holding the RX_PKT message
 *	@rxq: the response queue
 *	@skb: the packet
 *
 *	PTP event packets arrive wrapped in a CPL_RX_MPS_PKT message on T5 and
 *	later; extract the hardware receive timestamp from them.
 */
2104static int t4_rx_hststamp(struct adapter *adapter, const __be64 *rsp,
2105 struct sge_eth_rxq *rxq, struct sk_buff *skb)
2106{
2107 int ret;
2108
2109 if (unlikely((*(u8 *)rsp == CPL_RX_MPS_PKT) &&
2110 !is_t4(adapter->params.chip))) {
2111 ret = t4_systim_to_hwstamp(adapter, skb);
2112 if (ret == RX_PTP_PKT_ERR) {
2113 kfree_skb(skb);
2114 rxq->stats.rx_drops++;
2115 }
2116 return ret;
2117 }
2118 return RX_NON_PTP_PKT;
2119}

/**
 *	t4_tx_hststamp - Loopback PTP Transmit Event Message
 *	@adapter: the adapter
 *	@skb: the packet
 *	@dev: the ingress net device
 *
 *	Read the hardware timestamp for the looped-back PTP Tx event message.
 */
2129static int t4_tx_hststamp(struct adapter *adapter, struct sk_buff *skb,
2130 struct net_device *dev)
2131{
2132 struct port_info *pi = netdev_priv(dev);
2133
2134 if (!is_t4(adapter->params.chip) && adapter->ptp_tx_skb) {
2135 cxgb4_ptp_read_hwstamp(adapter, pi);
2136 kfree_skb(skb);
2137 return 0;
2138 }
2139 return 1;
2140}

/**
 *	t4_ethrx_handler - process an ingress ethernet packet
 *	@q: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the RX_PKT message
 *	@si: the gather list of packet fragments
 *
 *	Process an ingress ethernet packet and deliver it to the stack.
 */
2150int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
2151 const struct pkt_gl *si)
2152{
2153 bool csum_ok;
2154 struct sk_buff *skb;
2155 const struct cpl_rx_pkt *pkt;
2156 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
2157 struct adapter *adapter = q->adap;
2158 struct sge *s = &q->adap->sge;
2159 int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
2160 CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
2161 u16 err_vec;
2162 struct port_info *pi;
2163 int ret = 0;
2164
2165 if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
2166 return handle_trace_pkt(q->adap, si);
2167
2168 pkt = (const struct cpl_rx_pkt *)rsp;
2169
2170 if (q->adap->params.tp.rx_pkt_encap)
2171 err_vec = T6_COMPR_RXERR_VEC_G(be16_to_cpu(pkt->err_vec));
2172 else
2173 err_vec = be16_to_cpu(pkt->err_vec);
2174
2175 csum_ok = pkt->csum_calc && !err_vec &&
2176 (q->netdev->features & NETIF_F_RXCSUM);
2177 if ((pkt->l2info & htonl(RXF_TCP_F)) &&
2178 (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
2179 do_gro(rxq, si, pkt);
2180 return 0;
2181 }
2182
2183 skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
2184 if (unlikely(!skb)) {
2185 t4_pktgl_free(si);
2186 rxq->stats.rx_drops++;
2187 return 0;
2188 }
2189 pi = netdev_priv(q->netdev);

	/* Handle PTP Event Rx packet */
2192 if (unlikely(pi->ptp_enable)) {
2193 ret = t4_rx_hststamp(adapter, rsp, rxq, skb);
2194 if (ret == RX_PTP_PKT_ERR)
2195 return 0;
2196 }
2197 if (likely(!ret))
2198 __skb_pull(skb, s->pktshift);

	/* Handle the PTP Event Tx Loopback packet */
2201 if (unlikely(pi->ptp_enable && !ret &&
2202 (pkt->l2info & htonl(RXF_UDP_F)) &&
2203 cxgb4_ptp_is_ptp_rx(skb))) {
2204 if (!t4_tx_hststamp(adapter, skb, q->netdev))
2205 return 0;
2206 }
2207
2208 skb->protocol = eth_type_trans(skb, q->netdev);
2209 skb_record_rx_queue(skb, q->idx);
2210 if (skb->dev->features & NETIF_F_RXHASH)
2211 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
2212 PKT_HASH_TYPE_L3);
2213
2214 rxq->stats.pkts++;
2215
2216 if (pi->rxtstamp)
2217 cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb),
2218 si->sgetstamp);
2219 if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) {
2220 if (!pkt->ip_frag) {
2221 skb->ip_summed = CHECKSUM_UNNECESSARY;
2222 rxq->stats.rx_cso++;
2223 } else if (pkt->l2info & htonl(RXF_IP_F)) {
2224 __sum16 c = (__force __sum16)pkt->csum;
2225 skb->csum = csum_unfold(c);
2226 skb->ip_summed = CHECKSUM_COMPLETE;
2227 rxq->stats.rx_cso++;
2228 }
2229 } else {
2230 skb_checksum_none_assert(skb);
2231#ifdef CONFIG_CHELSIO_T4_FCOE
2232#define CPL_RX_PKT_FLAGS (RXF_PSH_F | RXF_SYN_F | RXF_UDP_F | \
2233 RXF_TCP_F | RXF_IP_F | RXF_IP6_F | RXF_LRO_F)
2234
2235 if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) {
2236 if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) &&
2237 (pi->fcoe.flags & CXGB_FCOE_ENABLED)) {
2238 if (q->adap->params.tp.rx_pkt_encap)
2239 csum_ok = err_vec &
2240 T6_COMPR_RXERR_SUM_F;
2241 else
2242 csum_ok = err_vec & RXERR_CSUM_F;
2243 if (!csum_ok)
2244 skb->ip_summed = CHECKSUM_UNNECESSARY;
2245 }
2246 }
2247
2248#undef CPL_RX_PKT_FLAGS
2249#endif
2250 }
2251
2252 if (unlikely(pkt->vlan_ex)) {
2253 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
2254 rxq->stats.vlan_ex++;
2255 }
2256 skb_mark_napi_id(skb, &q->napi);
2257 netif_receive_skb(skb);
2258 return 0;
2259}

/**
 *	restore_rx_bufs - put back a packet's Rx buffers
 *	@si: the packet gather list
 *	@q: the SGE free list
 *	@frags: number of FL buffers to restore
 *
 *	Puts back on an FL the Rx buffers associated with @si.  The buffers
 *	have already been unmapped and are left unmapped, we mark them so to
 *	prevent further unmapping attempts.
 *
 *	This function undoes a series of @unmap_rx_buf calls when we find out
 *	that the current packet can't be processed right away after all and
 *	we need to come back to it later.  This is a very rare event and
 *	there's no effort to make this particularly efficient.
 */
2276static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
2277 int frags)
2278{
2279 struct rx_sw_desc *d;
2280
2281 while (frags--) {
2282 if (q->cidx == 0)
2283 q->cidx = q->size - 1;
2284 else
2285 q->cidx--;
2286 d = &q->sdesc[q->cidx];
2287 d->page = si->frags[frags].page;
2288 d->dma_addr |= RX_UNMAPPED_BUF;
2289 q->avail++;
2290 }
2291}

/**
 *	is_new_response - check if a response is newly written
 *	@r: the response descriptor
 *	@q: the response queue
 *
 *	Returns true if a response descriptor contains a yet unprocessed
 *	response.
 */
2301static inline bool is_new_response(const struct rsp_ctrl *r,
2302 const struct sge_rspq *q)
2303{
2304 return (r->type_gen >> RSPD_GEN_S) == q->gen;
2305}

/**
 *	rspq_next - advance to the next entry in a response queue
 *	@q: the queue
 *
 *	Updates the state of a response queue to advance it to the next entry.
 */
2313static inline void rspq_next(struct sge_rspq *q)
2314{
2315 q->cur_desc = (void *)q->cur_desc + q->iqe_len;
2316 if (unlikely(++q->cidx == q->size)) {
2317 q->cidx = 0;
2318 q->gen ^= 1;
2319 q->cur_desc = q->desc;
2320 }
2321}

/**
 *	process_responses - process responses from an SGE response queue
 *	@q: the ingress queue to process
 *	@budget: how many responses can be processed in this round
 *
 *	Process responses from an SGE response queue up to the supplied budget.
 *	Responses include received packets as well as control messages from FW
 *	or HW.
 *
 *	Additionally choose the interrupt holdoff time for the next interrupt
 *	on this queue.  If the system is under memory shortage use a fairly
 *	long delay to help recovery.
 */
2336static int process_responses(struct sge_rspq *q, int budget)
2337{
2338 int ret, rsp_type;
2339 int budget_left = budget;
2340 const struct rsp_ctrl *rc;
2341 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
2342 struct adapter *adapter = q->adap;
2343 struct sge *s = &adapter->sge;
2344
2345 while (likely(budget_left)) {
2346 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
2347 if (!is_new_response(rc, q)) {
2348 if (q->flush_handler)
2349 q->flush_handler(q);
2350 break;
2351 }
2352
2353 dma_rmb();
2354 rsp_type = RSPD_TYPE_G(rc->type_gen);
2355 if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
2356 struct page_frag *fp;
2357 struct pkt_gl si;
2358 const struct rx_sw_desc *rsd;
2359 u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
2360
2361 if (len & RSPD_NEWBUF_F) {
2362 if (likely(q->offset > 0)) {
2363 free_rx_bufs(q->adap, &rxq->fl, 1);
2364 q->offset = 0;
2365 }
2366 len = RSPD_LEN_G(len);
2367 }
2368 si.tot_len = len;

			/* gather packet fragments */
2371 for (frags = 0, fp = si.frags; ; frags++, fp++) {
2372 rsd = &rxq->fl.sdesc[rxq->fl.cidx];
2373 bufsz = get_buf_size(adapter, rsd);
2374 fp->page = rsd->page;
2375 fp->offset = q->offset;
2376 fp->size = min(bufsz, len);
2377 len -= fp->size;
2378 if (!len)
2379 break;
2380 unmap_rx_buf(q->adap, &rxq->fl);
2381 }
2382
2383 si.sgetstamp = SGE_TIMESTAMP_G(
2384 be64_to_cpu(rc->last_flit));

			/* The last buffer of the packet remains mapped; make
			 * it coherent for CPU access before we look at it.
			 */
2389 dma_sync_single_for_cpu(q->adap->pdev_dev,
2390 get_buf_addr(rsd),
2391 fp->size, DMA_FROM_DEVICE);
2392
2393 si.va = page_address(si.frags[0].page) +
2394 si.frags[0].offset;
2395 prefetch(si.va);
2396
2397 si.nfrags = frags + 1;
2398 ret = q->handler(q, q->cur_desc, &si);
2399 if (likely(ret == 0))
2400 q->offset += ALIGN(fp->size, s->fl_align);
2401 else
2402 restore_rx_bufs(&si, &rxq->fl, frags);
2403 } else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
2404 ret = q->handler(q, q->cur_desc, NULL);
2405 } else {
2406 ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
2407 }
2408
2409 if (unlikely(ret)) {
			/* couldn't process descriptor, back off for recovery */
2411 q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX);
2412 break;
2413 }
2414
2415 rspq_next(q);
2416 budget_left--;
2417 }
2418
2419 if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 16)
2420 __refill_fl(q->adap, &rxq->fl);
2421 return budget - budget_left;
2422}

/**
 *	napi_rx_handler - the NAPI handler for Rx processing
 *	@napi: the napi instance
 *	@budget: how many packets we can process in this round
 *
 *	Handler for new data events when using NAPI.  This does not need any
 *	locking or protection from interrupts as data interrupts are off at
 *	this point and other adapter interrupts do not interfere (the latter
 *	in not a concern at all with MSI-X as non-data interrupts then have
 *	a separate handler).
 */
2435static int napi_rx_handler(struct napi_struct *napi, int budget)
2436{
2437 unsigned int params;
2438 struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
2439 int work_done;
2440 u32 val;
2441
2442 work_done = process_responses(q, budget);
2443 if (likely(work_done < budget)) {
2444 int timer_index;
2445
2446 napi_complete_done(napi, work_done);
2447 timer_index = QINTR_TIMER_IDX_G(q->next_intr_params);
2448
2449 if (q->adaptive_rx) {
2450 if (work_done > max(timer_pkt_quota[timer_index],
2451 MIN_NAPI_WORK))
2452 timer_index = (timer_index + 1);
2453 else
2454 timer_index = timer_index - 1;
2455
2456 timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1);
2457 q->next_intr_params =
2458 QINTR_TIMER_IDX_V(timer_index) |
2459 QINTR_CNT_EN_V(0);
2460 params = q->next_intr_params;
2461 } else {
2462 params = q->next_intr_params;
2463 q->next_intr_params = q->intr_params;
2464 }
2465 } else
2466 params = QINTR_TIMER_IDX_V(7);
2467
2468 val = CIDXINC_V(work_done) | SEINTARM_V(params);

	/* If we don't have access to the new User GTS (T5 and later), use the
	 * old doorbell mechanism; otherwise use the new BAR2 mechanism.
	 */
2473 if (unlikely(q->bar2_addr == NULL)) {
2474 t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
2475 val | INGRESSQID_V((u32)q->cntxt_id));
2476 } else {
2477 writel(val | INGRESSQID_V(q->bar2_qid),
2478 q->bar2_addr + SGE_UDB_GTS);
2479 wmb();
2480 }
2481 return work_done;
2482}

/*
 * The MSI-X interrupt handler for an SGE response queue.
 */
2487irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
2488{
2489 struct sge_rspq *q = cookie;
2490
2491 napi_schedule(&q->napi);
2492 return IRQ_HANDLED;
2493}

/*
 * Process the indirect interrupt entries in the interrupt queue and kick off
 * NAPI for each queue that has generated an entry.
 */
2499static unsigned int process_intrq(struct adapter *adap)
2500{
2501 unsigned int credits;
2502 const struct rsp_ctrl *rc;
2503 struct sge_rspq *q = &adap->sge.intrq;
2504 u32 val;
2505
2506 spin_lock(&adap->sge.intrq_lock);
2507 for (credits = 0; ; credits++) {
2508 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
2509 if (!is_new_response(rc, q))
2510 break;
2511
2512 dma_rmb();
2513 if (RSPD_TYPE_G(rc->type_gen) == RSPD_TYPE_INTR_X) {
2514 unsigned int qid = ntohl(rc->pldbuflen_qid);
2515
2516 qid -= adap->sge.ingr_start;
2517 napi_schedule(&adap->sge.ingr_map[qid]->napi);
2518 }
2519
2520 rspq_next(q);
2521 }
2522
2523 val = CIDXINC_V(credits) | SEINTARM_V(q->intr_params);

	/* If we don't have access to the new User GTS (T5 and later), use the
	 * old doorbell mechanism; otherwise use the new BAR2 mechanism.
	 */
2528 if (unlikely(q->bar2_addr == NULL)) {
2529 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
2530 val | INGRESSQID_V(q->cntxt_id));
2531 } else {
2532 writel(val | INGRESSQID_V(q->bar2_qid),
2533 q->bar2_addr + SGE_UDB_GTS);
2534 wmb();
2535 }
2536 spin_unlock(&adap->sge.intrq_lock);
2537 return credits;
2538}

/*
 * The MSI interrupt handler, which handles data events from SGE response
 * queues as well as error and other async events as they all use the same
 * MSI vector.
 */
2544static irqreturn_t t4_intr_msi(int irq, void *cookie)
2545{
2546 struct adapter *adap = cookie;
2547
2548 if (adap->flags & MASTER_PF)
2549 t4_slow_intr_handler(adap);
2550 process_intrq(adap);
2551 return IRQ_HANDLED;
2552}

/*
 * Interrupt handler for legacy INTx interrupts.
 * Handles data events from SGE response queues as well as error and other
 * async events as they all use the same interrupt line.
 */
2559static irqreturn_t t4_intr_intx(int irq, void *cookie)
2560{
2561 struct adapter *adap = cookie;
2562
2563 t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0);
2564 if (((adap->flags & MASTER_PF) && t4_slow_intr_handler(adap)) |
2565 process_intrq(adap))
2566 return IRQ_HANDLED;
2567 return IRQ_NONE;
2568}

/**
 *	t4_intr_handler - select the top-level interrupt handler
 *	@adap: the adapter
 *
 *	Selects the top-level interrupt handler based on the type of
 *	interrupts (MSI-X, MSI, or INTx).
 */
2577irq_handler_t t4_intr_handler(struct adapter *adap)
2578{
2579 if (adap->flags & USING_MSIX)
2580 return t4_sge_intr_msix;
2581 if (adap->flags & USING_MSI)
2582 return t4_intr_msi;
2583 return t4_intr_intx;
2584}
2585
2586static void sge_rx_timer_cb(unsigned long data)
2587{
2588 unsigned long m;
2589 unsigned int i;
2590 struct adapter *adap = (struct adapter *)data;
2591 struct sge *s = &adap->sge;
2592
2593 for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
2594 for (m = s->starving_fl[i]; m; m &= m - 1) {
2595 struct sge_eth_rxq *rxq;
2596 unsigned int id = __ffs(m) + i * BITS_PER_LONG;
2597 struct sge_fl *fl = s->egr_map[id];
2598
2599 clear_bit(id, s->starving_fl);
2600 smp_mb__after_atomic();
2601
2602 if (fl_starving(adap, fl)) {
2603 rxq = container_of(fl, struct sge_eth_rxq, fl);
2604 if (napi_reschedule(&rxq->rspq.napi))
2605 fl->starving++;
2606 else
2607 set_bit(id, s->starving_fl);
2608 }
2609 }

	/* Periodic SGE Ingress DMA hang detection is done only by the
	 * Master PF.
	 */
2614 if (!(adap->flags & MASTER_PF))
2615 goto done;
2616
2617 t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD);
2618
2619done:
2620 mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
2621}
2622
2623static void sge_tx_timer_cb(unsigned long data)
2624{
2625 unsigned long m;
2626 unsigned int i, budget;
2627 struct adapter *adap = (struct adapter *)data;
2628 struct sge *s = &adap->sge;
2629
2630 for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
2631 for (m = s->txq_maperr[i]; m; m &= m - 1) {
2632 unsigned long id = __ffs(m) + i * BITS_PER_LONG;
2633 struct sge_uld_txq *txq = s->egr_map[id];
2634
2635 clear_bit(id, s->txq_maperr);
2636 tasklet_schedule(&txq->qresume_tsk);
2637 }
2638
2639 if (!is_t4(adap->params.chip)) {
2640 struct sge_eth_txq *q = &s->ptptxq;
2641 int avail;
2642
2643 spin_lock(&adap->ptp_lock);
2644 avail = reclaimable(&q->q);
2645
2646 if (avail) {
2647 free_tx_desc(adap, &q->q, avail, false);
2648 q->q.in_use -= avail;
2649 }
2650 spin_unlock(&adap->ptp_lock);
2651 }
2652
2653 budget = MAX_TIMER_TX_RECLAIM;
2654 i = s->ethtxq_rover;
2655 do {
2656 struct sge_eth_txq *q = &s->ethtxq[i];
2657
2658 if (q->q.in_use &&
2659 time_after_eq(jiffies, q->txq->trans_start + HZ / 100) &&
2660 __netif_tx_trylock(q->txq)) {
2661 int avail = reclaimable(&q->q);
2662
2663 if (avail) {
2664 if (avail > budget)
2665 avail = budget;
2666
2667 free_tx_desc(adap, &q->q, avail, true);
2668 q->q.in_use -= avail;
2669 budget -= avail;
2670 }
2671 __netif_tx_unlock(q->txq);
2672 }
2673
2674 if (++i >= s->ethqsets)
2675 i = 0;
2676 } while (budget && i != s->ethtxq_rover);
2677 s->ethtxq_rover = i;
2678 mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
2679}

/**
 *	bar2_address - return the BAR2 address for an SGE Queue's Registers
 *	@adapter: the adapter
 *	@qid: the SGE Queue ID
 *	@qtype: the SGE Queue Type (Egress or Ingress)
 *	@pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
 *
 *	Returns the BAR2 address for the SGE Queue Registers associated with
 *	@qid.  If BAR2 SGE Registers aren't available, returns NULL.  Also
 *	returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
 *	Queue Registers.
 */
2694static void __iomem *bar2_address(struct adapter *adapter,
2695 unsigned int qid,
2696 enum t4_bar2_qtype qtype,
2697 unsigned int *pbar2_qid)
2698{
2699 u64 bar2_qoffset;
2700 int ret;
2701
2702 ret = t4_bar2_sge_qregs(adapter, qid, qtype, 0,
2703 &bar2_qoffset, pbar2_qid);
2704 if (ret)
2705 return NULL;
2706
2707 return adapter->bar2 + bar2_qoffset;
2708}

/* @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0
 * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map
 */
2713int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
2714 struct net_device *dev, int intr_idx,
2715 struct sge_fl *fl, rspq_handler_t hnd,
2716 rspq_flush_handler_t flush_hnd, int cong)
2717{
2718 int ret, flsz = 0;
2719 struct fw_iq_cmd c;
2720 struct sge *s = &adap->sge;
2721 struct port_info *pi = netdev_priv(dev);
2722 int relaxed = !(adap->flags & ROOT_NO_RELAXED_ORDERING);

	/* Size needs to be multiple of 16, including status entry. */
2725 iq->size = roundup(iq->size, 16);
2726
2727 iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
2728 &iq->phys_addr, NULL, 0,
2729 dev_to_node(adap->pdev_dev));
2730 if (!iq->desc)
2731 return -ENOMEM;
2732
2733 memset(&c, 0, sizeof(c));
2734 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
2735 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
2736 FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0));
2737 c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F |
2738 FW_LEN16(c));
2739 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
2740 FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) |
2741 FW_IQ_CMD_IQANDST_V(intr_idx < 0) |
2742 FW_IQ_CMD_IQANUD_V(UPDATEDELIVERY_INTERRUPT_X) |
2743 FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx :
2744 -intr_idx - 1));
2745 c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) |
2746 FW_IQ_CMD_IQGTSMODE_F |
2747 FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) |
2748 FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4));
2749 c.iqsize = htons(iq->size);
2750 c.iqaddr = cpu_to_be64(iq->phys_addr);
2751 if (cong >= 0)
2752 c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F);
2753
2754 if (fl) {
2755 enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);

		/* Allocate the hardware free list ring (with space for its
		 * status page) along with the associated software descriptor
		 * ring.  The free list size needs to be a multiple of the
		 * Egress Queue Unit and comfortably above the SGE's
		 * starvation threshold (fl_starve_thres - 1).
		 */
2764 if (fl->size < s->fl_starve_thres - 1 + 2 * 8)
2765 fl->size = s->fl_starve_thres - 1 + 2 * 8;
2766 fl->size = roundup(fl->size, 8);
2767 fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
2768 sizeof(struct rx_sw_desc), &fl->addr,
2769 &fl->sdesc, s->stat_len,
2770 dev_to_node(adap->pdev_dev));
2771 if (!fl->desc)
2772 goto fl_nomem;
2773
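		/* The firmware expects the Free List size in Egress Queue
		 * Units of 8 pointers (64 bytes) each, including the status
		 * page appended to the end of the ring.
		 */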
2774 flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
2775 c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F |
2776 FW_IQ_CMD_FL0FETCHRO_V(relaxed) |
2777 FW_IQ_CMD_FL0DATARO_V(relaxed) |
2778 FW_IQ_CMD_FL0PADEN_F);
2779 if (cong >= 0)
2780 c.iqns_to_fl0congen |=
2781 htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong) |
2782 FW_IQ_CMD_FL0CONGCIF_F |
2783 FW_IQ_CMD_FL0CONGEN_F);
2784
		/* T6 has an internal 16-byte header overhead going into the
		 * FLM module for Free List fetches, so its maximum allowed
		 * fetch burst is smaller.  T4/T5 hardware doesn't coalesce
		 * fetch requests of more than 64 bytes of Free List pointers,
		 * so use a 128-byte Fetch Burst Minimum there; T6 coalesces,
		 * so the smaller 64-byte minimum can be used.
		 */
2792 c.fl0dcaen_to_fl0cidxfthresh =
2793 htons(FW_IQ_CMD_FL0FBMIN_V(chip <= CHELSIO_T5 ?
2794 FETCHBURSTMIN_128B_X :
2795 FETCHBURSTMIN_64B_X) |
2796 FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ?
2797 FETCHBURSTMAX_512B_X :
2798 FETCHBURSTMAX_256B_X));
2799 c.fl0size = htons(flsz);
2800 c.fl0addr = cpu_to_be64(fl->addr);
2801 }
2802
2803 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
2804 if (ret)
2805 goto err;
2806
2807 netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
2808 iq->cur_desc = iq->desc;
2809 iq->cidx = 0;
2810 iq->gen = 1;
2811 iq->next_intr_params = iq->intr_params;
2812 iq->cntxt_id = ntohs(c.iqid);
2813 iq->abs_id = ntohs(c.physiqid);
2814 iq->bar2_addr = bar2_address(adap,
2815 iq->cntxt_id,
2816 T4_BAR2_QTYPE_INGRESS,
2817 &iq->bar2_qid);
2818 iq->size--;
2819 iq->netdev = dev;
2820 iq->handler = hnd;
2821 iq->flush_handler = flush_hnd;
2822
2823 memset(&iq->lro_mgr, 0, sizeof(struct t4_lro_mgr));
2824 skb_queue_head_init(&iq->lro_mgr.lroq);
2825
	/* set offset to -1 to distinguish ingress queues without FL */
2827 iq->offset = fl ? 0 : -1;
2828
2829 adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;
2830
2831 if (fl) {
2832 fl->cntxt_id = ntohs(c.fl0id);
2833 fl->avail = fl->pend_cred = 0;
2834 fl->pidx = fl->cidx = 0;
2835 fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
2836 adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
2837
		/* Note, we must initialize the BAR2 Free List User Doorbell
		 * information before refilling the Free List!
		 */
2841 fl->bar2_addr = bar2_address(adap,
2842 fl->cntxt_id,
2843 T4_BAR2_QTYPE_EGRESS,
2844 &fl->bar2_qid);
2845 refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
2846 }
2847
	/* For T5 and later we attempt to set up the Congestion Manager values
	 * of the new Rx Ethernet Queue.  This should really be handled by
	 * firmware because it's more complex than any host driver wants to
	 * get involved with and it differs per chip; for now we do something
	 * simple on the host and hope it is at least close enough.
	 */
2856 if (!is_t4(adap->params.chip) && cong >= 0) {
2857 u32 param, val, ch_map = 0;
2858 int i;
2859 u16 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log;
2860
2861 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
2862 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
2863 FW_PARAMS_PARAM_YZ_V(iq->cntxt_id));
2864 if (cong == 0) {
2865 val = CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_QUEUE_X);
2866 } else {
2867 val =
2868 CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_CHANNEL_X);
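			/* Spread each congestion channel bit of @cong across
			 * the per-channel field width of the Congestion
			 * Channel Map: with cng_ch_bits_log == 2, for
			 * example, channel i lands on bit (i * 4).
			 */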
2869 for (i = 0; i < 4; i++) {
2870 if (cong & (1 << i))
2871 ch_map |= 1 << (i << cng_ch_bits_log);
2872 }
2873 val |= CONMCTXT_CNGCHMAP_V(ch_map);
2874 }
2875 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
			      &param, &val);
2877 if (ret)
2878 dev_warn(adap->pdev_dev, "Failed to set Congestion"
2879 " Manager Context for Ingress Queue %d: %d\n",
2880 iq->cntxt_id, -ret);
2881 }
2882
2883 return 0;
2884
2885fl_nomem:
2886 ret = -ENOMEM;
2887err:
2888 if (iq->desc) {
2889 dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
2890 iq->desc, iq->phys_addr);
2891 iq->desc = NULL;
2892 }
2893 if (fl && fl->desc) {
2894 kfree(fl->sdesc);
2895 fl->sdesc = NULL;
2896 dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
2897 fl->desc, fl->addr);
2898 fl->desc = NULL;
2899 }
2900 return ret;
2901}
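/* Typical t4_sge_alloc_rxq() use (an illustrative sketch only, not taken from
 * a specific caller): an Ethernet queue set allocates its response queue and
 * Free List together, e.g.
 *
 *	ret = t4_sge_alloc_rxq(adap, &rxq->rspq, false, dev, msix_vec,
 *			       &rxq->fl, t4_ethrx_handler, NULL, cong_map);
 *
 * where t4_ethrx_handler is this file's Ethernet Rx handler, and rxq,
 * msix_vec and cong_map stand in for the caller's queue set, MSI-X vector
 * offset and congestion channel map (-1 skips congestion setup).
 */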
2902
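/* Initialize the software state of a Tx queue whose hardware context has just
 * been allocated by the firmware and hook it into the egress queue map.
 */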
2903static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
2904{
2905 q->cntxt_id = id;
2906 q->bar2_addr = bar2_address(adap,
2907 q->cntxt_id,
2908 T4_BAR2_QTYPE_EGRESS,
2909 &q->bar2_qid);
2910 q->in_use = 0;
2911 q->cidx = q->pidx = 0;
2912 q->stops = q->restarts = 0;
2913 q->stat = (void *)&q->desc[q->size];
2914 spin_lock_init(&q->db_lock);
2915 adap->sge.egr_map[id - adap->sge.egr_start] = q;
2916}
2917
2918int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
2919 struct net_device *dev, struct netdev_queue *netdevq,
2920 unsigned int iqid)
2921{
2922 int ret, nentries;
2923 struct fw_eq_eth_cmd c;
2924 struct sge *s = &adap->sge;
2925 struct port_info *pi = netdev_priv(dev);
2926
	/* Add status entries */
2928 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2929
2930 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
2931 sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
2932 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
2933 netdev_queue_numa_node_read(netdevq));
2934 if (!txq->q.desc)
2935 return -ENOMEM;
2936
2937 memset(&c, 0, sizeof(c));
2938 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
2939 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
2940 FW_EQ_ETH_CMD_PFN_V(adap->pf) |
2941 FW_EQ_ETH_CMD_VFN_V(0));
2942 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F |
2943 FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c));
2944 c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
2945 FW_EQ_ETH_CMD_VIID_V(pi->viid));
2946 c.fetchszm_to_iqid =
2947 htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
2948 FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
2949 FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
2950 c.dcaen_to_eqsize =
2951 htonl(FW_EQ_ETH_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
2952 FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
2953 FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
2954 FW_EQ_ETH_CMD_EQSIZE_V(nentries));
2955 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2956
2957 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
2958 if (ret) {
2959 kfree(txq->q.sdesc);
2960 txq->q.sdesc = NULL;
2961 dma_free_coherent(adap->pdev_dev,
2962 nentries * sizeof(struct tx_desc),
2963 txq->q.desc, txq->q.phys_addr);
2964 txq->q.desc = NULL;
2965 return ret;
2966 }
2967
2968 txq->q.q_type = CXGB4_TXQ_ETH;
2969 init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd)));
2970 txq->txq = netdevq;
2971 txq->tso = txq->tx_cso = txq->vlan_ins = 0;
2972 txq->mapping_err = 0;
2973 return 0;
2974}
2975
2976int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
2977 struct net_device *dev, unsigned int iqid,
2978 unsigned int cmplqid)
2979{
2980 int ret, nentries;
2981 struct fw_eq_ctrl_cmd c;
2982 struct sge *s = &adap->sge;
2983 struct port_info *pi = netdev_priv(dev);
2984
2985
	/* Add status entries */
2987
2988 txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
2989 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
2990 NULL, 0, dev_to_node(adap->pdev_dev));
2991 if (!txq->q.desc)
2992 return -ENOMEM;
2993
2994 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
2995 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
2996 FW_EQ_CTRL_CMD_PFN_V(adap->pf) |
2997 FW_EQ_CTRL_CMD_VFN_V(0));
2998 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F |
2999 FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c));
3000 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid));
3001 c.physeqid_pkd = htonl(0);
3002 c.fetchszm_to_iqid =
3003 htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
3004 FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
3005 FW_EQ_CTRL_CMD_FETCHRO_F | FW_EQ_CTRL_CMD_IQID_V(iqid));
3006 c.dcaen_to_eqsize =
3007 htonl(FW_EQ_CTRL_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
3008 FW_EQ_CTRL_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
3009 FW_EQ_CTRL_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
3010 FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
3011 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
3012
3013 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
3014 if (ret) {
3015 dma_free_coherent(adap->pdev_dev,
3016 nentries * sizeof(struct tx_desc),
3017 txq->q.desc, txq->q.phys_addr);
3018 txq->q.desc = NULL;
3019 return ret;
3020 }
3021
3022 txq->q.q_type = CXGB4_TXQ_CTRL;
3023 init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
3024 txq->adap = adap;
3025 skb_queue_head_init(&txq->sendq);
3026 tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
3027 txq->full = 0;
3028 return 0;
3029}
3030
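/* Repoint an existing control Tx queue at a new completion ingress queue via
 * a firmware DMAQ parameter write.
 */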
3031int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
3032 unsigned int cmplqid)
3033{
3034 u32 param, val;
3035
3036 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
3037 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL) |
3038 FW_PARAMS_PARAM_YZ_V(eqid));
3039 val = cmplqid;
	return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
3041}
3042
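/* Allocate an offload (ULD) Tx queue.  Crypto ULDs get a control-type egress
 * queue context; all other ULDs get an ordinary offload egress context.
 */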
3043int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
3044 struct net_device *dev, unsigned int iqid,
3045 unsigned int uld_type)
3046{
3047 int ret, nentries;
3048 struct fw_eq_ofld_cmd c;
3049 struct sge *s = &adap->sge;
3050 struct port_info *pi = netdev_priv(dev);
3051 int cmd = FW_EQ_OFLD_CMD;
3052
	/* Add status entries */
3054 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
3055
3056 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
3057 sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
3058 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
3059 NUMA_NO_NODE);
3060 if (!txq->q.desc)
3061 return -ENOMEM;
3062
3063 memset(&c, 0, sizeof(c));
3064 if (unlikely(uld_type == CXGB4_TX_CRYPTO))
3065 cmd = FW_EQ_CTRL_CMD;
3066 c.op_to_vfn = htonl(FW_CMD_OP_V(cmd) | FW_CMD_REQUEST_F |
3067 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
3068 FW_EQ_OFLD_CMD_PFN_V(adap->pf) |
3069 FW_EQ_OFLD_CMD_VFN_V(0));
3070 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F |
3071 FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c));
3072 c.fetchszm_to_iqid =
3073 htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
3074 FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
3075 FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid));
3076 c.dcaen_to_eqsize =
3077 htonl(FW_EQ_OFLD_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
3078 FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
3079 FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
3080 FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
3081 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
3082
3083 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
3084 if (ret) {
3085 kfree(txq->q.sdesc);
3086 txq->q.sdesc = NULL;
3087 dma_free_coherent(adap->pdev_dev,
3088 nentries * sizeof(struct tx_desc),
3089 txq->q.desc, txq->q.phys_addr);
3090 txq->q.desc = NULL;
3091 return ret;
3092 }
3093
3094 txq->q.q_type = CXGB4_TXQ_ULD;
3095 init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
3096 txq->adap = adap;
3097 skb_queue_head_init(&txq->sendq);
3098 tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
3099 txq->full = 0;
3100 txq->mapping_err = 0;
3101 return 0;
3102}
3103
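/* Release a Tx queue's descriptor ring (including its status page) and reset
 * its software state.  Any software descriptor array must be freed by the
 * caller beforehand.
 */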
3104void free_txq(struct adapter *adap, struct sge_txq *q)
3105{
3106 struct sge *s = &adap->sge;
3107
3108 dma_free_coherent(adap->pdev_dev,
3109 q->size * sizeof(struct tx_desc) + s->stat_len,
3110 q->desc, q->phys_addr);
3111 q->cntxt_id = 0;
3112 q->sdesc = NULL;
3113 q->desc = NULL;
3114}
3115
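/* Free a response queue and, if present, its associated Free List: unhook the
 * queue from the driver's ingress map, ask the firmware to release the
 * hardware contexts, then free the host rings and any Rx buffers still held.
 */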
3116void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
3117 struct sge_fl *fl)
3118{
3119 struct sge *s = &adap->sge;
3120 unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
3121
3122 adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
3123 t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
3124 rq->cntxt_id, fl_id, 0xffff);
3125 dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
3126 rq->desc, rq->phys_addr);
3127 netif_napi_del(&rq->napi);
3128 rq->netdev = NULL;
3129 rq->cntxt_id = rq->abs_id = 0;
3130 rq->desc = NULL;
3131
3132 if (fl) {
3133 free_rx_bufs(adap, fl, fl->avail);
3134 dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
3135 fl->desc, fl->addr);
3136 kfree(fl->sdesc);
3137 fl->sdesc = NULL;
3138 fl->cntxt_id = 0;
3139 fl->desc = NULL;
3140 }
3141}
3142
/**
 *	t4_free_ofld_rxqs - free a block of consecutive Rx queues
 *	@adap: the adapter
 *	@n: number of queues
 *	@q: pointer to first queue
 *
 *	Release the resources of a consecutive block of offload Rx queues.
 */
3151void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
3152{
3153 for ( ; n; n--, q++)
3154 if (q->rspq.desc)
3155 free_rspq_fl(adap, &q->rspq,
3156 q->fl.size ? &q->fl : NULL);
3157}
3158
/**
 *	t4_free_sge_resources - free SGE resources
 *	@adap: the adapter
 *
 *	Frees resources used by the SGE queue sets.
 */
3165void t4_free_sge_resources(struct adapter *adap)
3166{
3167 int i;
3168 struct sge_eth_rxq *eq;
3169 struct sge_eth_txq *etq;
3170
	/* stop all Rx queues in order to start them draining */
3172 for (i = 0; i < adap->sge.ethqsets; i++) {
3173 eq = &adap->sge.ethrxq[i];
3174 if (eq->rspq.desc)
3175 t4_iq_stop(adap, adap->mbox, adap->pf, 0,
3176 FW_IQ_TYPE_FL_INT_CAP,
3177 eq->rspq.cntxt_id,
3178 eq->fl.size ? eq->fl.cntxt_id : 0xffff,
3179 0xffff);
3180 }
3181
	/* clean up Ethernet Tx/Rx queues */
3183 for (i = 0; i < adap->sge.ethqsets; i++) {
3184 eq = &adap->sge.ethrxq[i];
3185 if (eq->rspq.desc)
3186 free_rspq_fl(adap, &eq->rspq,
3187 eq->fl.size ? &eq->fl : NULL);
3188
3189 etq = &adap->sge.ethtxq[i];
3190 if (etq->q.desc) {
3191 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
3192 etq->q.cntxt_id);
3193 __netif_tx_lock_bh(etq->txq);
3194 free_tx_desc(adap, &etq->q, etq->q.in_use, true);
3195 __netif_tx_unlock_bh(etq->txq);
3196 kfree(etq->q.sdesc);
3197 free_txq(adap, &etq->q);
3198 }
3199 }
3200
	/* clean up control Tx queues */
3202 for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
3203 struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
3204
3205 if (cq->q.desc) {
3206 tasklet_kill(&cq->qresume_tsk);
3207 t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
3208 cq->q.cntxt_id);
3209 __skb_queue_purge(&cq->sendq);
3210 free_txq(adap, &cq->q);
3211 }
3212 }
3213
3214 if (adap->sge.fw_evtq.desc)
3215 free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
3216
3217 if (adap->sge.intrq.desc)
3218 free_rspq_fl(adap, &adap->sge.intrq, NULL);
3219
3220 if (!is_t4(adap->params.chip)) {
3221 etq = &adap->sge.ptptxq;
3222 if (etq->q.desc) {
3223 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
3224 etq->q.cntxt_id);
3225 spin_lock_bh(&adap->ptp_lock);
3226 free_tx_desc(adap, &etq->q, etq->q.in_use, true);
3227 spin_unlock_bh(&adap->ptp_lock);
3228 kfree(etq->q.sdesc);
3229 free_txq(adap, &etq->q);
3230 }
3231 }
3232
	/* clear the reverse egress queue map */
3234 memset(adap->sge.egr_map, 0,
3235 adap->sge.egr_sz * sizeof(*adap->sge.egr_map));
3236}
3237
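/**
 *	t4_sge_start - enable SGE operation
 *	@adap: the adapter
 *
 *	Start the periodic Rx and Tx queue check timers.
 */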
3238void t4_sge_start(struct adapter *adap)
3239{
3240 adap->sge.ethtxq_rover = 0;
3241 mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
3242 mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
3243}
3244
/**
 *	t4_sge_stop - disable SGE operation
 *	@adap: the adapter
 *
 *	Stop tasklets and timers associated with the DMA engine.  Note that
 *	this is effective only if measures have been taken to disable any HW
 *	events that may restart them.
 */
3253void t4_sge_stop(struct adapter *adap)
3254{
3255 int i;
3256 struct sge *s = &adap->sge;
3257
3258 if (in_interrupt())
3259 return;
3260
3261 if (s->rx_timer.function)
3262 del_timer_sync(&s->rx_timer);
3263 if (s->tx_timer.function)
3264 del_timer_sync(&s->tx_timer);
3265
3266 if (is_offload(adap)) {
3267 struct sge_uld_txq_info *txq_info;
3268
3269 txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD];
3270 if (txq_info) {
			for_each_ofldtxq(&adap->sge, i) {
				struct sge_uld_txq *txq = &txq_info->uldtxq[i];

				if (txq->q.desc)
					tasklet_kill(&txq->qresume_tsk);
3276 }
3277 }
3278 }
3279
3280 if (is_pci_uld(adap)) {
3281 struct sge_uld_txq_info *txq_info;
3282
3283 txq_info = adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
3284 if (txq_info) {
			for_each_ofldtxq(&adap->sge, i) {
				struct sge_uld_txq *txq = &txq_info->uldtxq[i];

				if (txq->q.desc)
					tasklet_kill(&txq->qresume_tsk);
3290 }
3291 }
3292 }
3293
3294 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
3295 struct sge_ctrl_txq *cq = &s->ctrlq[i];
3296
3297 if (cq->q.desc)
3298 tasklet_kill(&cq->qresume_tsk);
3299 }
3300}
3301
/**
 *	t4_sge_init_soft - grab core SGE values needed by SGE code
 *	@adap: the adapter
 *
 *	We need to grab the SGE operating parameters that we need to have
 *	in order to do our job and make sure we can live with them.
 */
3310static int t4_sge_init_soft(struct adapter *adap)
3311{
3312 struct sge *s = &adap->sge;
3313 u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
3314 u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
3315 u32 ingress_rx_threshold;
3316
	/*
	 * Verify that CPL messages are going to the Ingress Queue for
	 * process_responses() and that only packet data is going to the
	 * free lists.
	 */
3322 if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) !=
3323 RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
3324 dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
3325 return -EINVAL;
3326 }
3327
	/*
	 * Validate the Host Buffer Register Array indices that we want to
	 * use ...
	 *
	 * XXX Note that we should really read through the Host Buffer Size
	 * register array and find the indices of the Buffer Sizes which
	 * meet our needs!
	 */
3336 #define READ_FL_BUF(x) \
3337 t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32))
3338
3339 fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
3340 fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
3341 fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
3342 fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
3343
	/* We only bother using the Large Page logic if the Large Page Buffer
	 * is larger than our Page Size Buffer.
	 */
3347 if (fl_large_pg <= fl_small_pg)
3348 fl_large_pg = 0;
3349
3350 #undef READ_FL_BUF
3351
	/* The Page Size Buffer must be exactly equal to our Page Size and the
	 * Large Page Size Buffer should be 0 (see above) or a power of 2.
	 */
3355 if (fl_small_pg != PAGE_SIZE ||
3356 (fl_large_pg & (fl_large_pg-1)) != 0) {
3357 dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
3358 fl_small_pg, fl_large_pg);
3359 return -EINVAL;
3360 }
3361 if (fl_large_pg)
3362 s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
3363
3364 if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) ||
3365 fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
3366 dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n",
3367 fl_small_mtu, fl_large_mtu);
3368 return -EINVAL;
3369 }
3370
	/*
	 * Retrieve our RX interrupt holdoff timer values and counter
	 * threshold values from the SGE parameters.
	 */
3375 timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A);
3376 timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A);
3377 timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A);
3378 s->timer_val[0] = core_ticks_to_us(adap,
3379 TIMERVALUE0_G(timer_value_0_and_1));
3380 s->timer_val[1] = core_ticks_to_us(adap,
3381 TIMERVALUE1_G(timer_value_0_and_1));
3382 s->timer_val[2] = core_ticks_to_us(adap,
3383 TIMERVALUE2_G(timer_value_2_and_3));
3384 s->timer_val[3] = core_ticks_to_us(adap,
3385 TIMERVALUE3_G(timer_value_2_and_3));
3386 s->timer_val[4] = core_ticks_to_us(adap,
3387 TIMERVALUE4_G(timer_value_4_and_5));
3388 s->timer_val[5] = core_ticks_to_us(adap,
3389 TIMERVALUE5_G(timer_value_4_and_5));
3390
3391 ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A);
3392 s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
3393 s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
3394 s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
3395 s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);
3396
3397 return 0;
3398}
3399
/**
 *	t4_sge_init - initialize SGE
 *	@adap: the adapter
 *
 *	Perform low-level SGE code initialization needed every time after a
 *	chip reset.
 */
3407int t4_sge_init(struct adapter *adap)
3408{
3409 struct sge *s = &adap->sge;
3410 u32 sge_control, sge_conm_ctrl;
3411 int ret, egress_threshold;
3412
	/*
	 * Ingress Padding Boundary and Egress Status Page Size are set up by
	 * t4_fixup_host_params().
	 */
3417 sge_control = t4_read_reg(adap, SGE_CONTROL_A);
3418 s->pktshift = PKTSHIFT_G(sge_control);
3419 s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;
3420
3421 s->fl_align = t4_fl_pkt_align(adap);
3422 ret = t4_sge_init_soft(adap);
3423 if (ret < 0)
3424 return ret;
3425
	/*
	 * A Free List with <= fl_starve_thres buffers is starving and a
	 * periodic timer will attempt to refill it.  This value needs to be
	 * larger than the SGE's Egress Congestion Threshold: if it isn't, we
	 * can get stuck waiting for new packets while the SGE is waiting for
	 * us to give it more Free List entries.  (Note that the SGE's Egress
	 * Congestion Threshold is in units of 2 Free List pointers.)  The
	 * threshold lives in a different field of SGE_CONM_CTRL on each chip
	 * generation, hence the per-chip switch below.
	 */
3438 sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A);
3439 switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
3440 case CHELSIO_T4:
3441 egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl);
3442 break;
3443 case CHELSIO_T5:
3444 egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
3445 break;
3446 case CHELSIO_T6:
3447 egress_threshold = T6_EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
3448 break;
3449 default:
3450 dev_err(adap->pdev_dev, "Unsupported Chip version %d\n",
3451 CHELSIO_CHIP_VERSION(adap->params.chip));
3452 return -EINVAL;
3453 }
3454 s->fl_starve_thres = 2*egress_threshold + 1;
3455
3456 t4_idma_monitor_init(adap, &s->idma_monitor);
3457
	/* Set up the timers used to periodically check the Rx and Tx queues
	 * (sge_rx_timer_cb and sge_tx_timer_cb).
	 */
3461 setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
3462 setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
3463
3464 spin_lock_init(&s->intrq_lock);
3465
3466 return 0;
3467}
3468