/*
 * cxgb3 Scatter-Gather Engine (SGE): descriptor-ring management and packet
 * transmit/receive processing for Chelsio T3-based Ethernet adapters.
 */
32#include <linux/skbuff.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/if_vlan.h>
36#include <linux/ip.h>
37#include <linux/tcp.h>
38#include <linux/dma-mapping.h>
39#include <net/arp.h>
40#include "common.h"
41#include "regs.h"
42#include "sge_defs.h"
43#include "t3_cpl.h"
44#include "firmware_exports.h"
45
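/*
 * Set to 1 to drive Tx doorbells through the GTS flow-control mechanism;
 * 0 (the setting here) rings the kdoorbell register directly
 * (see check_ring_tx_db()).
 */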
46#define USE_GTS 0
47
48#define SGE_RX_SM_BUF_SIZE 1536
49
50#define SGE_RX_COPY_THRES 256
51#define SGE_RX_PULL_LEN 128
52
53#define SGE_PG_RSVD SMP_CACHE_BYTES
54
/*
 * Buffer sizes and allocation orders for the page-chunk free lists: FL0 holds
 * small chunks for MTU-sized packets, FL1 holds larger chunks for jumbo
 * frames and LRO.
 */
59#define FL0_PG_CHUNK_SIZE 2048
60#define FL0_PG_ORDER 0
61#define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER)
62#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
63#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
64#define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER)
65
66#define SGE_RX_DROP_THRES 16
67#define RX_RECLAIM_PERIOD (HZ/4)

/*
 * Max number of Rx buffers we replenish at a time.
 */
72#define MAX_RX_REFILL 16U

/*
 * Period of the Tx-buffer reclaim timer.  This timer does not need to run
 * frequently as Tx buffers are usually reclaimed by new Tx packets.
 */
77#define TX_RECLAIM_PERIOD (HZ / 4)
78#define TX_RECLAIM_TIMER_CHUNK 64U
79#define TX_RECLAIM_CHUNK 16U
80
81
82#define WR_LEN (WR_FLITS * 8)

/*
 * Types of Tx queues in each queue set: Ethernet, offload, and control.
 */
87enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
88
89
90enum {
91 TXQ_RUNNING = 1 << 0,
92 TXQ_LAST_PKT_DB = 1 << 1,
93};
94
95struct tx_desc {
96 __be64 flit[TX_DESC_FLITS];
97};
98
99struct rx_desc {
100 __be32 addr_lo;
101 __be32 len_gen;
102 __be32 gen2;
103 __be32 addr_hi;
104};
105
106struct tx_sw_desc {
107 struct sk_buff *skb;
108 u8 eop;
109 u8 addr_idx;
110 u8 fragidx;
111 s8 sflit;
112};
113
114struct rx_sw_desc {
115 union {
116 struct sk_buff *skb;
117 struct fl_pg_chunk pg_chunk;
118 };
119 DECLARE_PCI_UNMAP_ADDR(dma_addr);
120};
121
122struct rsp_desc {
123 struct rss_header rss_hdr;
124 __be32 flags;
125 __be32 len_cq;
126 u8 imm_data[47];
127 u8 intr_gen;
128};

/*
 * Per-packet DMA unmapping information stashed at skb->head for offload Tx
 * packets whose buffers must be unmapped after the HW is done with them.
 */
134struct deferred_unmap_info {
135 struct pci_dev *pdev;
136 dma_addr_t addr[MAX_SKB_FRAGS + 1];
137};

/*
 * Maps a number of flits to the number of Tx descriptors that can hold them.
 * The hardware allows a single work request to span at most 4 descriptors.
 */
147static u8 flit_desc_map[] = {
148 0,
149#if SGE_NUM_GENBITS == 1
150 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
151 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
152 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
153 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
154#elif SGE_NUM_GENBITS == 2
155 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
156 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
157 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
158 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
159#else
160# error "SGE_NUM_GENBITS must be 1 or 2"
161#endif
162};
163
164static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
165{
166 return container_of(q, struct sge_qset, fl[qidx]);
167}
168
169static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
170{
171 return container_of(q, struct sge_qset, rspq);
172}
173
174static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
175{
176 return container_of(q, struct sge_qset, txq[qidx]);
177}

/**
 *	refill_rspq - replenish an SGE response queue
 *	@adapter: the adapter
 *	@q: the response queue to replenish
 *	@credits: how many new responses to make available
 *
 *	Refills the specified response queue with the supplied number of
 *	responses.
 */
188static inline void refill_rspq(struct adapter *adapter,
189 const struct sge_rspq *q, unsigned int credits)
190{
191 rmb();
192 t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
193 V_RSPQ(q->cntxt_id) | V_CREDITS(credits));

/**
 *	need_skb_unmap - does the platform need unmapping of sk_buffs?
 *
 *	Returns true if the platform needs sk_buff unmapping.  When unmapping
 *	is not needed the dummy structure below has size zero and the compiler
 *	optimizes the unmapping code away.
 */
202static inline int need_skb_unmap(void)
203{
	/*
	 * This structure is used to tell if the platform needs buffer
	 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
	 */
208 struct dummy {
209 DECLARE_PCI_UNMAP_ADDR(addr);
210 };
211
212 return sizeof(struct dummy) != 0;
213}

/**
 *	unmap_skb - unmap a packet's main body and page fragments
 *	@skb: the packet
 *	@q: the Tx queue containing the Tx descriptors for the packet
 *	@cidx: index of the Tx descriptor
 *	@pdev: the PCI device
 *
 *	Unmaps the DMA mappings of a Tx packet.  Because an SGL may straddle
 *	several Tx descriptors, the information needed to unmap it is spread
 *	across the SGL itself (buffer lengths and addresses) and the SW
 *	descriptor state (fragment, address, and flit indices).  When a
 *	packet's SGL continues into the next descriptor, the indices for that
 *	descriptor are set up here so the remainder can be unmapped later.
 */
237static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
238 unsigned int cidx, struct pci_dev *pdev)
239{
240 const struct sg_ent *sgp;
241 struct tx_sw_desc *d = &q->sdesc[cidx];
242 int nfrags, frag_idx, curflit, j = d->addr_idx;
243
244 sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
245 frag_idx = d->fragidx;
246
247 if (frag_idx == 0 && skb_headlen(skb)) {
248 pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]),
249 skb_headlen(skb), PCI_DMA_TODEVICE);
250 j = 1;
251 }
252
253 curflit = d->sflit + 1 + j;
254 nfrags = skb_shinfo(skb)->nr_frags;
255
256 while (frag_idx < nfrags && curflit < WR_FLITS) {
257 pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
258 skb_shinfo(skb)->frags[frag_idx].size,
259 PCI_DMA_TODEVICE);
260 j ^= 1;
261 if (j == 0) {
262 sgp++;
263 curflit++;
264 }
265 curflit++;
266 frag_idx++;
267 }
268
269 if (frag_idx < nfrags) {
270 d = cidx + 1 == q->size ? q->sdesc : d + 1;
271 d->fragidx = frag_idx;
272 d->addr_idx = j;
273 d->sflit = curflit - WR_FLITS - j;
274 }
275}

/**
 *	free_tx_desc - reclaims Tx descriptors and their buffers
 *	@adapter: the adapter
 *	@q: the Tx ring to reclaim descriptors from
 *	@n: the number of descriptors to reclaim
 *
 *	Reclaims @n Tx descriptors, unmapping and freeing the associated
 *	packet buffers where present.
 */
286static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
287 unsigned int n)
288{
289 struct tx_sw_desc *d;
290 struct pci_dev *pdev = adapter->pdev;
291 unsigned int cidx = q->cidx;
292
293 const int need_unmap = need_skb_unmap() &&
294 q->cntxt_id >= FW_TUNNEL_SGEEC_START;
295
296 d = &q->sdesc[cidx];
297 while (n--) {
298 if (d->skb) {
299 if (need_unmap)
300 unmap_skb(d->skb, q, cidx, pdev);
301 if (d->eop)
302 kfree_skb(d->skb);
303 }
304 ++d;
305 if (++cidx == q->size) {
306 cidx = 0;
307 d = q->sdesc;
308 }
309 }
310 q->cidx = cidx;
311}

/**
 *	reclaim_completed_tx - reclaims completed Tx descriptors
 *	@adapter: the adapter
 *	@q: the Tx queue to reclaim completed descriptors from
 *	@chunk: maximum number of descriptors to reclaim
 *
 *	Reclaims up to @chunk descriptors that the HW has completed and
 *	returns the number of completed but still unreclaimed descriptors.
 */
323static inline unsigned int reclaim_completed_tx(struct adapter *adapter,
324 struct sge_txq *q,
325 unsigned int chunk)
326{
327 unsigned int reclaim = q->processed - q->cleaned;
328
329 reclaim = min(chunk, reclaim);
330 if (reclaim) {
331 free_tx_desc(adapter, q, reclaim);
332 q->cleaned += reclaim;
333 q->in_use -= reclaim;
334 }
335 return q->processed - q->cleaned;
336}
337
338
339
340
341
342
343
344static inline int should_restart_tx(const struct sge_txq *q)
345{
346 unsigned int r = q->processed - q->cleaned;
347
348 return q->in_use - r < (q->size >> 1);
349}
350
351static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
352 struct rx_sw_desc *d)
353{
354 if (q->use_pages && d->pg_chunk.page) {
355 (*d->pg_chunk.p_cnt)--;
356 if (!*d->pg_chunk.p_cnt)
357 pci_unmap_page(pdev,
358 d->pg_chunk.mapping,
359 q->alloc_size, PCI_DMA_FROMDEVICE);
360
361 put_page(d->pg_chunk.page);
362 d->pg_chunk.page = NULL;
363 } else {
364 pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
365 q->buf_size, PCI_DMA_FROMDEVICE);
366 kfree_skb(d->skb);
367 d->skb = NULL;
368 }
369}
370
371
372
373
374
375
376
377
378
379static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
380{
381 unsigned int cidx = q->cidx;
382
383 while (q->credits--) {
384 struct rx_sw_desc *d = &q->sdesc[cidx];
385
386
387 clear_rx_desc(pdev, q, d);
388 if (++cidx == q->size)
389 cidx = 0;
390 }
391
392 if (q->pg_chunk.page) {
393 __free_pages(q->pg_chunk.page, q->order);
394 q->pg_chunk.page = NULL;
395 }
396}
397
398
399
400
401
402
403
404
405
406
407
408
409
410static inline int add_one_rx_buf(void *va, unsigned int len,
411 struct rx_desc *d, struct rx_sw_desc *sd,
412 unsigned int gen, struct pci_dev *pdev)
413{
414 dma_addr_t mapping;
415
416 mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
417 if (unlikely(pci_dma_mapping_error(pdev, mapping)))
418 return -ENOMEM;
419
420 pci_unmap_addr_set(sd, dma_addr, mapping);
421
422 d->addr_lo = cpu_to_be32(mapping);
423 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
424 wmb();
425 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
426 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
427 return 0;
428}
429
430static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d,
431 unsigned int gen)
432{
433 d->addr_lo = cpu_to_be32(mapping);
434 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
435 wmb();
436 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
437 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
438 return 0;
439}
440
441static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
442 struct rx_sw_desc *sd, gfp_t gfp,
443 unsigned int order)
444{
445 if (!q->pg_chunk.page) {
446 dma_addr_t mapping;
447
448 q->pg_chunk.page = alloc_pages(gfp, order);
449 if (unlikely(!q->pg_chunk.page))
450 return -ENOMEM;
451 q->pg_chunk.va = page_address(q->pg_chunk.page);
452 q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) -
453 SGE_PG_RSVD;
454 q->pg_chunk.offset = 0;
455 mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
456 0, q->alloc_size, PCI_DMA_FROMDEVICE);
457 q->pg_chunk.mapping = mapping;
458 }
459 sd->pg_chunk = q->pg_chunk;
460
461 prefetch(sd->pg_chunk.p_cnt);
462
463 q->pg_chunk.offset += q->buf_size;
464 if (q->pg_chunk.offset == (PAGE_SIZE << order))
465 q->pg_chunk.page = NULL;
466 else {
467 q->pg_chunk.va += q->buf_size;
468 get_page(q->pg_chunk.page);
469 }
470
471 if (sd->pg_chunk.offset == 0)
472 *sd->pg_chunk.p_cnt = 1;
473 else
474 *sd->pg_chunk.p_cnt += 1;
475
476 return 0;
477}
478
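/*
 * Notify the hardware of newly added free-list buffers.  The doorbell write
 * is deferred until the number of un-notified buffers reaches a quarter of
 * the buffers currently on the list, to amortize its cost.
 */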
479static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
480{
481 if (q->pend_cred >= q->credits / 4) {
482 q->pend_cred = 0;
483 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
484 }
485}

/**
 *	refill_fl - refill an SGE free-buffer list
 *	@adap: the adapter
 *	@q: the free-list to refill
 *	@n: the number of new buffers to allocate
 *	@gfp: the gfp flags for the allocations
 *
 *	(Re)populate an SGE free-buffer queue with up to @n new packet
 *	buffers, allocated with the supplied gfp flags.  The caller must
 *	ensure @n does not exceed the queue's capacity.  Returns the number
 *	of buffers actually added.
 */
498static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
499{
500 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
501 struct rx_desc *d = &q->desc[q->pidx];
502 unsigned int count = 0;
503
504 while (n--) {
505 dma_addr_t mapping;
506 int err;
507
508 if (q->use_pages) {
509 if (unlikely(alloc_pg_chunk(adap, q, sd, gfp,
510 q->order))) {
511nomem: q->alloc_failed++;
512 break;
513 }
514 mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
515 pci_unmap_addr_set(sd, dma_addr, mapping);
516
517 add_one_rx_chunk(mapping, d, q->gen);
518 pci_dma_sync_single_for_device(adap->pdev, mapping,
519 q->buf_size - SGE_PG_RSVD,
520 PCI_DMA_FROMDEVICE);
521 } else {
522 void *buf_start;
523
524 struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
525 if (!skb)
526 goto nomem;
527
528 sd->skb = skb;
529 buf_start = skb->data;
530 err = add_one_rx_buf(buf_start, q->buf_size, d, sd,
531 q->gen, adap->pdev);
532 if (unlikely(err)) {
533 clear_rx_desc(adap->pdev, q, sd);
534 break;
535 }
536 }
537
538 d++;
539 sd++;
540 if (++q->pidx == q->size) {
541 q->pidx = 0;
542 q->gen ^= 1;
543 sd = q->sdesc;
544 d = q->desc;
545 }
546 count++;
547 }
548
549 q->credits += count;
550 q->pend_cred += count;
551 ring_fl_db(adap, q);
552
553 return count;
554}
555
556static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
557{
558 refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits),
559 GFP_ATOMIC | __GFP_COMP);
560}
561
562
563
564
565
566
567
568
569
570
571static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
572 unsigned int idx)
573{
574 struct rx_desc *from = &q->desc[idx];
575 struct rx_desc *to = &q->desc[q->pidx];
576
577 q->sdesc[q->pidx] = q->sdesc[idx];
578 to->addr_lo = from->addr_lo;
579 to->addr_hi = from->addr_hi;
580 wmb();
581 to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
582 to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
583
584 if (++q->pidx == q->size) {
585 q->pidx = 0;
586 q->gen ^= 1;
587 }
588
589 q->credits++;
590 q->pend_cred++;
591 ring_fl_db(adap, q);
592}

/**
 *	alloc_ring - allocate resources for an SGE descriptor ring
 *	@pdev: the PCI device
 *	@nelem: the number of descriptors
 *	@elem_size: the size of each descriptor
 *	@sw_size: the size of the SW state associated with each ring element
 *	@phys: the physical address of the allocated ring
 *	@metadata: address of the array holding the SW state for the ring
 *
 *	Allocates resources for an SGE descriptor ring, such as Tx queues,
 *	free-buffer lists, or response queues.  Each SGE ring requires space
 *	for the HW descriptors plus, optionally, space for the SW state
 *	associated with each HW entry (the metadata).  Returns the virtual
 *	address of the HW ring, or NULL on failure.
 */
611static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
612 size_t sw_size, dma_addr_t * phys, void *metadata)
613{
614 size_t len = nelem * elem_size;
615 void *s = NULL;
616 void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
617
618 if (!p)
619 return NULL;
620 if (sw_size && metadata) {
621 s = kcalloc(nelem, sw_size, GFP_KERNEL);
622
623 if (!s) {
624 dma_free_coherent(&pdev->dev, len, p, *phys);
625 return NULL;
626 }
627 *(void **)metadata = s;
628 }
629 memset(p, 0, len);
630 return p;
631}
632
633
634
635
636
637
638
639
640
641static void t3_reset_qset(struct sge_qset *q)
642{
643 if (q->adap &&
644 !(q->adap->flags & NAPI_INIT)) {
645 memset(q, 0, sizeof(*q));
646 return;
647 }
648
649 q->adap = NULL;
650 memset(&q->rspq, 0, sizeof(q->rspq));
651 memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
652 memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
653 q->txq_stopped = 0;
654 q->tx_reclaim_timer.function = NULL;
655 q->rx_reclaim_timer.function = NULL;
656 q->nomem = 0;
657 napi_free_frags(&q->napi);
658}
659

/**
 *	t3_free_qset - free the resources of an SGE queue set
 *	@adapter: the adapter owning the queue set
 *	@q: the queue set
 *
 *	Release the HW and SW resources associated with an SGE queue set, such
 *	as HW contexts, packet buffers, and descriptor rings.  Should only be
 *	called after the queue set has been quiesced.
 */
670static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
671{
672 int i;
673 struct pci_dev *pdev = adapter->pdev;
674
675 for (i = 0; i < SGE_RXQ_PER_SET; ++i)
676 if (q->fl[i].desc) {
677 spin_lock_irq(&adapter->sge.reg_lock);
678 t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
679 spin_unlock_irq(&adapter->sge.reg_lock);
680 free_rx_bufs(pdev, &q->fl[i]);
681 kfree(q->fl[i].sdesc);
682 dma_free_coherent(&pdev->dev,
683 q->fl[i].size *
684 sizeof(struct rx_desc), q->fl[i].desc,
685 q->fl[i].phys_addr);
686 }
687
688 for (i = 0; i < SGE_TXQ_PER_SET; ++i)
689 if (q->txq[i].desc) {
690 spin_lock_irq(&adapter->sge.reg_lock);
691 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
692 spin_unlock_irq(&adapter->sge.reg_lock);
693 if (q->txq[i].sdesc) {
694 free_tx_desc(adapter, &q->txq[i],
695 q->txq[i].in_use);
696 kfree(q->txq[i].sdesc);
697 }
698 dma_free_coherent(&pdev->dev,
699 q->txq[i].size *
700 sizeof(struct tx_desc),
701 q->txq[i].desc, q->txq[i].phys_addr);
702 __skb_queue_purge(&q->txq[i].sendq);
703 }
704
705 if (q->rspq.desc) {
706 spin_lock_irq(&adapter->sge.reg_lock);
707 t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
708 spin_unlock_irq(&adapter->sge.reg_lock);
709 dma_free_coherent(&pdev->dev,
710 q->rspq.size * sizeof(struct rsp_desc),
711 q->rspq.desc, q->rspq.phys_addr);
712 }
713
714 t3_reset_qset(q);
715}
716
717
718
719
720
721
722
723
724static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
725{
726 qs->rspq.cntxt_id = id;
727 qs->fl[0].cntxt_id = 2 * id;
728 qs->fl[1].cntxt_id = 2 * id + 1;
729 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
730 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
731 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
732 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
733 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
734}

/**
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *
 *	Calculates the number of flits needed for a scatter/gather list that
 *	can hold the given number of DMA buffers: each full sg_ent covers two
 *	buffers in 3 flits (one flit of lengths, two flits of addresses), and
 *	a trailing odd buffer adds 2 more flits.
 */
743static inline unsigned int sgl_len(unsigned int n)
744{
745
746 return (3 * n) / 2 + (n & 1);
747}
748
749
750
751
752
753
754
755
756static inline unsigned int flits_to_desc(unsigned int n)
757{
758 BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
759 return flit_desc_map[n];
760}

/**
 *	get_packet - return the next ingress packet buffer from a free list
 *	@adap: the adapter that received the packet
 *	@fl: the SGE free list holding the packet
 *	@len: the packet length
 *	@drop_thres: # of remaining buffers before we start dropping packets
 *
 *	Get the next packet from a free list and complete setup of the
 *	sk_buff.  If the packet is small we make a copy and recycle the
 *	original buffer, otherwise we use the original buffer itself.  If a
 *	positive drop threshold is supplied packets are dropped and their
 *	buffers recycled if (a) the number of remaining buffers is under the
 *	threshold and the packet is too big to copy, or (b) the packet should
 *	be copied but there is no memory for the copy.
 */
777static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
778 unsigned int len, unsigned int drop_thres)
779{
780 struct sk_buff *skb = NULL;
781 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
782
783 prefetch(sd->skb->data);
784 fl->credits--;
785
786 if (len <= SGE_RX_COPY_THRES) {
787 skb = alloc_skb(len, GFP_ATOMIC);
788 if (likely(skb != NULL)) {
789 __skb_put(skb, len);
790 pci_dma_sync_single_for_cpu(adap->pdev,
791 pci_unmap_addr(sd, dma_addr), len,
792 PCI_DMA_FROMDEVICE);
793 memcpy(skb->data, sd->skb->data, len);
794 pci_dma_sync_single_for_device(adap->pdev,
795 pci_unmap_addr(sd, dma_addr), len,
796 PCI_DMA_FROMDEVICE);
797 } else if (!drop_thres)
798 goto use_orig_buf;
799recycle:
800 recycle_rx_buf(adap, fl, fl->cidx);
801 return skb;
802 }
803
804 if (unlikely(fl->credits < drop_thres) &&
805 refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1),
806 GFP_ATOMIC | __GFP_COMP) == 0)
807 goto recycle;
808
809use_orig_buf:
810 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
811 fl->buf_size, PCI_DMA_FROMDEVICE);
812 skb = sd->skb;
813 skb_put(skb, len);
814 __refill_fl(adap, fl);
815 return skb;
816}

/**
 *	get_packet_pg - return the next ingress packet buffer from a free list
 *	@adap: the adapter that received the packet
 *	@fl: the SGE free list holding the packet
 *	@q: the response queue the packet belongs to
 *	@len: the packet length
 *	@drop_thres: # of remaining buffers before we start dropping packets
 *
 *	Get the next packet from a free list populated with page chunks.
 *	If the packet is small we make a copy and recycle the original buffer,
 *	otherwise the page chunk is attached to a (possibly pre-existing)
 *	sk_buff as a page fragment.  If a positive drop threshold is supplied
 *	packets are dropped and their buffers recycled if the number of
 *	remaining buffers is under the threshold and the packet is too big to
 *	copy.
 */
836static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
837 struct sge_rspq *q, unsigned int len,
838 unsigned int drop_thres)
839{
840 struct sk_buff *newskb, *skb;
841 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
842
843 dma_addr_t dma_addr = pci_unmap_addr(sd, dma_addr);
844
845 newskb = skb = q->pg_skb;
846 if (!skb && (len <= SGE_RX_COPY_THRES)) {
847 newskb = alloc_skb(len, GFP_ATOMIC);
848 if (likely(newskb != NULL)) {
849 __skb_put(newskb, len);
850 pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
851 PCI_DMA_FROMDEVICE);
852 memcpy(newskb->data, sd->pg_chunk.va, len);
853 pci_dma_sync_single_for_device(adap->pdev, dma_addr,
854 len,
855 PCI_DMA_FROMDEVICE);
856 } else if (!drop_thres)
857 return NULL;
858recycle:
859 fl->credits--;
860 recycle_rx_buf(adap, fl, fl->cidx);
861 q->rx_recycle_buf++;
862 return newskb;
863 }
864
865 if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
866 goto recycle;
867
868 prefetch(sd->pg_chunk.p_cnt);
869
870 if (!skb)
871 newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
872
873 if (unlikely(!newskb)) {
874 if (!drop_thres)
875 return NULL;
876 goto recycle;
877 }
878
879 pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
880 PCI_DMA_FROMDEVICE);
881 (*sd->pg_chunk.p_cnt)--;
882 if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
883 pci_unmap_page(adap->pdev,
884 sd->pg_chunk.mapping,
885 fl->alloc_size,
886 PCI_DMA_FROMDEVICE);
887 if (!skb) {
888 __skb_put(newskb, SGE_RX_PULL_LEN);
889 memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
890 skb_fill_page_desc(newskb, 0, sd->pg_chunk.page,
891 sd->pg_chunk.offset + SGE_RX_PULL_LEN,
892 len - SGE_RX_PULL_LEN);
893 newskb->len = len;
894 newskb->data_len = len - SGE_RX_PULL_LEN;
895 newskb->truesize += newskb->data_len;
896 } else {
897 skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags,
898 sd->pg_chunk.page,
899 sd->pg_chunk.offset, len);
900 newskb->len += len;
901 newskb->data_len += len;
902 newskb->truesize += len;
903 }
904
905 fl->credits--;
	/*
	 * The free list is not refilled here; the caller does that so the
	 * refill can overlap the prefetch of the returned buffer.
	 */
910 return newskb;
911}
912
913
914
915
916
917
918
919static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
920{
921 struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);
922
923 if (skb) {
924 __skb_put(skb, IMMED_PKT_SIZE);
925 skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
926 }
927 return skb;
928}
929
930
931
932
933
934
935
936
937static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
938{
939 unsigned int flits;
940
941 if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
942 return 1;
943
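	/*
	 * Non-immediate packets need an SGL covering the linear data plus all
	 * page fragments, and 2 flits of CPL/WR header; LSO adds one more
	 * flit for the LSO control information.
	 */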
944 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
945 if (skb_shinfo(skb)->gso_size)
946 flits++;
947 return flits_to_desc(flits);
948}

/**
 *	make_sgl - populate a scatter/gather list for a packet
 *	@skb: the packet
 *	@sgp: the SGL to populate
 *	@start: start address of skb main body data to include in the SGL
 *	@len: length of skb main body data to include in the SGL
 *	@pdev: the PCI device
 *
 *	Generates a scatter/gather list for the buffers that make up a packet
 *	and returns the SGL size in 8-byte words.  The caller must size the
 *	SGL appropriately.
 */
962static inline unsigned int make_sgl(const struct sk_buff *skb,
963 struct sg_ent *sgp, unsigned char *start,
964 unsigned int len, struct pci_dev *pdev)
965{
966 dma_addr_t mapping;
967 unsigned int i, j = 0, nfrags;
968
969 if (len) {
970 mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
971 sgp->len[0] = cpu_to_be32(len);
972 sgp->addr[0] = cpu_to_be64(mapping);
973 j = 1;
974 }
975
976 nfrags = skb_shinfo(skb)->nr_frags;
977 for (i = 0; i < nfrags; i++) {
978 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
979
980 mapping = pci_map_page(pdev, frag->page, frag->page_offset,
981 frag->size, PCI_DMA_TODEVICE);
982 sgp->len[j] = cpu_to_be32(frag->size);
983 sgp->addr[j] = cpu_to_be64(mapping);
984 j ^= 1;
985 if (j == 0)
986 ++sgp;
987 }
988 if (j)
989 sgp->len[j] = 0;
990 return ((nfrags + (len != 0)) * 3) / 2 + j;
991}

/**
 *	check_ring_tx_db - check and potentially ring a Tx queue's doorbell
 *	@adap: the adapter
 *	@q: the Tx queue
 *
 *	When GTS is disabled we simply ring the doorbell.  With GTS enabled we
 *	ring it only if the queue is asleep; the race where the HW goes to
 *	sleep just after we check is handled by the interrupt path, which sees
 *	the outstanding Tx packet and rings the doorbell for us.
 */
1005static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
1006{
1007#if USE_GTS
1008 clear_bit(TXQ_LAST_PKT_DB, &q->flags);
1009 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
1010 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1011 t3_write_reg(adap, A_SG_KDOORBELL,
1012 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1013 }
1014#else
1015 wmb();
1016 t3_write_reg(adap, A_SG_KDOORBELL,
1017 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1018#endif
1019}
1020
1021static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
1022{
1023#if SGE_NUM_GENBITS == 2
1024 d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
1025#endif
1026}

/**
 *	write_wr_hdr_sgl - write a WR header and, optionally, SGL
 *	@ndesc: number of Tx descriptors spanned by the SGL
 *	@skb: the packet corresponding to the WR
 *	@d: first Tx descriptor to be written
 *	@pidx: index of above descriptors
 *	@q: the SGE Tx queue
 *	@sgl: the SGL
 *	@flits: number of flits to the start of the SGL in the first descriptor
 *	@sgl_flits: the SGL size in flits
 *	@gen: the Tx descriptor generation
 *	@wr_hi: top 32 bits of WR header based on WR type (big endian)
 *	@wr_lo: low 32 bits of WR header based on WR type (big endian)
 *
 *	Write a work request header and an associated SGL.  If the SGL is
 *	small enough to fit into one Tx descriptor it has already been written
 *	and we just need to write the WR header.  Otherwise we distribute the
 *	SGL across the number of descriptors it spans.
 */
1047static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
1048 struct tx_desc *d, unsigned int pidx,
1049 const struct sge_txq *q,
1050 const struct sg_ent *sgl,
1051 unsigned int flits, unsigned int sgl_flits,
1052 unsigned int gen, __be32 wr_hi,
1053 __be32 wr_lo)
1054{
1055 struct work_request_hdr *wrp = (struct work_request_hdr *)d;
1056 struct tx_sw_desc *sd = &q->sdesc[pidx];
1057
1058 sd->skb = skb;
1059 if (need_skb_unmap()) {
1060 sd->fragidx = 0;
1061 sd->addr_idx = 0;
1062 sd->sflit = flits;
1063 }
1064
1065 if (likely(ndesc == 1)) {
1066 sd->eop = 1;
1067 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
1068 V_WR_SGLSFLT(flits)) | wr_hi;
1069 wmb();
1070 wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
1071 V_WR_GEN(gen)) | wr_lo;
1072 wr_gen2(d, gen);
1073 } else {
1074 unsigned int ogen = gen;
1075 const u64 *fp = (const u64 *)sgl;
1076 struct work_request_hdr *wp = wrp;
1077
1078 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
1079 V_WR_SGLSFLT(flits)) | wr_hi;
1080
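		/*
		 * The SGL spans multiple descriptors: copy it out one
		 * descriptor's worth of flits at a time, giving each
		 * continuation descriptor its own WR header and generation.
		 */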
1081 while (sgl_flits) {
1082 unsigned int avail = WR_FLITS - flits;
1083
1084 if (avail > sgl_flits)
1085 avail = sgl_flits;
1086 memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
1087 sgl_flits -= avail;
1088 ndesc--;
1089 if (!sgl_flits)
1090 break;
1091
1092 fp += avail;
1093 d++;
1094 sd->eop = 0;
1095 sd++;
1096 if (++pidx == q->size) {
1097 pidx = 0;
1098 gen ^= 1;
1099 d = q->desc;
1100 sd = q->sdesc;
1101 }
1102
1103 sd->skb = skb;
1104 wrp = (struct work_request_hdr *)d;
1105 wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
1106 V_WR_SGLSFLT(1)) | wr_hi;
1107 wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
1108 sgl_flits + 1)) |
1109 V_WR_GEN(gen)) | wr_lo;
1110 wr_gen2(d, gen);
1111 flits = 1;
1112 }
1113 sd->eop = 1;
1114 wrp->wr_hi |= htonl(F_WR_EOP);
1115 wmb();
1116 wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
1117 wr_gen2((struct tx_desc *)wp, ogen);
1118 WARN_ON(ndesc != 0);
1119 }
1120}

/**
 *	write_tx_pkt_wr - write a TX_PKT work request
 *	@adap: the adapter
 *	@skb: the packet to send
 *	@pi: the egress interface
 *	@pidx: index of the first Tx descriptor to write
 *	@gen: the generation value to use
 *	@q: the Tx queue
 *	@ndesc: number of descriptors the packet will occupy
 *	@compl: the value of the COMPL bit to use
 *
 *	Generate a TX_PKT work request to send the supplied packet.
 */
1135static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
1136 const struct port_info *pi,
1137 unsigned int pidx, unsigned int gen,
1138 struct sge_txq *q, unsigned int ndesc,
1139 unsigned int compl)
1140{
1141 unsigned int flits, sgl_flits, cntrl, tso_info;
1142 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1143 struct tx_desc *d = &q->desc[pidx];
1144 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
1145
1146 cpl->len = htonl(skb->len);
1147 cntrl = V_TXPKT_INTF(pi->port_id);
1148
1149 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
1150 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
1151
1152 tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
1153 if (tso_info) {
1154 int eth_type;
1155 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
1156
1157 d->flit[2] = 0;
1158 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
1159 hdr->cntrl = htonl(cntrl);
1160 eth_type = skb_network_offset(skb) == ETH_HLEN ?
1161 CPL_ETH_II : CPL_ETH_II_VLAN;
1162 tso_info |= V_LSO_ETH_TYPE(eth_type) |
1163 V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
1164 V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
1165 hdr->lso_info = htonl(tso_info);
1166 flits = 3;
1167 } else {
1168 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1169 cntrl |= F_TXPKT_IPCSUM_DIS;
1170 cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
1171 cpl->cntrl = htonl(cntrl);
1172
1173 if (skb->len <= WR_LEN - sizeof(*cpl)) {
1174 q->sdesc[pidx].skb = NULL;
1175 if (!skb->data_len)
1176 skb_copy_from_linear_data(skb, &d->flit[2],
1177 skb->len);
1178 else
1179 skb_copy_bits(skb, 0, &d->flit[2], skb->len);
1180
1181 flits = (skb->len + 7) / 8 + 2;
1182 cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
1183 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
1184 | F_WR_SOP | F_WR_EOP | compl);
1185 wmb();
1186 cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
1187 V_WR_TID(q->token));
1188 wr_gen2(d, gen);
1189 kfree_skb(skb);
1190 return;
1191 }
1192
1193 flits = 2;
1194 }
1195
1196 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1197 sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
1198
1199 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
1200 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
1201 htonl(V_WR_TID(q->token)));
1202}
1203
1204static inline void t3_stop_tx_queue(struct netdev_queue *txq,
1205 struct sge_qset *qs, struct sge_txq *q)
1206{
1207 netif_tx_stop_queue(txq);
1208 set_bit(TXQ_ETH, &qs->txq_stopped);
1209 q->stops++;
1210}

/**
 *	t3_eth_xmit - add a packet to the Ethernet Tx queue
 *	@skb: the packet
 *	@dev: the egress net device
 *
 *	Add a packet to an SGE Tx queue.  Runs with softirqs disabled.
 */
1219netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1220{
1221 int qidx;
1222 unsigned int ndesc, pidx, credits, gen, compl;
1223 const struct port_info *pi = netdev_priv(dev);
1224 struct adapter *adap = pi->adapter;
1225 struct netdev_queue *txq;
1226 struct sge_qset *qs;
1227 struct sge_txq *q;

	/*
	 * The chip min packet length is 9 octets but play safe and reject
	 * anything shorter than an Ethernet header.
	 */
1233 if (unlikely(skb->len < ETH_HLEN)) {
1234 dev_kfree_skb(skb);
1235 return NETDEV_TX_OK;
1236 }
1237
1238 qidx = skb_get_queue_mapping(skb);
1239 qs = &pi->qs[qidx];
1240 q = &qs->txq[TXQ_ETH];
1241 txq = netdev_get_tx_queue(dev, qidx);
1242
1243 reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1244
1245 credits = q->size - q->in_use;
1246 ndesc = calc_tx_descs(skb);
1247
1248 if (unlikely(credits < ndesc)) {
1249 t3_stop_tx_queue(txq, qs, q);
1250 dev_err(&adap->pdev->dev,
1251 "%s: Tx ring %u full while queue awake!\n",
1252 dev->name, q->cntxt_id & 7);
1253 return NETDEV_TX_BUSY;
1254 }
1255
1256 q->in_use += ndesc;
1257 if (unlikely(credits - ndesc < q->stop_thres)) {
1258 t3_stop_tx_queue(txq, qs, q);
1259
1260 if (should_restart_tx(q) &&
1261 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1262 q->restarts++;
1263 netif_tx_wake_queue(txq);
1264 }
1265 }
1266
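	/*
	 * Request a Tx completion from the HW roughly every 8 descriptors so
	 * that credits keep flowing back even without further transmits.
	 */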
1267 gen = q->gen;
1268 q->unacked += ndesc;
1269 compl = (q->unacked & 8) << (S_WR_COMPL - 3);
1270 q->unacked &= 7;
1271 pidx = q->pidx;
1272 q->pidx += ndesc;
1273 if (q->pidx >= q->size) {
1274 q->pidx -= q->size;
1275 q->gen ^= 1;
1276 }

	/* update port statistics */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		qs->port_stats[SGE_PSTAT_TX_CSUM]++;
1281 if (skb_shinfo(skb)->gso_size)
1282 qs->port_stats[SGE_PSTAT_TSO]++;
1283 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
1284 qs->port_stats[SGE_PSTAT_VLANINS]++;

	/*
	 * We do not use Tx completion interrupts to free DMAd Tx packets.
	 * This is good for performance but means that we rely on new Tx
	 * packets arriving to run the destructors of completed packets,
	 * which open up space in their sockets' send queues.  Sometimes
	 * we do not get such new packets causing Tx to stall.  A single
	 * UDP transmitter is a good example of this situation.  We have
	 * a clean up timer that periodically reclaims completed packets
	 * but it doesn't run often enough (nor do we want it to) to prevent
	 * lengthy stalls.  A solution to this problem is to run the
	 * destructor early, just after the packet is queued but before it's
	 * DMAd.  A cons is that we lie to socket memory accounting, but the
	 * amount of extra memory is reasonable (limited by the number of Tx
	 * descriptors), the packets do actually get freed quickly by new
	 * packets almost always, and for protocols like TCP that wait for
	 * acks to really free up the data the extra memory is even less.
	 * On the positive side we run the destructors on the sending CPU
	 * rather than on a potentially foreign one.
	 */
1310 if (likely(!skb_shared(skb)))
1311 skb_orphan(skb);
1312
1313 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
1314 check_ring_tx_db(adap, q);
1315 return NETDEV_TX_OK;
1316}
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
1331 unsigned int len, unsigned int gen)
1332{
1333 struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
1334 struct work_request_hdr *to = (struct work_request_hdr *)d;
1335
1336 if (likely(!skb->data_len))
1337 memcpy(&to[1], &from[1], len - sizeof(*from));
1338 else
1339 skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));
1340
1341 to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
1342 V_WR_BCNTLFLT(len & 7));
1343 wmb();
1344 to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
1345 V_WR_LEN((len + 7) / 8));
1346 wr_gen2(d, gen);
1347 kfree_skb(skb);
1348}
1349

/**
 *	check_desc_avail - check descriptor availability on a send queue
 *	@adap: the adapter
 *	@q: the send queue
 *	@skb: the packet needing the descriptors
 *	@ndesc: the number of Tx descriptors needed
 *	@qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
 *
 *	Checks if the requested number of Tx descriptors is available on an
 *	SGE send queue.  If the queue is already suspended or not enough
 *	descriptors are available the packet is queued for later transmission.
 *	Must be called with the Tx queue locked.
 *
 *	Returns 0 if enough descriptors are available, 1 if there aren't
 *	enough and the packet has been queued, and 2 if the caller needs to
 *	retry because a previously suspended queue has become available.
 */
1368static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
1369 struct sk_buff *skb, unsigned int ndesc,
1370 unsigned int qid)
1371{
1372 if (unlikely(!skb_queue_empty(&q->sendq))) {
1373 addq_exit:__skb_queue_tail(&q->sendq, skb);
1374 return 1;
1375 }
1376 if (unlikely(q->size - q->in_use < ndesc)) {
1377 struct sge_qset *qs = txq_to_qset(q, qid);
1378
1379 set_bit(qid, &qs->txq_stopped);
1380 smp_mb__after_clear_bit();
1381
1382 if (should_restart_tx(q) &&
1383 test_and_clear_bit(qid, &qs->txq_stopped))
1384 return 2;
1385
1386 q->stops++;
1387 goto addq_exit;
1388 }
1389 return 0;
1390}
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1401{
1402 unsigned int reclaim = q->processed - q->cleaned;
1403
1404 q->in_use -= reclaim;
1405 q->cleaned += reclaim;
1406}
1407
1408static inline int immediate(const struct sk_buff *skb)
1409{
1410 return skb->len <= WR_LEN;
1411}
1412

/**
 *	ctrl_xmit - send a packet through an SGE control Tx queue
 *	@adap: the adapter
 *	@q: the control queue
 *	@skb: the packet
 *
 *	Send a packet through an SGE control Tx queue.  Packets sent through
 *	a control queue must carry all their data as immediate data (no SGL)
 *	and fit within WR_LEN.
 */
1423static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
1424 struct sk_buff *skb)
1425{
1426 int ret;
1427 struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
1428
1429 if (unlikely(!immediate(skb))) {
1430 WARN_ON(1);
1431 dev_kfree_skb(skb);
1432 return NET_XMIT_SUCCESS;
1433 }
1434
1435 wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
1436 wrp->wr_lo = htonl(V_WR_TID(q->token));
1437
1438 spin_lock(&q->lock);
1439 again:reclaim_completed_tx_imm(q);
1440
1441 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
1442 if (unlikely(ret)) {
1443 if (ret == 1) {
1444 spin_unlock(&q->lock);
1445 return NET_XMIT_CN;
1446 }
1447 goto again;
1448 }
1449
1450 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1451
1452 q->in_use++;
1453 if (++q->pidx >= q->size) {
1454 q->pidx = 0;
1455 q->gen ^= 1;
1456 }
1457 spin_unlock(&q->lock);
1458 wmb();
1459 t3_write_reg(adap, A_SG_KDOORBELL,
1460 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1461 return NET_XMIT_SUCCESS;
1462}
1463
1464
1465
1466
1467
1468
1469
1470static void restart_ctrlq(unsigned long data)
1471{
1472 struct sk_buff *skb;
1473 struct sge_qset *qs = (struct sge_qset *)data;
1474 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1475
1476 spin_lock(&q->lock);
1477 again:reclaim_completed_tx_imm(q);
1478
1479 while (q->in_use < q->size &&
1480 (skb = __skb_dequeue(&q->sendq)) != NULL) {
1481
1482 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1483
1484 if (++q->pidx >= q->size) {
1485 q->pidx = 0;
1486 q->gen ^= 1;
1487 }
1488 q->in_use++;
1489 }
1490
1491 if (!skb_queue_empty(&q->sendq)) {
1492 set_bit(TXQ_CTRL, &qs->txq_stopped);
1493 smp_mb__after_clear_bit();
1494
1495 if (should_restart_tx(q) &&
1496 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1497 goto again;
1498 q->stops++;
1499 }
1500
1501 spin_unlock(&q->lock);
1502 wmb();
1503 t3_write_reg(qs->adap, A_SG_KDOORBELL,
1504 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1505}
1506
1507
1508
1509
1510int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1511{
1512 int ret;
1513 local_bh_disable();
1514 ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
1515 local_bh_enable();
1516
1517 return ret;
1518}
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528static void deferred_unmap_destructor(struct sk_buff *skb)
1529{
1530 int i;
1531 const dma_addr_t *p;
1532 const struct skb_shared_info *si;
1533 const struct deferred_unmap_info *dui;
1534
1535 dui = (struct deferred_unmap_info *)skb->head;
1536 p = dui->addr;
1537
1538 if (skb->tail - skb->transport_header)
1539 pci_unmap_single(dui->pdev, *p++,
1540 skb->tail - skb->transport_header,
1541 PCI_DMA_TODEVICE);
1542
1543 si = skb_shinfo(skb);
1544 for (i = 0; i < si->nr_frags; i++)
1545 pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
1546 PCI_DMA_TODEVICE);
1547}
1548
1549static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
1550 const struct sg_ent *sgl, int sgl_flits)
1551{
1552 dma_addr_t *p;
1553 struct deferred_unmap_info *dui;
1554
1555 dui = (struct deferred_unmap_info *)skb->head;
1556 dui->pdev = pdev;
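	/* Each full SG entry carries two DMA addresses; a trailing odd entry carries one. */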
1557 for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
1558 *p++ = be64_to_cpu(sgl->addr[0]);
1559 *p++ = be64_to_cpu(sgl->addr[1]);
1560 }
1561 if (sgl_flits)
1562 *p = be64_to_cpu(sgl->addr[0]);
1563}
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1578 struct sge_txq *q, unsigned int pidx,
1579 unsigned int gen, unsigned int ndesc)
1580{
1581 unsigned int sgl_flits, flits;
1582 struct work_request_hdr *from;
1583 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1584 struct tx_desc *d = &q->desc[pidx];
1585
1586 if (immediate(skb)) {
1587 q->sdesc[pidx].skb = NULL;
1588 write_imm(d, skb, skb->len, gen);
1589 return;
1590 }

	/* Only TX_DATA builds SGLs */
1594 from = (struct work_request_hdr *)skb->data;
1595 memcpy(&d->flit[1], &from[1],
1596 skb_transport_offset(skb) - sizeof(*from));
1597
1598 flits = skb_transport_offset(skb) / 8;
1599 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1600 sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
1601 skb->tail - skb->transport_header,
1602 adap->pdev);
1603 if (need_skb_unmap()) {
1604 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
1605 skb->destructor = deferred_unmap_destructor;
1606 }
1607
1608 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1609 gen, from->wr_hi, from->wr_lo);
1610}
1611
1612
1613
1614
1615
1616
1617
1618
1619static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1620{
1621 unsigned int flits, cnt;
1622
1623 if (skb->len <= WR_LEN)
1624 return 1;
1625
1626 flits = skb_transport_offset(skb) / 8;
1627 cnt = skb_shinfo(skb)->nr_frags;
1628 if (skb->tail != skb->transport_header)
1629 cnt++;
1630 return flits_to_desc(flits + sgl_len(cnt));
1631}
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1642 struct sk_buff *skb)
1643{
1644 int ret;
1645 unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1646
1647 spin_lock(&q->lock);
1648again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1649
1650 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1651 if (unlikely(ret)) {
1652 if (ret == 1) {
1653 skb->priority = ndesc;
1654 spin_unlock(&q->lock);
1655 return NET_XMIT_CN;
1656 }
1657 goto again;
1658 }
1659
1660 gen = q->gen;
1661 q->in_use += ndesc;
1662 pidx = q->pidx;
1663 q->pidx += ndesc;
1664 if (q->pidx >= q->size) {
1665 q->pidx -= q->size;
1666 q->gen ^= 1;
1667 }
1668 spin_unlock(&q->lock);
1669
1670 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1671 check_ring_tx_db(adap, q);
1672 return NET_XMIT_SUCCESS;
1673}
1674
1675
1676
1677
1678
1679
1680
1681static void restart_offloadq(unsigned long data)
1682{
1683 struct sk_buff *skb;
1684 struct sge_qset *qs = (struct sge_qset *)data;
1685 struct sge_txq *q = &qs->txq[TXQ_OFLD];
1686 const struct port_info *pi = netdev_priv(qs->netdev);
1687 struct adapter *adap = pi->adapter;
1688
1689 spin_lock(&q->lock);
1690again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1691
1692 while ((skb = skb_peek(&q->sendq)) != NULL) {
1693 unsigned int gen, pidx;
1694 unsigned int ndesc = skb->priority;
1695
1696 if (unlikely(q->size - q->in_use < ndesc)) {
1697 set_bit(TXQ_OFLD, &qs->txq_stopped);
1698 smp_mb__after_clear_bit();
1699
1700 if (should_restart_tx(q) &&
1701 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1702 goto again;
1703 q->stops++;
1704 break;
1705 }
1706
1707 gen = q->gen;
1708 q->in_use += ndesc;
1709 pidx = q->pidx;
1710 q->pidx += ndesc;
1711 if (q->pidx >= q->size) {
1712 q->pidx -= q->size;
1713 q->gen ^= 1;
1714 }
1715 __skb_unlink(skb, &q->sendq);
1716 spin_unlock(&q->lock);
1717
1718 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1719 spin_lock(&q->lock);
1720 }
1721 spin_unlock(&q->lock);
1722
1723#if USE_GTS
1724 set_bit(TXQ_RUNNING, &q->flags);
1725 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1726#endif
1727 wmb();
1728 t3_write_reg(adap, A_SG_KDOORBELL,
1729 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1730}
1731
1732
1733
1734
1735
1736
1737
1738
1739static inline int queue_set(const struct sk_buff *skb)
1740{
1741 return skb->priority >> 1;
1742}
1743
1744
1745
1746
1747
1748
1749
1750
1751static inline int is_ctrl_pkt(const struct sk_buff *skb)
1752{
1753 return skb->priority & 1;
1754}
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1766{
1767 struct adapter *adap = tdev2adap(tdev);
1768 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1769
1770 if (unlikely(is_ctrl_pkt(skb)))
1771 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1772
1773 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1774}
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1786{
1787 int was_empty = skb_queue_empty(&q->rx_queue);
1788
1789 __skb_queue_tail(&q->rx_queue, skb);
1790
1791 if (was_empty) {
1792 struct sge_qset *qs = rspq_to_qset(q);
1793
1794 napi_schedule(&qs->napi);
1795 }
1796}
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807static inline void deliver_partial_bundle(struct t3cdev *tdev,
1808 struct sge_rspq *q,
1809 struct sk_buff *skbs[], int n)
1810{
1811 if (n) {
1812 q->offload_bundles++;
1813 tdev->recv(tdev, skbs, n);
1814 }
1815}
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828static int ofld_poll(struct napi_struct *napi, int budget)
1829{
1830 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
1831 struct sge_rspq *q = &qs->rspq;
1832 struct adapter *adapter = qs->adap;
1833 int work_done = 0;
1834
1835 while (work_done < budget) {
1836 struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
1837 struct sk_buff_head queue;
1838 int ngathered;
1839
1840 spin_lock_irq(&q->lock);
1841 __skb_queue_head_init(&queue);
1842 skb_queue_splice_init(&q->rx_queue, &queue);
1843 if (skb_queue_empty(&queue)) {
1844 napi_complete(napi);
1845 spin_unlock_irq(&q->lock);
1846 return work_done;
1847 }
1848 spin_unlock_irq(&q->lock);
1849
1850 ngathered = 0;
1851 skb_queue_walk_safe(&queue, skb, tmp) {
1852 if (work_done >= budget)
1853 break;
1854 work_done++;
1855
1856 __skb_unlink(skb, &queue);
1857 prefetch(skb->data);
1858 skbs[ngathered] = skb;
1859 if (++ngathered == RX_BUNDLE_SIZE) {
1860 q->offload_bundles++;
1861 adapter->tdev.recv(&adapter->tdev, skbs,
1862 ngathered);
1863 ngathered = 0;
1864 }
1865 }
1866 if (!skb_queue_empty(&queue)) {
			/* packets left over; splice them back onto the Rx queue */
1868 spin_lock_irq(&q->lock);
1869 skb_queue_splice(&queue, &q->rx_queue);
1870 spin_unlock_irq(&q->lock);
1871 }
1872 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1873 }
1874
1875 return work_done;
1876}
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1890 struct sk_buff *skb, struct sk_buff *rx_gather[],
1891 unsigned int gather_idx)
1892{
1893 skb_reset_mac_header(skb);
1894 skb_reset_network_header(skb);
1895 skb_reset_transport_header(skb);
1896
1897 if (rq->polling) {
1898 rx_gather[gather_idx++] = skb;
1899 if (gather_idx == RX_BUNDLE_SIZE) {
1900 tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1901 gather_idx = 0;
1902 rq->offload_bundles++;
1903 }
1904 } else
1905 offload_enqueue(rq, skb);
1906
1907 return gather_idx;
1908}
1909
1910
1911
1912
1913
1914
1915
1916
1917static void restart_tx(struct sge_qset *qs)
1918{
1919 if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1920 should_restart_tx(&qs->txq[TXQ_ETH]) &&
1921 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1922 qs->txq[TXQ_ETH].restarts++;
1923 if (netif_running(qs->netdev))
1924 netif_tx_wake_queue(qs->tx_q);
1925 }
1926
1927 if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1928 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1929 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1930 qs->txq[TXQ_OFLD].restarts++;
1931 tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
1932 }
1933 if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
1934 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
1935 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
1936 qs->txq[TXQ_CTRL].restarts++;
1937 tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
1938 }
1939}
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949static void cxgb3_arp_process(struct adapter *adapter, struct sk_buff *skb)
1950{
1951 struct net_device *dev = skb->dev;
1952 struct port_info *pi;
1953 struct arphdr *arp;
1954 unsigned char *arp_ptr;
1955 unsigned char *sha;
1956 __be32 sip, tip;
1957
1958 if (!dev)
1959 return;
1960
1961 skb_reset_network_header(skb);
1962 arp = arp_hdr(skb);
1963
1964 if (arp->ar_op != htons(ARPOP_REQUEST))
1965 return;
1966
1967 arp_ptr = (unsigned char *)(arp + 1);
1968 sha = arp_ptr;
1969 arp_ptr += dev->addr_len;
1970 memcpy(&sip, arp_ptr, sizeof(sip));
1971 arp_ptr += sizeof(sip);
1972 arp_ptr += dev->addr_len;
1973 memcpy(&tip, arp_ptr, sizeof(tip));
1974
1975 pi = netdev_priv(dev);
1976 if (tip != pi->iscsi_ipv4addr)
1977 return;
1978
1979 arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
1980 dev->dev_addr, sha);
1981
1982}
1983
1984static inline int is_arp(struct sk_buff *skb)
1985{
1986 return skb->protocol == htons(ETH_P_ARP);
1987}

/**
 *	rx_eth - process an ingress Ethernet packet
 *	@adap: the adapter
 *	@rq: the response queue that received the packet
 *	@skb: the packet
 *	@pad: padding between the CPL header and the packet data
 *	@lro: whether the packet is eligible for GRO processing
 *
 *	Process an ingress Ethernet packet and deliver it to the stack,
 *	performing checksum validation, VLAN tag extraction, and iSCSI ARP
 *	handling as needed.
 */
2000static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
2001 struct sk_buff *skb, int pad, int lro)
2002{
2003 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
2004 struct sge_qset *qs = rspq_to_qset(rq);
2005 struct port_info *pi;
2006
2007 skb_pull(skb, sizeof(*p) + pad);
2008 skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
2009 pi = netdev_priv(skb->dev);
2010 if ((pi->rx_offload & T3_RX_CSUM) && p->csum_valid &&
2011 p->csum == htons(0xffff) && !p->fragment) {
2012 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2013 skb->ip_summed = CHECKSUM_UNNECESSARY;
2014 } else
2015 skb->ip_summed = CHECKSUM_NONE;
2016 skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
2017
2018 if (unlikely(p->vlan_valid)) {
2019 struct vlan_group *grp = pi->vlan_grp;
2020
2021 qs->port_stats[SGE_PSTAT_VLANEX]++;
2022 if (likely(grp))
2023 if (lro)
2024 vlan_gro_receive(&qs->napi, grp,
2025 ntohs(p->vlan), skb);
2026 else {
2027 if (unlikely(pi->iscsi_ipv4addr &&
2028 is_arp(skb))) {
2029 unsigned short vtag = ntohs(p->vlan) &
2030 VLAN_VID_MASK;
2031 skb->dev = vlan_group_get_device(grp,
2032 vtag);
2033 cxgb3_arp_process(adap, skb);
2034 }
2035 __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
2036 rq->polling);
2037 }
2038 else
2039 dev_kfree_skb_any(skb);
2040 } else if (rq->polling) {
2041 if (lro)
2042 napi_gro_receive(&qs->napi, skb);
2043 else {
2044 if (unlikely(pi->iscsi_ipv4addr && is_arp(skb)))
2045 cxgb3_arp_process(adap, skb);
2046 netif_receive_skb(skb);
2047 }
2048 } else
2049 netif_rx(skb);
2050}
2051
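/*
 * True if the RSS hash type indicates a 4-tuple (TCP/IP) hash, i.e., the
 * frame is a TCP packet and a candidate for GRO/LRO aggregation.
 */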
2052static inline int is_eth_tcp(u32 rss)
2053{
2054 return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
2055}
2056

/**
 *	lro_add_page - add a page chunk to an LRO sk_buff
 *	@adap: the adapter
 *	@qs: the associated queue set
 *	@fl: the free list containing the page chunk to add
 *	@len: the length of the data in the chunk
 *	@complete: indicates the last fragment of a frame
 *
 *	Add a received packet contained in a page chunk to the GRO fragment
 *	sk_buff and, if this is the last fragment, hand the frame to GRO.
 */
2068static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2069 struct sge_fl *fl, int len, int complete)
2070{
2071 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
2072 struct sk_buff *skb = NULL;
2073 struct cpl_rx_pkt *cpl;
2074 struct skb_frag_struct *rx_frag;
2075 int nr_frags;
2076 int offset = 0;
2077
2078 if (!qs->nomem) {
2079 skb = napi_get_frags(&qs->napi);
2080 qs->nomem = !skb;
2081 }
2082
2083 fl->credits--;
2084
2085 pci_dma_sync_single_for_cpu(adap->pdev,
2086 pci_unmap_addr(sd, dma_addr),
2087 fl->buf_size - SGE_PG_RSVD,
2088 PCI_DMA_FROMDEVICE);
2089
2090 (*sd->pg_chunk.p_cnt)--;
2091 if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
2092 pci_unmap_page(adap->pdev,
2093 sd->pg_chunk.mapping,
2094 fl->alloc_size,
2095 PCI_DMA_FROMDEVICE);
2096
2097 if (!skb) {
2098 put_page(sd->pg_chunk.page);
2099 if (complete)
2100 qs->nomem = 0;
2101 return;
2102 }
2103
2104 rx_frag = skb_shinfo(skb)->frags;
2105 nr_frags = skb_shinfo(skb)->nr_frags;
2106
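	/*
	 * The first chunk of a frame still contains the 2-byte alignment pad
	 * and the CPL_RX_PKT header; skip past them and remember where the
	 * CPL header starts so it can be examined when the frame completes.
	 */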
2107 if (!nr_frags) {
2108 offset = 2 + sizeof(struct cpl_rx_pkt);
2109 qs->lro_va = sd->pg_chunk.va + 2;
2110 }
2111 len -= offset;
2112
2113 prefetch(qs->lro_va);
2114
2115 rx_frag += nr_frags;
2116 rx_frag->page = sd->pg_chunk.page;
2117 rx_frag->page_offset = sd->pg_chunk.offset + offset;
2118 rx_frag->size = len;
2119
2120 skb->len += len;
2121 skb->data_len += len;
2122 skb->truesize += len;
2123 skb_shinfo(skb)->nr_frags++;
2124
2125 if (!complete)
2126 return;
2127
2128 skb->ip_summed = CHECKSUM_UNNECESSARY;
2129 cpl = qs->lro_va;
2130
2131 if (unlikely(cpl->vlan_valid)) {
2132 struct net_device *dev = qs->netdev;
2133 struct port_info *pi = netdev_priv(dev);
2134 struct vlan_group *grp = pi->vlan_grp;
2135
2136 if (likely(grp != NULL)) {
2137 vlan_gro_frags(&qs->napi, grp, ntohs(cpl->vlan));
2138 return;
2139 }
2140 }
2141 napi_gro_frags(&qs->napi);
2142}
2143

/**
 *	handle_rsp_cntrl_info - handles control information in a response
 *	@qs: the queue set corresponding to the response
 *	@flags: the response control flags
 *
 *	Handles the control information of an SGE response, such as GTS
 *	indications and completion credits for the queue set's Tx queues.
 */
2153static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
2154{
2155 unsigned int credits;
2156
2157#if USE_GTS
2158 if (flags & F_RSPD_TXQ0_GTS)
2159 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2160#endif
2161
2162 credits = G_RSPD_TXQ0_CR(flags);
2163 if (credits)
2164 qs->txq[TXQ_ETH].processed += credits;
2165
2166 credits = G_RSPD_TXQ2_CR(flags);
2167 if (credits)
2168 qs->txq[TXQ_CTRL].processed += credits;
2169
2170# if USE_GTS
2171 if (flags & F_RSPD_TXQ1_GTS)
2172 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2173# endif
2174 credits = G_RSPD_TXQ1_CR(flags);
2175 if (credits)
2176 qs->txq[TXQ_OFLD].processed += credits;
2177}
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
2190 unsigned int sleeping)
2191{
2192 if (sleeping & F_RSPD_TXQ0_GTS) {
2193 struct sge_txq *txq = &qs->txq[TXQ_ETH];
2194
2195 if (txq->cleaned + txq->in_use != txq->processed &&
2196 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2197 set_bit(TXQ_RUNNING, &txq->flags);
2198 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2199 V_EGRCNTX(txq->cntxt_id));
2200 }
2201 }
2202
2203 if (sleeping & F_RSPD_TXQ1_GTS) {
2204 struct sge_txq *txq = &qs->txq[TXQ_OFLD];
2205
2206 if (txq->cleaned + txq->in_use != txq->processed &&
2207 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2208 set_bit(TXQ_RUNNING, &txq->flags);
2209 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2210 V_EGRCNTX(txq->cntxt_id));
2211 }
2212 }
2213}
2214

/**
 *	is_new_response - check if a response is newly written
 *	@r: the response descriptor
 *	@q: the response queue
 *
 *	Returns true if a response descriptor contains a yet unprocessed
 *	response, as indicated by its generation bit matching the queue's.
 */
2223static inline int is_new_response(const struct rsp_desc *r,
2224 const struct sge_rspq *q)
2225{
2226 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
2227}
2228
2229static inline void clear_rspq_bufstate(struct sge_rspq * const q)
2230{
2231 q->pg_skb = NULL;
2232 q->rx_recycle_buf = 0;
2233}
2234
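/* Response-descriptor flag groups: the Tx GTS bits and all Tx credit-return fields */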
2235#define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
2236#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
2237 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
2238 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
2239 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
2240
2241
2242#define NOMEM_INTR_DELAY 2500

/**
 *	process_responses - process responses from an SGE response queue
 *	@adap: the adapter
 *	@qs: the queue set to which the response queue belongs
 *	@budget: how many responses can be processed in this round
 *
 *	Process responses from an SGE response queue up to the supplied budget.
 *	Responses include received packets as well as credits and other events
 *	for the associated Tx queues of the queue set.  Returns the number of
 *	responses processed.
 */
2259static int process_responses(struct adapter *adap, struct sge_qset *qs,
2260 int budget)
2261{
2262 struct sge_rspq *q = &qs->rspq;
2263 struct rsp_desc *r = &q->desc[q->cidx];
2264 int budget_left = budget;
2265 unsigned int sleeping = 0;
2266 struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
2267 int ngathered = 0;
2268
2269 q->next_holdoff = q->holdoff_tmr;
2270
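	/*
	 * Walk new response descriptors until the budget is exhausted.  Each
	 * response carries an async notification, immediate data, a reference
	 * to a free-list buffer, or is a pure credit/status update.
	 */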
2271 while (likely(budget_left && is_new_response(r, q))) {
2272 int packet_complete, eth, ethpad = 2, lro = qs->lro_enabled;
2273 struct sk_buff *skb = NULL;
2274 u32 len, flags = ntohl(r->flags);
2275 __be32 rss_hi = *(const __be32 *)r,
2276 rss_lo = r->rss_hdr.rss_hash_val;
2277
2278 eth = r->rss_hdr.opcode == CPL_RX_PKT;
2279
2280 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
2281 skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
2282 if (!skb)
2283 goto no_mem;
2284
2285 memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
2286 skb->data[0] = CPL_ASYNC_NOTIF;
2287 rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
2288 q->async_notif++;
2289 } else if (flags & F_RSPD_IMM_DATA_VALID) {
2290 skb = get_imm_packet(r);
2291 if (unlikely(!skb)) {
2292no_mem:
2293 q->next_holdoff = NOMEM_INTR_DELAY;
2294 q->nomem++;
2295
2296 budget_left--;
2297 break;
2298 }
2299 q->imm_data++;
2300 ethpad = 0;
2301 } else if ((len = ntohl(r->len_cq)) != 0) {
2302 struct sge_fl *fl;
2303
2304 lro &= eth && is_eth_tcp(rss_hi);
2305
2306 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2307 if (fl->use_pages) {
2308 void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
2309
2310 prefetch(addr);
2311#if L1_CACHE_BYTES < 128
2312 prefetch(addr + L1_CACHE_BYTES);
2313#endif
2314 __refill_fl(adap, fl);
2315 if (lro > 0) {
2316 lro_add_page(adap, qs, fl,
2317 G_RSPD_LEN(len),
2318 flags & F_RSPD_EOP);
2319 goto next_fl;
2320 }
2321
2322 skb = get_packet_pg(adap, fl, q,
2323 G_RSPD_LEN(len),
2324 eth ?
2325 SGE_RX_DROP_THRES : 0);
2326 q->pg_skb = skb;
2327 } else
2328 skb = get_packet(adap, fl, G_RSPD_LEN(len),
2329 eth ? SGE_RX_DROP_THRES : 0);
2330 if (unlikely(!skb)) {
2331 if (!eth)
2332 goto no_mem;
2333 q->rx_drops++;
2334 } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
2335 __skb_pull(skb, 2);
2336next_fl:
2337 if (++fl->cidx == fl->size)
2338 fl->cidx = 0;
2339 } else
2340 q->pure_rsps++;
2341
2342 if (flags & RSPD_CTRL_MASK) {
2343 sleeping |= flags & RSPD_GTS_MASK;
2344 handle_rsp_cntrl_info(qs, flags);
2345 }
2346
2347 r++;
2348 if (unlikely(++q->cidx == q->size)) {
2349 q->cidx = 0;
2350 q->gen ^= 1;
2351 r = q->desc;
2352 }
2353 prefetch(r);
2354
2355 if (++q->credits >= (q->size / 4)) {
2356 refill_rspq(adap, q, q->credits);
2357 q->credits = 0;
2358 }
2359
2360 packet_complete = flags &
2361 (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
2362 F_RSPD_ASYNC_NOTIF);
2363
2364 if (skb != NULL && packet_complete) {
2365 if (eth)
2366 rx_eth(adap, q, skb, ethpad, lro);
2367 else {
2368 q->offload_pkts++;
2369
2370 skb->csum = rss_hi;
2371 skb->priority = rss_lo;
2372 ngathered = rx_offload(&adap->tdev, q, skb,
2373 offload_skbs,
2374 ngathered);
2375 }
2376
2377 if (flags & F_RSPD_EOP)
2378 clear_rspq_bufstate(q);
2379 }
2380 --budget_left;
2381 }
2382
2383 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
2384
2385 if (sleeping)
2386 check_ring_db(adap, qs, sleeping);
2387
2388 smp_mb();
2389 if (unlikely(qs->txq_stopped != 0))
2390 restart_tx(qs);
2391
2392 budget -= budget_left;
2393 return budget;
2394}
2395
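/*
 * A "pure" response carries no data: no async notification, no immediate
 * data, and no free-list buffer reference.
 */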
2396static inline int is_pure_response(const struct rsp_desc *r)
2397{
2398 __be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
2399
2400 return (n | r->len_cq) == 0;
2401}
2402

/**
 *	napi_rx_handler - the NAPI handler for Rx processing
 *	@napi: the napi instance
 *	@budget: how many packets we can process in this round
 *
 *	Handler for new data events when using NAPI.
 */
2410static int napi_rx_handler(struct napi_struct *napi, int budget)
2411{
2412 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
2413 struct adapter *adap = qs->adap;
2414 int work_done = process_responses(adap, qs, budget);
2415
2416 if (likely(work_done < budget)) {
2417 napi_complete(napi);

		/*
		 * Re-enable interrupts for this response queue: write the
		 * updated consumer index and the next interrupt holdoff timer
		 * value to the GTS register.
		 */
2433 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
2434 V_NEWTIMER(qs->rspq.next_holdoff) |
2435 V_NEWINDEX(qs->rspq.cidx));
2436 }
2437 return work_done;
2438}
2439
2440
2441
2442
2443static inline int napi_is_scheduled(struct napi_struct *napi)
2444{
2445 return test_bit(NAPI_STATE_SCHED, &napi->state);
2446}
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
2463 struct rsp_desc *r)
2464{
2465 struct sge_rspq *q = &qs->rspq;
2466 unsigned int sleeping = 0;
2467
2468 do {
2469 u32 flags = ntohl(r->flags);
2470
2471 r++;
2472 if (unlikely(++q->cidx == q->size)) {
2473 q->cidx = 0;
2474 q->gen ^= 1;
2475 r = q->desc;
2476 }
2477 prefetch(r);
2478
2479 if (flags & RSPD_CTRL_MASK) {
2480 sleeping |= flags & RSPD_GTS_MASK;
2481 handle_rsp_cntrl_info(qs, flags);
2482 }
2483
2484 q->pure_rsps++;
2485 if (++q->credits >= (q->size / 4)) {
2486 refill_rspq(adap, q, q->credits);
2487 q->credits = 0;
2488 }
2489 } while (is_new_response(r, q) && is_pure_response(r));
2490
2491 if (sleeping)
2492 check_ring_db(adap, qs, sleeping);
2493
2494 smp_mb();
2495 if (unlikely(qs->txq_stopped != 0))
2496 restart_tx(qs);
2497
2498 return is_new_response(r, q);
2499}

/**
 *	handle_responses - decide what to do with new responses in NAPI mode
 *	@adap: the adapter
 *	@q: the response queue
 *
 *	This is used by the NAPI interrupt handlers to decide what to do with
 *	new SGE responses.  If there are no new responses it returns -1.  If
 *	there are new responses and they are pure (i.e., non-data carrying)
 *	it handles them straight in hard interrupt context as they are very
 *	cheap and don't deliver any packets.  Finally, if there are any data
 *	signaling responses it schedules the NAPI handler.  Returns 1 if it
 *	schedules NAPI, 0 if all new responses were pure.
 *
 *	The caller must ascertain NAPI is not already running.
 */
2516static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2517{
2518 struct sge_qset *qs = rspq_to_qset(q);
2519 struct rsp_desc *r = &q->desc[q->cidx];
2520
2521 if (!is_new_response(r, q))
2522 return -1;
2523 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2524 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2525 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
2526 return 0;
2527 }
2528 napi_schedule(&qs->napi);
2529 return 1;
2530}
2531
2532
2533
2534
2535
2536irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2537{
2538 struct sge_qset *qs = cookie;
2539 struct adapter *adap = qs->adap;
2540 struct sge_rspq *q = &qs->rspq;
2541
2542 spin_lock(&q->lock);
2543 if (process_responses(adap, qs, -1) == 0)
2544 q->unhandled_irqs++;
2545 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2546 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2547 spin_unlock(&q->lock);
2548 return IRQ_HANDLED;
2549}
2550
2551
2552
2553
2554
2555static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
2556{
2557 struct sge_qset *qs = cookie;
2558 struct sge_rspq *q = &qs->rspq;
2559
2560 spin_lock(&q->lock);
2561
2562 if (handle_responses(qs->adap, q) < 0)
2563 q->unhandled_irqs++;
2564 spin_unlock(&q->lock);
2565 return IRQ_HANDLED;
2566}
2567
2568
2569
2570
2571
2572
2573
2574static irqreturn_t t3_intr_msi(int irq, void *cookie)
2575{
2576 int new_packets = 0;
2577 struct adapter *adap = cookie;
2578 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2579
2580 spin_lock(&q->lock);
2581
2582 if (process_responses(adap, &adap->sge.qs[0], -1)) {
2583 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2584 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2585 new_packets = 1;
2586 }
2587
2588 if (adap->params.nports == 2 &&
2589 process_responses(adap, &adap->sge.qs[1], -1)) {
2590 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2591
2592 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2593 V_NEWTIMER(q1->next_holdoff) |
2594 V_NEWINDEX(q1->cidx));
2595 new_packets = 1;
2596 }
2597
2598 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2599 q->unhandled_irqs++;
2600
2601 spin_unlock(&q->lock);
2602 return IRQ_HANDLED;
2603}
2604
2605static int rspq_check_napi(struct sge_qset *qs)
2606{
2607 struct sge_rspq *q = &qs->rspq;
2608
2609 if (!napi_is_scheduled(&qs->napi) &&
2610 is_new_response(&q->desc[q->cidx], q)) {
2611 napi_schedule(&qs->napi);
2612 return 1;
2613 }
2614 return 0;
2615}
2616
2617
2618
2619
2620
2621
2622
2623
2624static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
2625{
2626 int new_packets;
2627 struct adapter *adap = cookie;
2628 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2629
2630 spin_lock(&q->lock);
2631
2632 new_packets = rspq_check_napi(&adap->sge.qs[0]);
2633 if (adap->params.nports == 2)
2634 new_packets += rspq_check_napi(&adap->sge.qs[1]);
2635 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2636 q->unhandled_irqs++;
2637
2638 spin_unlock(&q->lock);
2639 return IRQ_HANDLED;
2640}
2641
2642
2643
2644
2645static inline int process_responses_gts(struct adapter *adap,
2646 struct sge_rspq *rq)
2647{
2648 int work;
2649
2650 work = process_responses(adap, rspq_to_qset(rq), -1);
2651 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2652 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2653 return work;
2654}
2655
2656
2657
2658
2659
2660
2661
2662static irqreturn_t t3_intr(int irq, void *cookie)
2663{
2664 int work_done, w0, w1;
2665 struct adapter *adap = cookie;
2666 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2667 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2668
2669 spin_lock(&q0->lock);
2670
2671 w0 = is_new_response(&q0->desc[q0->cidx], q0);
2672 w1 = adap->params.nports == 2 &&
2673 is_new_response(&q1->desc[q1->cidx], q1);
2674
2675 if (likely(w0 | w1)) {
2676 t3_write_reg(adap, A_PL_CLI, 0);
2677 t3_read_reg(adap, A_PL_CLI);
2678
2679 if (likely(w0))
2680 process_responses_gts(adap, q0);
2681
2682 if (w1)
2683 process_responses_gts(adap, q1);
2684
2685 work_done = w0 | w1;
2686 } else
2687 work_done = t3_slow_intr_handler(adap);
2688
2689 spin_unlock(&q0->lock);
2690 return IRQ_RETVAL(work_done != 0);
2691}
2692
2693
2694
2695
2696
2697
2698
2699
2700static irqreturn_t t3b_intr(int irq, void *cookie)
2701{
2702 u32 map;
2703 struct adapter *adap = cookie;
2704 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2705
2706 t3_write_reg(adap, A_PL_CLI, 0);
2707 map = t3_read_reg(adap, A_SG_DATA_INTR);
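	/*
	 * The data-interrupt cause register reports which response queues
	 * have work (bit 0 for queue set 0, bit 1 for queue set 1) and
	 * whether a slow-path error interrupt is pending.
	 */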
2708
2709 if (unlikely(!map))
2710 return IRQ_NONE;
2711
2712 spin_lock(&q0->lock);
2713
2714 if (unlikely(map & F_ERRINTR))
2715 t3_slow_intr_handler(adap);
2716
2717 if (likely(map & 1))
2718 process_responses_gts(adap, q0);
2719
2720 if (map & 2)
2721 process_responses_gts(adap, &adap->sge.qs[1].rspq);
2722
2723 spin_unlock(&q0->lock);
2724 return IRQ_HANDLED;
2725}

/*
 * NAPI variant of t3b_intr().  Instead of processing responses in hard-IRQ
 * context it schedules NAPI polling for each queue set that has new work.
 */
static irqreturn_t t3b_intr_napi(int irq, void *cookie)
{
	u32 map;
	struct adapter *adap = cookie;
	struct sge_qset *qs0 = &adap->sge.qs[0];
	struct sge_rspq *q0 = &qs0->rspq;

	t3_write_reg(adap, A_PL_CLI, 0);
	map = t3_read_reg(adap, A_SG_DATA_INTR);

	if (unlikely(!map))	/* shared interrupt, most likely */
		return IRQ_NONE;

	spin_lock(&q0->lock);

	if (unlikely(map & F_ERRINTR))
		t3_slow_intr_handler(adap);

	if (likely(map & 1))
		napi_schedule(&qs0->napi);

	if (map & 2)
		napi_schedule(&adap->sge.qs[1].napi);

	spin_unlock(&q0->lock);
	return IRQ_HANDLED;
}

/**
 *	t3_intr_handler - select the top-level interrupt handler
 *	@adap: the adapter
 *	@polling: whether using NAPI to service response queues
 *
 *	Selects the top-level interrupt handler based on the type of interrupts
 *	(MSI-X, MSI, or legacy) and whether NAPI will be used to service the
 *	response queues.
 */
irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
{
	if (adap->flags & USING_MSIX)
		return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
	if (adap->flags & USING_MSI)
		return polling ? t3_intr_msi_napi : t3_intr_msi;
	if (adap->params.rev > 0)
		return polling ? t3b_intr_napi : t3b_intr;
	return t3_intr;
}
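
/*
 * Illustrative use only: the main driver (not this file) is expected to pass
 * the returned handler to request_irq() when bringing the interface up,
 * roughly along the lines of the sketch below.  The adap->name field and the
 * exact flag choice are assumptions made for this sketch, not definitions
 * from this file:
 *
 *	err = request_irq(adap->pdev->irq,
 *			  t3_intr_handler(adap, adap->sge.qs[0].rspq.polling),
 *			  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
 *			  adap->name, adap);
 */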

#define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
		    F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
		    V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
		    F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
		    F_HIRCQPARITYERROR)
#define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
#define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
		      F_RSPQDISABLED)

/**
 *	t3_sge_err_intr_handler - SGE async event interrupt handler
 *	@adapter: the adapter
 *
 *	Interrupt handler for SGE asynchronous (non-data) events.
 */
void t3_sge_err_intr_handler(struct adapter *adapter)
{
	unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE) &
				 ~F_FLEMPTY;

	if (status & SGE_PARERR)
		CH_ALERT(adapter, "SGE parity error (0x%x)\n",
			 status & SGE_PARERR);
	if (status & SGE_FRAMINGERR)
		CH_ALERT(adapter, "SGE framing error (0x%x)\n",
			 status & SGE_FRAMINGERR);

	if (status & F_RSPQCREDITOVERFOW)
		CH_ALERT(adapter, "SGE response queue credit overflow\n");

	if (status & F_RSPQDISABLED) {
		v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);

		CH_ALERT(adapter,
			 "packet delivered to disabled response queue "
			 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
	}

	if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
		CH_ALERT(adapter, "SGE dropped %s priority doorbell\n",
			 status & F_HIPIODRBDROPERR ? "high" : "low");

	t3_write_reg(adapter, A_SG_INT_CAUSE, status);
	if (status & SGE_FATALERR)
		t3_fatal_err(adapter);
}

/**
 *	sge_timer_tx - perform periodic maintenance of an SGE qset's Tx queues
 *	@data: the SGE queue set to maintain
 *
 *	Runs periodically from a timer to perform maintenance of an SGE queue
 *	set.  It cleans up any completed Tx descriptors that may still be
 *	pending.  Normal descriptor cleanup happens when new packets are added
 *	to a Tx queue, so this timer is relatively infrequent and does any
 *	cleanup only if the Tx queue has not seen any new packets in a while.
 *	We make a best-effort attempt to reclaim descriptors, in that we don't
 *	wait around if we cannot get a queue's lock (which most likely means
 *	someone else is queueing new packets and will also handle the clean
 *	up).  Since control queues use immediate data exclusively we don't
 *	bother cleaning them up here.
 *
 *	The next timer period is scaled down based on how much reclaim work
 *	reclaim_completed_tx() reports for the queues.
 */
static void sge_timer_tx(unsigned long data)
{
	struct sge_qset *qs = (struct sge_qset *)data;
	struct port_info *pi = netdev_priv(qs->netdev);
	struct adapter *adap = pi->adapter;
	unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
	unsigned long next_period;

	if (__netif_tx_trylock(qs->tx_q)) {
		tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
						    TX_RECLAIM_TIMER_CHUNK);
		__netif_tx_unlock(qs->tx_q);
	}

	if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
		tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
						     TX_RECLAIM_TIMER_CHUNK);
		spin_unlock(&qs->txq[TXQ_OFLD].lock);
	}

	next_period = TX_RECLAIM_PERIOD >>
		      (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
		       TX_RECLAIM_TIMER_CHUNK);
	mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
}
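
/*
 * A worked example of the back-off above, assuming reclaim_completed_tx()
 * (defined elsewhere in this file) returns the number of descriptors still
 * awaiting reclaim -- an assumption for this note, not something restated by
 * the code here: with TX_RECLAIM_PERIOD = HZ/4 and TX_RECLAIM_TIMER_CHUNK =
 * 64, a report of 128 outstanding descriptors gives a shift of 128 / 64 = 2,
 * so the timer re-arms after HZ/16 jiffies instead of HZ/4.
 */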

/**
 *	sge_timer_rx - perform periodic maintenance of an SGE qset's Rx queues
 *	@data: the SGE queue set to maintain
 *
 *	a) Replenishes Rx queues that have run out due to memory shortage.
 *	Normally new Rx buffers are added when existing ones are consumed but
 *	when out of memory a queue can become empty.  We try to add only a few
 *	buffers here, the queue will be replenished fully as these new buffers
 *	are used up if memory shortage has subsided.
 *
 *	b) Return coalesced response queue credits in case a response queue is
 *	starved.
 */
static void sge_timer_rx(unsigned long data)
{
	spinlock_t *lock;
	struct sge_qset *qs = (struct sge_qset *)data;
	struct port_info *pi = netdev_priv(qs->netdev);
	struct adapter *adap = pi->adapter;
	u32 status;

	lock = adap->params.rev > 0 ?
	       &qs->rspq.lock : &adap->sge.qs[0].rspq.lock;

	if (!spin_trylock_irq(lock))
		goto out;

	if (napi_is_scheduled(&qs->napi))
		goto unlock;

	if (adap->params.rev < 4) {
		status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);

		if (status & (1 << qs->rspq.cntxt_id)) {
			qs->rspq.starved++;
			if (qs->rspq.credits) {
				qs->rspq.credits--;
				refill_rspq(adap, &qs->rspq, 1);
				qs->rspq.restarted++;
				t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
					     1 << qs->rspq.cntxt_id);
			}
		}
	}

	if (qs->fl[0].credits < qs->fl[0].size)
		__refill_fl(adap, &qs->fl[0]);
	if (qs->fl[1].credits < qs->fl[1].size)
		__refill_fl(adap, &qs->fl[1]);

unlock:
	spin_unlock_irq(lock);
out:
	mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
}

/**
 *	t3_update_qset_coalesce - update coalescing settings for a queue set
 *	@qs: the SGE queue set
 *	@p: new queue set parameters
 *
 *	Update the coalescing settings for an SGE queue set.
 */
void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
{
	qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U); /* can't be 0 */
	qs->rspq.polling = p->polling;
	qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
}
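
/*
 * For reference, assuming the SGE timer tick programmed by t3_sge_init()
 * later in this file (A_SG_TIMER_TICK = core_ticks_per_usec(adap) / 10,
 * i.e. roughly a 100 ns tick), the conversion above works out to one
 * holdoff-timer unit per 100 ns: the default coalesce_usecs of 5 set up in
 * t3_sge_prep() becomes a holdoff_tmr of 50, or about 5 us of interrupt
 * holdoff.
 */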

/**
 *	t3_sge_alloc_qset - initialize an SGE queue set
 *	@adapter: the adapter
 *	@id: the queue set id
 *	@nports: how many Ethernet ports will be using this queue set
 *	@irq_vec_idx: the IRQ vector index for response queue interrupts
 *	@p: configuration parameters for this queue set
 *	@ntxq: number of Tx queues for the queue set
 *	@dev: net device associated with this queue set
 *	@netdevq: net device TX queue associated with this queue set
 *
 *	Allocate resources and initialize an SGE queue set.  A queue set
 *	comprises a response queue, two Rx free-buffer queues, and up to 3
 *	Tx queues.  The Tx queues are assigned roles in the order Ethernet
 *	queue, offload queue, and control queue.
 */
int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
		      int irq_vec_idx, const struct qset_params *p,
		      int ntxq, struct net_device *dev,
		      struct netdev_queue *netdevq)
{
	int i, avail, ret = -ENOMEM;
	struct sge_qset *q = &adapter->sge.qs[id];

	init_qset_cntxt(q, id);
	setup_timer(&q->tx_reclaim_timer, sge_timer_tx, (unsigned long)q);
	setup_timer(&q->rx_reclaim_timer, sge_timer_rx, (unsigned long)q);

	q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
				   sizeof(struct rx_desc),
				   sizeof(struct rx_sw_desc),
				   &q->fl[0].phys_addr, &q->fl[0].sdesc);
	if (!q->fl[0].desc)
		goto err;

	q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
				   sizeof(struct rx_desc),
				   sizeof(struct rx_sw_desc),
				   &q->fl[1].phys_addr, &q->fl[1].sdesc);
	if (!q->fl[1].desc)
		goto err;

	q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
				  sizeof(struct rsp_desc), 0,
				  &q->rspq.phys_addr, NULL);
	if (!q->rspq.desc)
		goto err;
	for (i = 0; i < ntxq; ++i) {
		/*
		 * The control queue always uses immediate data so does not
		 * need to keep track of any sk_buffs.
		 */
		size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);

		q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
					    sizeof(struct tx_desc), sz,
					    &q->txq[i].phys_addr,
					    &q->txq[i].sdesc);
		if (!q->txq[i].desc)
			goto err;

		q->txq[i].gen = 1;
		q->txq[i].size = p->txq_size[i];
		spin_lock_init(&q->txq[i].lock);
		skb_queue_head_init(&q->txq[i].sendq);
	}

	tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
		     (unsigned long)q);
	tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
		     (unsigned long)q);

	q->fl[0].gen = q->fl[1].gen = 1;
	q->fl[0].size = p->fl_size;
	q->fl[1].size = p->jumbo_size;

	q->rspq.gen = 1;
	q->rspq.size = p->rspq_size;
	spin_lock_init(&q->rspq.lock);
	skb_queue_head_init(&q->rspq.rx_queue);

	q->txq[TXQ_ETH].stop_thres = nports *
	    flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);

#if FL0_PG_CHUNK_SIZE > 0
	q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
#else
	q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
#endif
#if FL1_PG_CHUNK_SIZE > 0
	q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
#else
	q->fl[1].buf_size = is_offload(adapter) ?
		(16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
		MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
#endif

	q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
	q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
	q->fl[0].order = FL0_PG_ORDER;
	q->fl[1].order = FL1_PG_ORDER;
	q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE;
	q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE;
	spin_lock_irq(&adapter->sge.reg_lock);

	/* FL threshold comparison uses < */
	ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
				   q->rspq.phys_addr, q->rspq.size,
				   q->fl[0].buf_size - SGE_PG_RSVD, 1, 0);
	if (ret)
		goto err_unlock;

	for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
		ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
					  q->fl[i].phys_addr, q->fl[i].size,
					  q->fl[i].buf_size - SGE_PG_RSVD,
					  p->cong_thres, 1, 0);
		if (ret)
			goto err_unlock;
	}

	ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
				 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
				 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
				 1, 0);
	if (ret)
		goto err_unlock;

	if (ntxq > 1) {
		ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
					 USE_GTS, SGE_CNTXT_OFLD, id,
					 q->txq[TXQ_OFLD].phys_addr,
					 q->txq[TXQ_OFLD].size, 0, 1, 0);
		if (ret)
			goto err_unlock;
	}

	if (ntxq > 2) {
		ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
					 SGE_CNTXT_CTRL, id,
					 q->txq[TXQ_CTRL].phys_addr,
					 q->txq[TXQ_CTRL].size,
					 q->txq[TXQ_CTRL].token, 1, 0);
		if (ret)
			goto err_unlock;
	}

	spin_unlock_irq(&adapter->sge.reg_lock);

	q->adap = adapter;
	q->netdev = dev;
	q->tx_q = netdevq;
	t3_update_qset_coalesce(q, p);

	avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
			  GFP_KERNEL | __GFP_COMP);
	if (!avail) {
		CH_ALERT(adapter, "free list queue 0 initialization failed\n");
		goto err;
	}
	if (avail < q->fl[0].size)
		CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
			avail);

	avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
			  GFP_KERNEL | __GFP_COMP);
	if (avail < q->fl[1].size)
		CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
			avail);
	refill_rspq(adapter, &q->rspq, q->rspq.size - 1);

	t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
		     V_NEWTIMER(q->rspq.holdoff_tmr));

	return 0;

err_unlock:
	spin_unlock_irq(&adapter->sge.reg_lock);
err:
	t3_free_qset(adapter, q);
	return ret;
}
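
/*
 * Illustrative call only (the real call site is in the main driver, not in
 * this file).  One queue set is typically allocated per Tx queue of a port,
 * roughly as sketched below; the variable names qset_idx, irq_idx, ntxq and
 * j are assumptions used just for this sketch:
 *
 *	err = t3_sge_alloc_qset(adap, qset_idx, 1, irq_idx,
 *				&adap->params.sge.qset[qset_idx], ntxq, dev,
 *				netdev_get_tx_queue(dev, j));
 */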

/**
 *	t3_start_sge_timers - start SGE timer call backs
 *	@adap: the adapter
 *
 *	Starts each SGE queue set's timer call back
 */
void t3_start_sge_timers(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; ++i) {
		struct sge_qset *q = &adap->sge.qs[i];

		if (q->tx_reclaim_timer.function)
			mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);

		if (q->rx_reclaim_timer.function)
			mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
	}
}

/**
 *	t3_stop_sge_timers - stop SGE timer call backs
 *	@adap: the adapter
 *
 *	Stops each SGE queue set's timer call back
 */
void t3_stop_sge_timers(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; ++i) {
		struct sge_qset *q = &adap->sge.qs[i];

		if (q->tx_reclaim_timer.function)
			del_timer_sync(&q->tx_reclaim_timer);
		if (q->rx_reclaim_timer.function)
			del_timer_sync(&q->rx_reclaim_timer);
	}
}

/**
 *	t3_free_sge_resources - free SGE resources
 *	@adap: the adapter
 *
 *	Frees resources used by the SGE queue sets.
 */
void t3_free_sge_resources(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; ++i)
		t3_free_qset(adap, &adap->sge.qs[i]);
}

/**
 *	t3_sge_start - enable SGE
 *	@adap: the adapter
 *
 *	Enables the SGE for DMAs.  This is the last step in starting packet
 *	transfers.
 */
void t3_sge_start(struct adapter *adap)
{
	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
}

/**
 *	t3_sge_stop - disable SGE operation
 *	@adap: the adapter
 *
 *	Disables the DMA engine.  This can be called in emergencies (e.g.,
 *	from error interrupts) or from normal process context.  In the latter
 *	case it also disables any pending queue restart tasklets.  Note that
 *	if it is called in interrupt context it cannot disable the restart
 *	tasklets as it cannot wait; however, the tasklets will have no effect
 *	since the doorbells are disabled, and this function is expected to be
 *	called again later from process context, at which point the tasklets
 *	will be stopped if they are still running.
 */
void t3_sge_stop(struct adapter *adap)
{
	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
	if (!in_interrupt()) {
		int i;

		for (i = 0; i < SGE_QSETS; ++i) {
			struct sge_qset *qs = &adap->sge.qs[i];

			tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
			tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
		}
	}
}

/**
 *	t3_sge_init - initialize SGE
 *	@adap: the adapter
 *	@p: the SGE parameters
 *
 *	Performs SGE initialization needed every time after a chip reset.
 *	We do not initialize any of the queue sets here; the driver top level
 *	must request those individually.  We also do not enable DMA here, that
 *	should be done after the queues have been set up.
 */
void t3_sge_init(struct adapter *adap, struct sge_params *p)
{
	unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);

	ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
	       F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
	       V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
	       V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
#if SGE_NUM_GENBITS == 1
	ctrl |= F_EGRGENCTRL;
#endif
	if (adap->params.rev > 0) {
		if (!(adap->flags & (USING_MSIX | USING_MSI)))
			ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
	}
	t3_write_reg(adap, A_SG_CONTROL, ctrl);
	t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
		     V_LORCQDRBTHRSH(512));
	t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
	t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
		     V_TIMEOUT(200 * core_ticks_per_usec(adap)));
	t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
		     adap->params.rev < T3_REV_C ? 1000 : 500);
	t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
	t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
	t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
	t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
	t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
}
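
/*
 * Note on the host page size programming above: V_HOSTPAGESIZE(PAGE_SHIFT -
 * 11) appears to encode the page size as a power of two relative to 2 KB
 * (an inference from the expression, not a statement taken from the hardware
 * documentation), so with the common 4 KB pages (PAGE_SHIFT == 12) the field
 * is simply programmed with 1.
 */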

/**
 *	t3_sge_prep - one-time SGE initialization
 *	@adap: the associated adapter
 *	@p: the SGE parameters
 *
 *	Performs one-time initialization of SGE SW state.  Includes determining
 *	(and keeping track of) the various SGE parameters.
 */
void t3_sge_prep(struct adapter *adap, struct sge_params *p)
{
	int i;

	p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	for (i = 0; i < SGE_QSETS; ++i) {
		struct qset_params *q = p->qset + i;

		q->polling = adap->params.rev > 0;
		q->coalesce_usecs = 5;
		q->rspq_size = 1024;
		q->fl_size = 1024;
		q->jumbo_size = 512;
		q->txq_size[TXQ_ETH] = 1024;
		q->txq_size[TXQ_OFLD] = 1024;
		q->txq_size[TXQ_CTRL] = 256;
		q->cong_thres = 0;
	}

	spin_lock_init(&adap->sge.reg_lock);
}

/**
 *	t3_get_desc - dump an SGE descriptor for debugging purposes
 *	@qs: the queue set
 *	@qnum: identifies the specific queue (0..2: Tx, 3: response, 4..5: Rx)
 *	@idx: the descriptor index in the queue
 *	@data: where to dump the descriptor contents
 *
 *	Dumps the contents of a HW descriptor of an SGE queue.  Returns the
 *	size of the descriptor or a negative error code.
 */
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
		unsigned char *data)
{
	if (qnum >= 6)
		return -EINVAL;

	if (qnum < 3) {
		if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
			return -EINVAL;
		memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
		return sizeof(struct tx_desc);
	}

	if (qnum == 3) {
		if (!qs->rspq.desc || idx >= qs->rspq.size)
			return -EINVAL;
		memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
		return sizeof(struct rsp_desc);
	}

	qnum -= 4;
	if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
		return -EINVAL;
	memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
	return sizeof(struct rx_desc);
}
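/*
 * Example (derived from the qnum mapping above): t3_get_desc(qs, 4, 0, buf)
 * copies free list 0's first Rx descriptor into buf and returns
 * sizeof(struct rx_desc), while qnum values 0-2 select the Tx queues and
 * qnum 3 selects the response queue.  The caller's buffer must be large
 * enough for the biggest descriptor type it requests.
 */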