/*
 * sge.c: Chelsio T1 (cxgb) Scatter-Gather Engine (SGE) DMA support.
 */

#include "common.h"

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/ktime.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/prefetch.h>

#include "cpl5_cmd.h"
#include "sge.h"
#include "regs.h"
#include "espi.h"

#define ETH_P_CPL5 0xf

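/* SGE ring geometry, buffer sizing and threshold constants. */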
#define SGE_CMDQ_N		2
#define SGE_FREELQ_N		2
#define SGE_CMDQ0_E_N		1024
#define SGE_CMDQ1_E_N		128
#define SGE_FREEL_SIZE		4096
#define SGE_JUMBO_FREEL_SIZE	512
#define SGE_FREEL_REFILL_THRESH	16
#define SGE_RESPQ_E_N		1024
#define SGE_INTRTIMER_NRES	1000
#define SGE_RX_SM_BUF_SIZE	1536
#define SGE_TX_DESC_MAX_PLEN	16384

#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)

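/*
 * Period of the TX buffer reclaim timer.  This timer does not need to run
 * frequently as TX buffers are usually reclaimed by new TX packets.
 */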
#define TX_RECLAIM_PERIOD (HZ / 4)

#define M_CMD_LEN	0x7fffffff
#define V_CMD_LEN(v)	(v)
#define G_CMD_LEN(v)	((v) & M_CMD_LEN)
#define V_CMD_GEN1(v)	((v) << 31)
#define V_CMD_GEN2(v)	(v)
#define F_CMD_DATAVALID	(1 << 1)
#define F_CMD_SOP	(1 << 2)
#define V_CMD_EOP(v)	((v) << 3)

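/*
 * HW command, freelist and response queue descriptors.  Both bitfield
 * layouts are given so the structures match the hardware format on
 * big- and little-endian machines alike.
 */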
#if defined(__BIG_ENDIAN_BITFIELD)
struct cmdQ_e {
	u32 addr_lo;
	u32 len_gen;
	u32 flags;
	u32 addr_hi;
};

struct freelQ_e {
	u32 addr_lo;
	u32 len_gen;
	u32 gen2;
	u32 addr_hi;
};

struct respQ_e {
	u32 Qsleeping : 4;
	u32 Cmdq1CreditReturn : 5;
	u32 Cmdq1DmaComplete : 5;
	u32 Cmdq0CreditReturn : 5;
	u32 Cmdq0DmaComplete : 5;
	u32 FreelistQid : 2;
	u32 CreditValid : 1;
	u32 DataValid : 1;
	u32 Offload : 1;
	u32 Eop : 1;
	u32 Sop : 1;
	u32 GenerationBit : 1;
	u32 BufferLength;
};
#elif defined(__LITTLE_ENDIAN_BITFIELD)
struct cmdQ_e {
	u32 len_gen;
	u32 addr_lo;
	u32 addr_hi;
	u32 flags;
};

struct freelQ_e {
	u32 len_gen;
	u32 addr_lo;
	u32 addr_hi;
	u32 gen2;
};

struct respQ_e {
	u32 BufferLength;
	u32 GenerationBit : 1;
	u32 Sop : 1;
	u32 Eop : 1;
	u32 Offload : 1;
	u32 DataValid : 1;
	u32 CreditValid : 1;
	u32 FreelistQid : 2;
	u32 Cmdq0DmaComplete : 5;
	u32 Cmdq0CreditReturn : 5;
	u32 Cmdq1DmaComplete : 5;
	u32 Cmdq1CreditReturn : 5;
	u32 Qsleeping : 4;
};
#endif

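/*
 * SW per-entry command and freelist queue state: the owning skb plus the
 * DMA address/length needed to unmap the buffer later.
 */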
struct cmdQ_ce {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
	DEFINE_DMA_UNMAP_LEN(dma_len);
};

struct freelQ_ce {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
	DEFINE_DMA_UNMAP_LEN(dma_len);
};

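/*
 * SW command, freelist and response ring state.  cmdQ.lock protects the
 * producer state; genbit flips each time a ring's producer index wraps.
 */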
struct cmdQ {
	unsigned long status;
	unsigned int in_use;
	unsigned int size;
	unsigned int processed;
	unsigned int cleaned;
	unsigned int stop_thres;
	u16 pidx;
	u16 cidx;
	u8 genbit;
	u8 sop;
	struct cmdQ_e *entries;
	struct cmdQ_ce *centries;
	dma_addr_t dma_addr;
	spinlock_t lock;
};

struct freelQ {
	unsigned int credits;
	unsigned int size;
	u16 pidx;
	u16 cidx;
	u16 rx_buffer_size;
	u16 dma_offset;
	u16 recycleq_idx;
	u8 genbit;
	struct freelQ_e *entries;
	struct freelQ_ce *centries;
	dma_addr_t dma_addr;
};

struct respQ {
	unsigned int credits;
	unsigned int size;
	u16 cidx;
	u8 genbit;
	struct respQ_e *entries;
	dma_addr_t dma_addr;
};

/* Bit flags for cmdQ.status */
enum {
	CMDQ_STAT_RUNNING = 1,
	CMDQ_STAT_LAST_PKT_DB = 2
};

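/*
 * Per-port leaky-bucket state for the T204 TX scheduler: 'avail' is the
 * byte credit, replenished at drain_bits_per_1024ns, and skbq holds the
 * packets waiting for credit.
 */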
struct sched_port {
	unsigned int avail;
	unsigned int drain_bits_per_1024ns;
	unsigned int speed;
	unsigned int mtu;
	struct sk_buff_head skbq;
};

struct sched {
	ktime_t last_updated;
	unsigned int max_avail;
	unsigned int port;
	unsigned int num;
	struct sched_port p[MAX_NPORTS];
	struct tasklet_struct sched_tsk;
};

static void restart_sched(unsigned long);

struct sge {
	struct adapter *adapter;
	struct net_device *netdev;
	struct freelQ freelQ[SGE_FREELQ_N];
	struct respQ respQ;
	unsigned long stopped_tx_queues;
	unsigned int rx_pkt_pad;
	unsigned int jumbo_fl;
	unsigned int intrtimer_nres;
	unsigned int fixed_intrtimer;
	struct timer_list tx_reclaim_timer;
	struct timer_list espibug_timer;
	unsigned long espibug_timeout;
	struct sk_buff *espibug_skb[MAX_NPORTS];
	u32 sge_control;
	struct sge_intr_counts stats;
	struct sge_port_stats __percpu *port_stats[MAX_NPORTS];
	struct sched *tx_sched;
	struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
};

static const u8 ch_mac_addr[ETH_ALEN] = {
	0x0, 0x7, 0x43, 0x0, 0x0, 0x0
};

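/* Stop the TX scheduler: kill its tasklet and drop all queued packets. */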
static void tx_sched_stop(struct sge *sge)
{
	struct sched *s = sge->tx_sched;
	int i;

	tasklet_kill(&s->sched_tsk);

	for (i = 0; i < MAX_NPORTS; i++)
		__skb_queue_purge(&s->p[i].skbq);
}

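/*
 * t1_sched_update_parms() is called when the MTU or link speed changes.
 * It recomputes the per-port drain rate (in bits per 1024 ns) and the
 * scheduler's maximum credit, and returns the new TX burst size in bytes.
 * The (mtu - 40) terms approximate the TCP payload per frame; e.g. for
 * mtu=1500 at speed=1000 the drain rate works out to
 * 1024 * 1000 * 1460 / (1550 * 1000) ~= 964 bits per 1024 ns.
 */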
unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,
				   unsigned int mtu, unsigned int speed)
{
	struct sched *s = sge->tx_sched;
	struct sched_port *p = &s->p[port];
	unsigned int max_avail_segs;

	pr_debug("%s mtu=%d speed=%d\n", __func__, mtu, speed);
	if (speed)
		p->speed = speed;
	if (mtu)
		p->mtu = mtu;

	if (speed || mtu) {
		unsigned long long drain = 1024ULL * p->speed * (p->mtu - 40);
		do_div(drain, (p->mtu + 50) * 1000);
		p->drain_bits_per_1024ns = (unsigned int) drain;

		if (p->speed < 1000)
			p->drain_bits_per_1024ns =
				90 * p->drain_bits_per_1024ns / 100;
	}

	if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) {
		p->drain_bits_per_1024ns -= 16;
		s->max_avail = max(4096U, p->mtu + 16 + 14 + 4);
		max_avail_segs = max(1U, 4096 / (p->mtu - 40));
	} else {
		s->max_avail = 16384;
		max_avail_segs = max(1U, 9000 / (p->mtu - 40));
	}

	pr_debug("t1_sched_update_parms: mtu %u speed %u max_avail %u "
		 "max_avail_segs %u drain_bits_per_1024ns %u\n", p->mtu,
		 p->speed, s->max_avail, max_avail_segs,
		 p->drain_bits_per_1024ns);

	return max_avail_segs * (p->mtu - 40);
}

#if 0

void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val)
{
	struct sched *s = sge->tx_sched;
	unsigned int i;

	s->max_avail = val;
	for (i = 0; i < MAX_NPORTS; i++)
		t1_sched_update_parms(sge, i, 0, 0);
}

void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
				    unsigned int val)
{
	struct sched *s = sge->tx_sched;
	struct sched_port *p = &s->p[port];

	p->drain_bits_per_1024ns = val * 1024 / 1000;
	t1_sched_update_parms(sge, port, 0, 0);
}

#endif

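/* Allocate and initialise the TX scheduler state (one queue per port). */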
static int tx_sched_init(struct sge *sge)
{
	struct sched *s;
	int i;

	s = kzalloc(sizeof(struct sched), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	pr_debug("tx_sched_init\n");
	tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge);
	sge->tx_sched = s;

	for (i = 0; i < MAX_NPORTS; i++) {
		skb_queue_head_init(&s->p[i].skbq);
		t1_sched_update_parms(sge, i, 1500, 1000);
	}

	return 0;
}

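/*
 * Replenish each port's byte credit from the time elapsed since the last
 * update.  drain_bits_per_1024ns * delta_ns is a bit count scaled by
 * 1024 ns; shifting right by 13 divides by 8 * 1024 to yield bytes.
 * Returns 1 if the credits were updated, 0 if too little time has passed.
 */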
static inline int sched_update_avail(struct sge *sge)
{
	struct sched *s = sge->tx_sched;
	ktime_t now = ktime_get();
	unsigned int i;
	long long delta_time_ns;

	delta_time_ns = ktime_to_ns(ktime_sub(now, s->last_updated));

	pr_debug("sched_update_avail delta=%lld\n", delta_time_ns);
	if (delta_time_ns < 15000)
		return 0;

	for (i = 0; i < MAX_NPORTS; i++) {
		struct sched_port *p = &s->p[i];
		unsigned int delta_avail;

		delta_avail = (p->drain_bits_per_1024ns * delta_time_ns) >> 13;
		p->avail = min(p->avail + delta_avail, s->max_avail);
	}

	s->last_updated = now;

	return 1;
}

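/*
 * sched_skb() either queues a new packet for scheduling or, when called
 * with skb == NULL, picks (round-robin across ports) the next queued
 * packet whose port still has enough byte credit to send it.  If packets
 * remain but none can be sent, the CMDQ0 doorbell is rung so later credit
 * returns will retrigger the scheduler.
 */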
static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
				 unsigned int credits)
{
	struct sched *s = sge->tx_sched;
	struct sk_buff_head *skbq;
	unsigned int i, len, update = 1;

	pr_debug("sched_skb %p\n", skb);
	if (!skb) {
		if (!s->num)
			return NULL;
	} else {
		skbq = &s->p[skb->dev->if_port].skbq;
		__skb_queue_tail(skbq, skb);
		s->num++;
		skb = NULL;
	}

	if (credits < MAX_SKB_FRAGS + 1)
		goto out;

again:
	for (i = 0; i < MAX_NPORTS; i++) {
		s->port = (s->port + 1) & (MAX_NPORTS - 1);
		skbq = &s->p[s->port].skbq;

		skb = skb_peek(skbq);

		if (!skb)
			continue;

		len = skb->len;
		if (len <= s->p[s->port].avail) {
			s->p[s->port].avail -= len;
			s->num--;
			__skb_unlink(skb, skbq);
			goto out;
		}
		skb = NULL;
	}

	if (update-- && sched_update_avail(sge))
		goto again;

out:
	if (s->num && !skb) {
		struct cmdQ *q = &sge->cmdQ[0];

		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
			writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
		}
	}
	pr_debug("sched_skb ret %p\n", skb);

	return skb;
}

static inline void doorbell_pio(struct adapter *adapter, u32 val)
{
	wmb();
	writel(val, adapter->regs + A_SG_DOORBELL);
}

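/* Unmap and free all buffers still held on a freelist queue. */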
static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
{
	unsigned int cidx = q->cidx;

	while (q->credits--) {
		struct freelQ_ce *ce = &q->centries[cidx];

		pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
				 dma_unmap_len(ce, dma_len),
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb(ce->skb);
		ce->skb = NULL;
		if (++cidx == q->size)
			cidx = 0;
	}
}

static void free_rx_resources(struct sge *sge)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;

	if (sge->respQ.entries) {
		size = sizeof(struct respQ_e) * sge->respQ.size;
		pci_free_consistent(pdev, size, sge->respQ.entries,
				    sge->respQ.dma_addr);
	}

	for (i = 0; i < SGE_FREELQ_N; i++) {
		struct freelQ *q = &sge->freelQ[i];

		if (q->centries) {
			free_freelQ_buffers(pdev, q);
			kfree(q->centries);
		}
		if (q->entries) {
			size = sizeof(struct freelQ_e) * q->size;
			pci_free_consistent(pdev, size, q->entries,
					    q->dma_addr);
		}
	}
}

static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;

	for (i = 0; i < SGE_FREELQ_N; i++) {
		struct freelQ *q = &sge->freelQ[i];

		q->genbit = 1;
		q->size = p->freelQ_size[i];
		q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
		size = sizeof(struct freelQ_e) * q->size;
		q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
		if (!q->entries)
			goto err_no_mem;

		size = sizeof(struct freelQ_ce) * q->size;
		q->centries = kzalloc(size, GFP_KERNEL);
		if (!q->centries)
			goto err_no_mem;
	}

	/*
	 * Buffers on the small freelist hold a maximum-sized small packet
	 * plus the CPL message header and any alignment padding; the jumbo
	 * freelist uses buffers just under 16KB, leaving room for the
	 * skb_shared_info overhead.
	 */
	sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
		sizeof(struct cpl_rx_data) +
		sge->freelQ[!sge->jumbo_fl].dma_offset;

	size = (16 * 1024) -
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	sge->freelQ[sge->jumbo_fl].rx_buffer_size = size;

	/*
	 * Setup which skb recycle Q should be used when recycling buffers
	 * from each free list.
	 */
	sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0;
	sge->freelQ[sge->jumbo_fl].recycleq_idx = 1;

	sge->respQ.genbit = 1;
	sge->respQ.size = SGE_RESPQ_E_N;
	sge->respQ.credits = 0;
	size = sizeof(struct respQ_e) * sge->respQ.size;
	sge->respQ.entries =
		pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
	if (!sge->respQ.entries)
		goto err_no_mem;
	return 0;

err_no_mem:
	free_rx_resources(sge);
	return -ENOMEM;
}

static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
{
	struct cmdQ_ce *ce;
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int cidx = q->cidx;

	q->in_use -= n;
	ce = &q->centries[cidx];
	while (n--) {
		if (likely(dma_unmap_len(ce, dma_len))) {
			pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
					 dma_unmap_len(ce, dma_len),
					 PCI_DMA_TODEVICE);
			if (q->sop)
				q->sop = 0;
		}
		if (ce->skb) {
			dev_kfree_skb_any(ce->skb);
			q->sop = 1;
		}
		ce++;
		if (++cidx == q->size) {
			cidx = 0;
			ce = q->centries;
		}
	}
	q->cidx = cidx;
}

static void free_tx_resources(struct sge *sge)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;

	for (i = 0; i < SGE_CMDQ_N; i++) {
		struct cmdQ *q = &sge->cmdQ[i];

		if (q->centries) {
			if (q->in_use)
				free_cmdQ_buffers(sge, q, q->in_use);
			kfree(q->centries);
		}
		if (q->entries) {
			size = sizeof(struct cmdQ_e) * q->size;
			pci_free_consistent(pdev, size, q->entries,
					    q->dma_addr);
		}
	}
}

static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;

	for (i = 0; i < SGE_CMDQ_N; i++) {
		struct cmdQ *q = &sge->cmdQ[i];

		q->genbit = 1;
		q->sop = 1;
		q->size = p->cmdQ_size[i];
		q->in_use = 0;
		q->status = 0;
		q->processed = q->cleaned = 0;
		q->stop_thres = 0;
		spin_lock_init(&q->lock);
		size = sizeof(struct cmdQ_e) * q->size;
		q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
		if (!q->entries)
			goto err_no_mem;

		size = sizeof(struct cmdQ_ce) * q->size;
		q->centries = kzalloc(size, GFP_KERNEL);
		if (!q->centries)
			goto err_no_mem;
	}

	/*
	 * CommandQ 0 handles ethernet packets; stop it when fewer
	 * descriptors remain than a maximally fragmented skb could need
	 * on each port.
	 */
	sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
		(MAX_SKB_FRAGS + 1);
	return 0;

err_no_mem:
	free_tx_resources(sge);
	return -ENOMEM;
}

static inline void setup_ring_params(struct adapter *adapter, u64 addr,
				     u32 size, int base_reg_lo,
				     int base_reg_hi, int size_reg)
{
	writel((u32)addr, adapter->regs + base_reg_lo);
	writel(addr >> 32, adapter->regs + base_reg_hi);
	writel(size, adapter->regs + size_reg);
}

void t1_vlan_mode(struct adapter *adapter, netdev_features_t features)
{
	struct sge *sge = adapter->sge;

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		sge->sge_control |= F_VLAN_XTRACT;
	else
		sge->sge_control &= ~F_VLAN_XTRACT;
	if (adapter->open_device_map) {
		writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
		readl(adapter->regs + A_SG_CONTROL);
	}
}

static void configure_sge(struct sge *sge, struct sge_params *p)
{
	struct adapter *ap = sge->adapter;

	writel(0, ap->regs + A_SG_CONTROL);
	setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
			  A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
	setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
			  A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
	setup_ring_params(ap, sge->freelQ[0].dma_addr,
			  sge->freelQ[0].size, A_SG_FL0BASELWR,
			  A_SG_FL0BASEUPR, A_SG_FL0SIZE);
	setup_ring_params(ap, sge->freelQ[1].dma_addr,
			  sge->freelQ[1].size, A_SG_FL1BASELWR,
			  A_SG_FL1BASEUPR, A_SG_FL1SIZE);

	/* The threshold comparison uses <. */
	writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD);

	setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
			  A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
	writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);

	sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
		F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
		V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
		V_RX_PKT_OFFSET(sge->rx_pkt_pad);

#if defined(__BIG_ENDIAN_BITFIELD)
	sge->sge_control |= F_ENABLE_BIG_ENDIAN;
#endif

	/* Initialize no-resource timer */
	sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);

	t1_sge_set_coalesce_params(sge, p);
}

static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
{
	return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
		sge->freelQ[sge->jumbo_fl].dma_offset -
		sizeof(struct cpl_rx_data);
}

void t1_sge_destroy(struct sge *sge)
{
	int i;

	for_each_port(sge->adapter, i)
		free_percpu(sge->port_stats[i]);

	kfree(sge->tx_sched);
	free_tx_resources(sge);
	free_rx_resources(sge);
	kfree(sge);
}

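/*
 * Refill a freelist with as many fresh rx buffers as will fit.  Each new
 * descriptor is written with the current generation bit; the wmb() makes
 * sure gen2 (which the HW checks) is written only after the rest of the
 * descriptor is visible.
 */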
static void refill_free_list(struct sge *sge, struct freelQ *q)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	struct freelQ_ce *ce = &q->centries[q->pidx];
	struct freelQ_e *e = &q->entries[q->pidx];
	unsigned int dma_len = q->rx_buffer_size - q->dma_offset;

	while (q->credits < q->size) {
		struct sk_buff *skb;
		dma_addr_t mapping;

		skb = dev_alloc_skb(q->rx_buffer_size);
		if (!skb)
			break;

		skb_reserve(skb, q->dma_offset);
		mapping = pci_map_single(pdev, skb->data, dma_len,
					 PCI_DMA_FROMDEVICE);
		skb_reserve(skb, sge->rx_pkt_pad);

		ce->skb = skb;
		dma_unmap_addr_set(ce, dma_addr, mapping);
		dma_unmap_len_set(ce, dma_len, dma_len);
		e->addr_lo = (u32)mapping;
		e->addr_hi = (u64)mapping >> 32;
		e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
		wmb();
		e->gen2 = V_CMD_GEN2(q->genbit);

		e++;
		ce++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			q->genbit ^= 1;
			ce = q->centries;
			e = q->entries;
		}
		q->credits++;
	}
}

static void freelQs_empty(struct sge *sge)
{
	struct adapter *adapter = sge->adapter;
	u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE);
	u32 irqholdoff_reg;

	refill_free_list(sge, &sge->freelQ[0]);
	refill_free_list(sge, &sge->freelQ[1]);

	if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) &&
	    sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) {
		irq_reg |= F_FL_EXHAUSTED;
		irqholdoff_reg = sge->fixed_intrtimer;
	} else {
		/* Clear the F_FL_EXHAUSTED interrupts for now */
		irq_reg &= ~F_FL_EXHAUSTED;
		irqholdoff_reg = sge->intrtimer_nres;
	}
	writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER);
	writel(irq_reg, adapter->regs + A_SG_INT_ENABLE);

	/* We reenable the Qs to force a freelist GTS interrupt later */
	doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE);
}

#define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA)
#define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
#define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \
			F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)

void t1_sge_intr_disable(struct sge *sge)
{
	u32 val = readl(sge->adapter->regs + A_PL_ENABLE);

	writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
	writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
}

void t1_sge_intr_enable(struct sge *sge)
{
	u32 en = SGE_INT_ENABLE;
	u32 val = readl(sge->adapter->regs + A_PL_ENABLE);

	if (sge->adapter->port[0].dev->hw_features & NETIF_F_TSO)
		en &= ~F_PACKET_TOO_BIG;
	writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
	writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
}

void t1_sge_intr_clear(struct sge *sge)
{
	writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
	writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
}

int t1_sge_intr_error_handler(struct sge *sge)
{
	struct adapter *adapter = sge->adapter;
	u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);

	if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
		cause &= ~F_PACKET_TOO_BIG;
	if (cause & F_RESPQ_EXHAUSTED)
		sge->stats.respQ_empty++;
	if (cause & F_RESPQ_OVERFLOW) {
		sge->stats.respQ_overflow++;
		pr_alert("%s: SGE response queue overflow\n",
			 adapter->name);
	}
	if (cause & F_FL_EXHAUSTED) {
		sge->stats.freelistQ_empty++;
		freelQs_empty(sge);
	}
	if (cause & F_PACKET_TOO_BIG) {
		sge->stats.pkt_too_big++;
		pr_alert("%s: SGE max packet size exceeded\n",
			 adapter->name);
	}
	if (cause & F_PACKET_MISMATCH) {
		sge->stats.pkt_mismatch++;
		pr_alert("%s: SGE packet mismatch\n", adapter->name);
	}
	if (cause & SGE_INT_FATAL)
		t1_fatal_err(adapter);

	writel(cause, adapter->regs + A_SG_INT_CAUSE);
	return 0;
}

const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge)
{
	return &sge->stats;
}

976
977void t1_sge_get_port_stats(const struct sge *sge, int port,
978 struct sge_port_stats *ss)
979{
980 int cpu;
981
982 memset(ss, 0, sizeof(*ss));
983 for_each_possible_cpu(cpu) {
984 struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu);
985
986 ss->rx_cso_good += st->rx_cso_good;
987 ss->tx_cso += st->tx_cso;
988 ss->tx_tso += st->tx_tso;
989 ss->tx_need_hdrroom += st->tx_need_hdrroom;
990 ss->vlan_xtract += st->vlan_xtract;
991 ss->vlan_insert += st->vlan_insert;
992 }
993}
994
995
996
997
998
999
1000
1001
1002
1003static void recycle_fl_buf(struct freelQ *fl, int idx)
1004{
1005 struct freelQ_e *from = &fl->entries[idx];
1006 struct freelQ_e *to = &fl->entries[fl->pidx];
1007
1008 fl->centries[fl->pidx] = fl->centries[idx];
1009 to->addr_lo = from->addr_lo;
1010 to->addr_hi = from->addr_hi;
1011 to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit);
1012 wmb();
1013 to->gen2 = V_CMD_GEN2(fl->genbit);
1014 fl->credits++;
1015
1016 if (++fl->pidx == fl->size) {
1017 fl->pidx = 0;
1018 fl->genbit ^= 1;
1019 }
1020}
1021
1022static int copybreak __read_mostly = 256;
1023module_param(copybreak, int, 0);
1024MODULE_PARM_DESC(copybreak, "Receive copy threshold");
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
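/*
 * get_packet() returns the skb for a received packet.  Packets shorter
 * than 'copybreak' are copied into a fresh skb and the DMA buffer is
 * recycled back onto the freelist; larger packets hand the original
 * buffer up the stack, which costs an unmap and a later refill.
 */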
static inline struct sk_buff *get_packet(struct adapter *adapter,
					 struct freelQ *fl, unsigned int len)
{
	const struct freelQ_ce *ce = &fl->centries[fl->cidx];
	struct pci_dev *pdev = adapter->pdev;
	struct sk_buff *skb;

	if (len < copybreak) {
		skb = napi_alloc_skb(&adapter->napi, len);
		if (!skb)
			goto use_orig_buf;

		skb_put(skb, len);
		pci_dma_sync_single_for_cpu(pdev,
					    dma_unmap_addr(ce, dma_addr),
					    dma_unmap_len(ce, dma_len),
					    PCI_DMA_FROMDEVICE);
		skb_copy_from_linear_data(ce->skb, skb->data, len);
		pci_dma_sync_single_for_device(pdev,
					       dma_unmap_addr(ce, dma_addr),
					       dma_unmap_len(ce, dma_len),
					       PCI_DMA_FROMDEVICE);
		recycle_fl_buf(fl, fl->cidx);
		return skb;
	}

use_orig_buf:
	if (fl->credits < 2) {
		recycle_fl_buf(fl, fl->cidx);
		return NULL;
	}

	pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
			 dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
	skb = ce->skb;
	prefetch(skb->data);

	skb_put(skb, len);
	return skb;
}

static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
{
	struct freelQ_ce *ce = &fl->centries[fl->cidx];
	struct sk_buff *skb = ce->skb;

	pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr),
				    dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
	pr_err("%s: unexpected offload packet, cmd %u\n",
	       adapter->name, *skb->data);
	recycle_fl_buf(fl, fl->cidx);
}

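/*
 * A TX descriptor can carry at most SGE_TX_DESC_MAX_PLEN bytes.  On
 * systems whose pages exceed that, count how many extra descriptors are
 * needed to split the skb head and each fragment into max-sized pieces.
 */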
static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
{
	unsigned int count = 0;

	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
		unsigned int nfrags = skb_shinfo(skb)->nr_frags;
		unsigned int i, len = skb_headlen(skb);

		while (len > SGE_TX_DESC_MAX_PLEN) {
			count++;
			len -= SGE_TX_DESC_MAX_PLEN;
		}
		for (i = 0; nfrags--; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			while (len > SGE_TX_DESC_MAX_PLEN) {
				count++;
				len -= SGE_TX_DESC_MAX_PLEN;
			}
		}
	}
	return count;
}

static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping,
				 unsigned int len, unsigned int gen,
				 unsigned int eop)
{
	BUG_ON(len > SGE_TX_DESC_MAX_PLEN);

	e->addr_lo = (u32)mapping;
	e->addr_hi = (u64)mapping >> 32;
	e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen);
	e->flags = F_CMD_DATAVALID | V_CMD_EOP(eop) | V_CMD_GEN2(gen);
}

static inline unsigned int write_large_page_tx_descs(unsigned int pidx,
						     struct cmdQ_e **e,
						     struct cmdQ_ce **ce,
						     unsigned int *gen,
						     dma_addr_t *desc_mapping,
						     unsigned int *desc_len,
						     unsigned int nfrags,
						     struct cmdQ *q)
{
	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
		struct cmdQ_e *e1 = *e;
		struct cmdQ_ce *ce1 = *ce;

		while (*desc_len > SGE_TX_DESC_MAX_PLEN) {
			*desc_len -= SGE_TX_DESC_MAX_PLEN;
			write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN,
				      *gen, nfrags == 0 && *desc_len == 0);
			ce1->skb = NULL;
			dma_unmap_len_set(ce1, dma_len, 0);
			*desc_mapping += SGE_TX_DESC_MAX_PLEN;
			if (*desc_len) {
				ce1++;
				e1++;
				if (++pidx == q->size) {
					pidx = 0;
					*gen ^= 1;
					ce1 = q->centries;
					e1 = q->entries;
				}
			}
		}
		*e = e1;
		*ce = ce1;
	}
	return pidx;
}

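/*
 * Write the TX descriptors for an skb: one (or more, for large pages)
 * for the linear head, then one per fragment.  The SOP descriptor's
 * flags word is written last, after a wmb(), so the HW cannot see a
 * partially written chain.
 */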
static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
				  unsigned int pidx, unsigned int gen,
				  struct cmdQ *q)
{
	dma_addr_t mapping, desc_mapping;
	struct cmdQ_e *e, *e1;
	struct cmdQ_ce *ce;
	unsigned int i, flags, first_desc_len, desc_len,
		     nfrags = skb_shinfo(skb)->nr_frags;

	e = e1 = &q->entries[pidx];
	ce = &q->centries[pidx];

	mapping = pci_map_single(adapter->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	desc_mapping = mapping;
	desc_len = skb_headlen(skb);

	flags = F_CMD_DATAVALID | F_CMD_SOP |
		V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) |
		V_CMD_GEN2(gen);
	first_desc_len = (desc_len <= SGE_TX_DESC_MAX_PLEN) ?
		desc_len : SGE_TX_DESC_MAX_PLEN;
	e->addr_lo = (u32)desc_mapping;
	e->addr_hi = (u64)desc_mapping >> 32;
	e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen);
	ce->skb = NULL;
	dma_unmap_len_set(ce, dma_len, 0);

	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN &&
	    desc_len > SGE_TX_DESC_MAX_PLEN) {
		desc_mapping += first_desc_len;
		desc_len -= first_desc_len;
		e1++;
		ce++;
		if (++pidx == q->size) {
			pidx = 0;
			gen ^= 1;
			e1 = q->entries;
			ce = q->centries;
		}
		pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
						 &desc_mapping, &desc_len,
						 nfrags, q);

		if (likely(desc_len))
			write_tx_desc(e1, desc_mapping, desc_len, gen,
				      nfrags == 0);
	}

	ce->skb = NULL;
	dma_unmap_addr_set(ce, dma_addr, mapping);
	dma_unmap_len_set(ce, dma_len, skb_headlen(skb));

	for (i = 0; nfrags--; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		e1++;
		ce++;
		if (++pidx == q->size) {
			pidx = 0;
			gen ^= 1;
			e1 = q->entries;
			ce = q->centries;
		}

		mapping = skb_frag_dma_map(&adapter->pdev->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		desc_mapping = mapping;
		desc_len = skb_frag_size(frag);

		pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
						 &desc_mapping, &desc_len,
						 nfrags, q);
		if (likely(desc_len))
			write_tx_desc(e1, desc_mapping, desc_len, gen,
				      nfrags == 0);
		ce->skb = NULL;
		dma_unmap_addr_set(ce, dma_addr, mapping);
		dma_unmap_len_set(ce, dma_len, skb_frag_size(frag));
	}
	ce->skb = skb;
	wmb();
	e->flags = flags;
}

static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
{
	unsigned int reclaim = q->processed - q->cleaned;

	if (reclaim) {
		pr_debug("reclaim_completed_tx processed:%d cleaned:%d\n",
			 q->processed, q->cleaned);
		free_cmdQ_buffers(sge, q, reclaim);
		q->cleaned += reclaim;
	}
}

static void restart_sched(unsigned long arg)
{
	struct sge *sge = (struct sge *) arg;
	struct adapter *adapter = sge->adapter;
	struct cmdQ *q = &sge->cmdQ[0];
	struct sk_buff *skb;
	unsigned int credits, queued_skb = 0;

	spin_lock(&q->lock);
	reclaim_completed_tx(sge, q);

	credits = q->size - q->in_use;
	pr_debug("restart_sched credits=%d\n", credits);
	while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
		unsigned int genbit, pidx, count;

		count = 1 + skb_shinfo(skb)->nr_frags;
		count += compute_large_page_tx_descs(skb);
		q->in_use += count;
		genbit = q->genbit;
		pidx = q->pidx;
		q->pidx += count;
		if (q->pidx >= q->size) {
			q->pidx -= q->size;
			q->genbit ^= 1;
		}
		write_tx_descs(adapter, skb, pidx, genbit, q);
		credits = q->size - q->in_use;
		queued_skb = 1;
	}

	if (queued_skb) {
		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
		}
	}
	spin_unlock(&q->lock);
}

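/*
 * sge_rx() handles an ordinary ethernet packet: strip the CPL header,
 * update checksum/VLAN state and hand the skb to the stack.
 */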
static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
{
	struct sk_buff *skb;
	const struct cpl_rx_pkt *p;
	struct adapter *adapter = sge->adapter;
	struct sge_port_stats *st;
	struct net_device *dev;

	skb = get_packet(adapter, fl, len - sge->rx_pkt_pad);
	if (unlikely(!skb)) {
		sge->stats.rx_drops++;
		return;
	}

	p = (const struct cpl_rx_pkt *) skb->data;
	if (p->iff >= adapter->params.nports) {
		kfree_skb(skb);
		return;
	}
	__skb_pull(skb, sizeof(*p));

	st = this_cpu_ptr(sge->port_stats[p->iff]);
	dev = adapter->port[p->iff].dev;

	skb->protocol = eth_type_trans(skb, dev);
	if ((dev->features & NETIF_F_RXCSUM) && p->csum == 0xffff &&
	    skb->protocol == htons(ETH_P_IP) &&
	    (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
		++st->rx_cso_good;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
		skb_checksum_none_assert(skb);

	if (p->vlan_valid) {
		st->vlan_xtract++;
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan));
	}
	netif_receive_skb(skb);
}

static inline int enough_free_Tx_descs(const struct cmdQ *q)
{
	unsigned int r = q->processed - q->cleaned;

	return q->in_use - r < (q->size >> 1);
}

static void restart_tx_queues(struct sge *sge)
{
	struct adapter *adap = sge->adapter;
	int i;

	if (!enough_free_Tx_descs(&sge->cmdQ[0]))
		return;

	for_each_port(adap, i) {
		struct net_device *nd = adap->port[i].dev;

		if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) &&
		    netif_running(nd)) {
			sge->stats.cmdQ_restarted[2]++;
			netif_wake_queue(nd);
		}
	}
}

static unsigned int update_tx_info(struct adapter *adapter,
				   unsigned int flags,
				   unsigned int pr0)
{
	struct sge *sge = adapter->sge;
	struct cmdQ *cmdq = &sge->cmdQ[0];

	cmdq->processed += pr0;
	if (flags & (F_FL0_ENABLE | F_FL1_ENABLE)) {
		freelQs_empty(sge);
		flags &= ~(F_FL0_ENABLE | F_FL1_ENABLE);
	}
	if (flags & F_CMDQ0_ENABLE) {
		clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);

		if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
		    !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
			set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
		}
		if (sge->tx_sched)
			tasklet_hi_schedule(&sge->tx_sched->sched_tsk);

		flags &= ~F_CMDQ0_ENABLE;
	}

	if (unlikely(sge->stopped_tx_queues != 0))
		restart_tx_queues(sge);

	return flags;
}

static int process_responses(struct adapter *adapter, int budget)
{
	struct sge *sge = adapter->sge;
	struct respQ *q = &sge->respQ;
	struct respQ_e *e = &q->entries[q->cidx];
	int done = 0;
	unsigned int flags = 0;
	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};

	while (done < budget && e->GenerationBit == q->genbit) {
		flags |= e->Qsleeping;

		cmdq_processed[0] += e->Cmdq0CreditReturn;
		cmdq_processed[1] += e->Cmdq1CreditReturn;

		if (unlikely((flags & F_CMDQ0_ENABLE) || cmdq_processed[0] > 64)) {
			flags = update_tx_info(adapter, flags, cmdq_processed[0]);
			cmdq_processed[0] = 0;
		}

		if (unlikely(cmdq_processed[1] > 16)) {
			sge->cmdQ[1].processed += cmdq_processed[1];
			cmdq_processed[1] = 0;
		}

		if (likely(e->DataValid)) {
			struct freelQ *fl = &sge->freelQ[e->FreelistQid];

			BUG_ON(!e->Sop || !e->Eop);
			if (unlikely(e->Offload))
				unexpected_offload(adapter, fl);
			else
				sge_rx(sge, fl, e->BufferLength);

			++done;

			if (++fl->cidx == fl->size)
				fl->cidx = 0;
			prefetch(fl->centries[fl->cidx].skb);

			if (unlikely(--fl->credits <
				     fl->size - SGE_FREEL_REFILL_THRESH))
				refill_free_list(sge, fl);
		} else
			sge->stats.pure_rsps++;

		e++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->genbit ^= 1;
			e = q->entries;
		}
		prefetch(e);

		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
			q->credits = 0;
		}
	}

	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
	sge->cmdQ[1].processed += cmdq_processed[1];

	return done;
}

static inline int responses_pending(const struct adapter *adapter)
{
	const struct respQ *Q = &adapter->sge->respQ;
	const struct respQ_e *e = &Q->entries[Q->cidx];

	return e->GenerationBit == Q->genbit;
}

static int process_pure_responses(struct adapter *adapter)
{
	struct sge *sge = adapter->sge;
	struct respQ *q = &sge->respQ;
	struct respQ_e *e = &q->entries[q->cidx];
	const struct freelQ *fl = &sge->freelQ[e->FreelistQid];
	unsigned int flags = 0;
	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};

	prefetch(fl->centries[fl->cidx].skb);
	if (e->DataValid)
		return 1;

	do {
		flags |= e->Qsleeping;

		cmdq_processed[0] += e->Cmdq0CreditReturn;
		cmdq_processed[1] += e->Cmdq1CreditReturn;

		e++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->genbit ^= 1;
			e = q->entries;
		}
		prefetch(e);

		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
			q->credits = 0;
		}
		sge->stats.pure_rsps++;
	} while (e->GenerationBit == q->genbit && !e->DataValid);

	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
	sge->cmdQ[1].processed += cmdq_processed[1];

	return e->GenerationBit == q->genbit;
}

int t1_poll(struct napi_struct *napi, int budget)
{
	struct adapter *adapter = container_of(napi, struct adapter, napi);
	int work_done = process_responses(adapter, budget);

	if (likely(work_done < budget)) {
		napi_complete(napi);
		writel(adapter->sge->respQ.cidx,
		       adapter->regs + A_SG_SLEEPING);
	}
	return work_done;
}

irqreturn_t t1_interrupt(int irq, void *data)
{
	struct adapter *adapter = data;
	struct sge *sge = adapter->sge;
	int handled;

	if (likely(responses_pending(adapter))) {
		writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);

		if (napi_schedule_prep(&adapter->napi)) {
			if (process_pure_responses(adapter))
				__napi_schedule(&adapter->napi);
			else {
				/* no data, no NAPI needed */
				writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
				/* undo schedule_prep */
				napi_enable(&adapter->napi);
			}
		}
		return IRQ_HANDLED;
	}

	spin_lock(&adapter->async_lock);
	handled = t1_slow_intr_handler(adapter);
	spin_unlock(&adapter->async_lock);

	if (!handled)
		sge->stats.unhandled_irqs++;

	return IRQ_RETVAL(handled != 0);
}

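/*
 * t1_sge_tx() adds an skb to command queue 'qid'.  It reclaims completed
 * descriptors, stops the netdev queue when the ring is (nearly) full,
 * routes CMDQ0 packets through the TX scheduler when one is active, and
 * finally writes the descriptors and rings the doorbell.
 */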
static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
		     unsigned int qid, struct net_device *dev)
{
	struct sge *sge = adapter->sge;
	struct cmdQ *q = &sge->cmdQ[qid];
	unsigned int credits, pidx, genbit, count, use_sched_skb = 0;

	if (!spin_trylock(&q->lock))
		return NETDEV_TX_LOCKED;

	reclaim_completed_tx(sge, q);

	pidx = q->pidx;
	credits = q->size - q->in_use;
	count = 1 + skb_shinfo(skb)->nr_frags;
	count += compute_large_page_tx_descs(skb);

	/* Ethernet packet */
	if (unlikely(credits < count)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			set_bit(dev->if_port, &sge->stopped_tx_queues);
			sge->stats.cmdQ_full[2]++;
			pr_err("%s: Tx ring full while queue awake!\n",
			       adapter->name);
		}
		spin_unlock(&q->lock);
		return NETDEV_TX_BUSY;
	}

	if (unlikely(credits - count < q->stop_thres)) {
		netif_stop_queue(dev);
		set_bit(dev->if_port, &sge->stopped_tx_queues);
		sge->stats.cmdQ_full[2]++;
	}

	/* T204 cmdQ0 skbs that are destined for a certain port have to go
	 * through the scheduler.
	 */
	if (sge->tx_sched && !qid && skb->dev) {
use_sched:
		use_sched_skb = 1;
		/* Note that the scheduler might return a different skb than
		 * the one passed in.
		 */
		skb = sched_skb(sge, skb, credits);
		if (!skb) {
			spin_unlock(&q->lock);
			return NETDEV_TX_OK;
		}
		pidx = q->pidx;
		count = 1 + skb_shinfo(skb)->nr_frags;
		count += compute_large_page_tx_descs(skb);
	}

	q->in_use += count;
	genbit = q->genbit;
	pidx = q->pidx;
	q->pidx += count;
	if (q->pidx >= q->size) {
		q->pidx -= q->size;
		q->genbit ^= 1;
	}
	spin_unlock(&q->lock);

	write_tx_descs(adapter, skb, pidx, genbit, q);

	/*
	 * We always ring the doorbell for cmdQ1.  For cmdQ0, we only ring
	 * the doorbell if the Q is asleep.  There is a natural race, where
	 * the hardware is going to sleep just after we checked; however,
	 * then the interrupt handler will detect the outstanding TX packet
	 * and ring the doorbell for us.
	 */
	if (qid)
		doorbell_pio(adapter, F_CMDQ1_ENABLE);
	else {
		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
		}
	}

	if (use_sched_skb) {
		if (spin_trylock(&q->lock)) {
			credits = q->size - q->in_use;
			skb = NULL;
			goto use_sched;
		}
	}
	return NETDEV_TX_OK;
}

#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))

static inline int eth_hdr_len(const void *data)
{
	const struct ethhdr *e = data;

	return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN;
}

netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct sge *sge = adapter->sge;
	struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]);
	struct cpl_tx_pkt *cpl;
	struct sk_buff *orig_skb = skb;
	int ret;

	if (skb->protocol == htons(ETH_P_CPL5))
		goto send;

	/*
	 * We are using a non-standard hard_header_len.
	 * Allocate more header room in the rare cases it is not big enough.
	 */
	if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
		skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso));
		++st->tx_need_hdrroom;
		dev_kfree_skb_any(orig_skb);
		if (!skb)
			return NETDEV_TX_OK;
	}

	if (skb_shinfo(skb)->gso_size) {
		int eth_type;
		struct cpl_tx_pkt_lso *hdr;

		++st->tx_tso;

		eth_type = skb_network_offset(skb) == ETH_HLEN ?
			CPL_ETH_II : CPL_ETH_II_VLAN;

		hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
		hdr->opcode = CPL_TX_PKT_LSO;
		hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
		hdr->ip_hdr_words = ip_hdr(skb)->ihl;
		hdr->tcp_hdr_words = tcp_hdr(skb)->doff;
		hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
							  skb_shinfo(skb)->gso_size));
		hdr->len = htonl(skb->len - sizeof(*hdr));
		cpl = (struct cpl_tx_pkt *)hdr;
	} else {
		/*
		 * Packets shorter than ETH_HLEN can break the MAC, drop them
		 * early.  We may also get oversized packets because some
		 * parts of the kernel don't handle our unusual
		 * hard_header_len.
		 */
		if (unlikely(skb->len < ETH_HLEN ||
			     skb->len > dev->mtu + eth_hdr_len(skb->data))) {
			netdev_dbg(dev, "packet size %d hdr %d mtu%d\n",
				   skb->len, eth_hdr_len(skb->data), dev->mtu);
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}

		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    ip_hdr(skb)->protocol == IPPROTO_UDP) {
			if (unlikely(skb_checksum_help(skb))) {
				netdev_dbg(dev, "unable to do udp checksum\n");
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}

		/*
		 * Save a candidate ARP request so the ESPI-bug workaround
		 * timers can use it to flush out stuck packets.
		 */
		if (unlikely(!adapter->sge->espibug_skb[dev->if_port])) {
			if (skb->protocol == htons(ETH_P_ARP) &&
			    arp_hdr(skb)->ar_op == htons(ARPOP_REQUEST)) {
				adapter->sge->espibug_skb[dev->if_port] = skb;
				/*
				 * Take an extra reference so the packet can
				 * still be sent now while the workaround
				 * keeps its copy.
				 */
				skb = skb_get(skb);
			}
		}

		cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
		cpl->opcode = CPL_TX_PKT;
		cpl->ip_csum_dis = 1;	/* SW calculates IP csum */
		cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1;
		/* the length field isn't used so don't bother setting it */

		st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL);
	}
	cpl->iff = dev->if_port;

	if (skb_vlan_tag_present(skb)) {
		cpl->vlan_valid = 1;
		cpl->vlan = htons(skb_vlan_tag_get(skb));
		st->vlan_insert++;
	} else
		cpl->vlan_valid = 0;

send:
	ret = t1_sge_tx(skb, adapter, 0, dev);

	/*
	 * If transmit busy, and we reallocated skb's due to headroom limit,
	 * then silently discard to avoid leak.
	 */
	if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) {
		dev_kfree_skb_any(skb);
		ret = NETDEV_TX_OK;
	}
	return ret;
}

static void sge_tx_reclaim_cb(unsigned long data)
{
	int i;
	struct sge *sge = (struct sge *)data;

	for (i = 0; i < SGE_CMDQ_N; ++i) {
		struct cmdQ *q = &sge->cmdQ[i];

		if (!spin_trylock(&q->lock))
			continue;

		reclaim_completed_tx(sge, q);
		if (i == 0 && q->in_use) {
			writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
		}
		spin_unlock(&q->lock);
	}
	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
}

/*
 * Propagate changes of the SGE coalescing parameters to the HW.
 */
int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
{
	sge->fixed_intrtimer = p->rx_coalesce_usecs *
		core_ticks_per_usec(sge->adapter);
	writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
	return 0;
}

/*
 * Allocates both RX and TX resources and configures the SGE.  However,
 * the hardware is not enabled yet.
 */
int t1_sge_configure(struct sge *sge, struct sge_params *p)
{
	if (alloc_rx_resources(sge, p))
		return -ENOMEM;
	if (alloc_tx_resources(sge, p)) {
		free_rx_resources(sge);
		return -ENOMEM;
	}
	configure_sge(sge, p);

	/*
	 * Now that we have sized the free lists, calculate the payload
	 * capacity of the large buffers.  Other parts of the driver use
	 * this to set the max offload coalescing size so that RX packets
	 * do not overflow our large buffers.
	 */
	p->large_buf_capacity = jumbo_payload_capacity(sge);
	return 0;
}

/*
 * Disables the DMA engine.
 */
void t1_sge_stop(struct sge *sge)
{
	int i;

	writel(0, sge->adapter->regs + A_SG_CONTROL);
	readl(sge->adapter->regs + A_SG_CONTROL); /* flush */

	if (is_T2(sge->adapter))
		del_timer_sync(&sge->espibug_timer);

	del_timer_sync(&sge->tx_reclaim_timer);
	if (sge->tx_sched)
		tx_sched_stop(sge);

	for (i = 0; i < MAX_NPORTS; i++)
		kfree_skb(sge->espibug_skb[i]);
}

/*
 * Enables the DMA engine.
 */
void t1_sge_start(struct sge *sge)
{
	refill_free_list(sge, &sge->freelQ[0]);
	refill_free_list(sge, &sge->freelQ[1]);

	writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
	doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
	readl(sge->adapter->regs + A_SG_CONTROL); /* flush */

	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);

	if (is_T2(sge->adapter))
		mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}

/*
 * Callback for the T204 ESPI 'stuck packet feature' workaround.
 */
static void espibug_workaround_t204(unsigned long data)
{
	struct adapter *adapter = (struct adapter *)data;
	struct sge *sge = adapter->sge;
	unsigned int nports = adapter->params.nports;
	u32 seop[MAX_NPORTS];

	if (adapter->open_device_map & PORT_MASK) {
		int i;

		if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0)
			return;

		for (i = 0; i < nports; i++) {
			struct sk_buff *skb = sge->espibug_skb[i];

			if (!netif_running(adapter->port[i].dev) ||
			    netif_queue_stopped(adapter->port[i].dev) ||
			    !seop[i] || ((seop[i] & 0xfff) != 0) || !skb)
				continue;

			if (!skb->cb[0]) {
				skb_copy_to_linear_data_offset(skb,
						sizeof(struct cpl_tx_pkt),
							       ch_mac_addr,
							       ETH_ALEN);
				skb_copy_to_linear_data_offset(skb,
							       skb->len - 10,
							       ch_mac_addr,
							       ETH_ALEN);
				skb->cb[0] = 0xff;
			}

			/* bump the reference count to avoid freeing of
			 * the skb once the DMA has completed.
			 */
			skb = skb_get(skb);
			t1_sge_tx(skb, adapter, 0, adapter->port[i].dev);
		}
	}
	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}

static void espibug_workaround(unsigned long data)
{
	struct adapter *adapter = (struct adapter *)data;
	struct sge *sge = adapter->sge;

	if (netif_running(adapter->port[0].dev)) {
		struct sk_buff *skb = sge->espibug_skb[0];
		u32 seop = t1_espi_get_mon(adapter, 0x930, 0);

		if ((seop & 0xfff0fff) == 0xfff && skb) {
			if (!skb->cb[0]) {
				skb_copy_to_linear_data_offset(skb,
						sizeof(struct cpl_tx_pkt),
							       ch_mac_addr,
							       ETH_ALEN);
				skb_copy_to_linear_data_offset(skb,
							       skb->len - 10,
							       ch_mac_addr,
							       ETH_ALEN);
				skb->cb[0] = 0xff;
			}

			/* bump the reference count to avoid freeing of the
			 * skb once the DMA has completed.
			 */
			skb = skb_get(skb);
			t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
		}
	}
	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}

/*
 * Creates a t1_sge structure and returns suggested resource parameters.
 */
struct sge *t1_sge_create(struct adapter *adapter, struct sge_params *p)
{
	struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL);
	int i;

	if (!sge)
		return NULL;

	sge->adapter = adapter;
	sge->netdev = adapter->port[0].dev;
	sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
	sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	for_each_port(adapter, i) {
		sge->port_stats[i] = alloc_percpu(struct sge_port_stats);
		if (!sge->port_stats[i])
			goto nomem_port;
	}

	init_timer(&sge->tx_reclaim_timer);
	sge->tx_reclaim_timer.data = (unsigned long)sge;
	sge->tx_reclaim_timer.function = sge_tx_reclaim_cb;

	if (is_T2(sge->adapter)) {
		init_timer(&sge->espibug_timer);

		if (adapter->params.nports > 1) {
			tx_sched_init(sge);
			sge->espibug_timer.function = espibug_workaround_t204;
		} else
			sge->espibug_timer.function = espibug_workaround;
		sge->espibug_timer.data = (unsigned long)sge->adapter;

		sge->espibug_timeout = 1;
		/* for T204, every 10ms */
		if (adapter->params.nports > 1)
			sge->espibug_timeout = HZ/100;
	}

	p->cmdQ_size[0] = SGE_CMDQ0_E_N;
	p->cmdQ_size[1] = SGE_CMDQ1_E_N;
	p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
	p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
	if (sge->tx_sched) {
		if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204)
			p->rx_coalesce_usecs = 15;
		else
			p->rx_coalesce_usecs = 50;
	} else
		p->rx_coalesce_usecs = 50;

	p->coalesce_enable = 0;
	p->sample_interval_usecs = 0;

	return sge;

nomem_port:
	while (i >= 0) {
		free_percpu(sge->port_stats[i]);
		--i;
	}
	kfree(sge);
	return NULL;
}