/*
 * This file is part of the Chelsio T4/T5 Ethernet driver for Linux (cxgb4),
 * by Chelsio Communications, released under a Dual BSD/GPL license.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/bonding.h>
#include <asm/uaccess.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4fw_version.h"
#include "cxgb4_dcb.h"
#include "cxgb4_debugfs.h"
#include "clip_tbl.h"
#include "l2t.h"

char cxgb4_driver_name[] = KBUILD_MODNAME;

#ifdef DRV_VERSION
#undef DRV_VERSION
#endif
#define DRV_VERSION "2.0.0-ko"
const char cxgb4_driver_version[] = DRV_VERSION;
#define DRV_DESC "Chelsio T4/T5 Network Driver"

/* Host shadow copy of an ingress filter entry.  This is in host native
 * format and doesn't match the ordering or bit order, etc. of the hardware
 * or the firmware command.
 */
struct filter_entry {
	/* Administrative fields for the filter */
	u32 valid:1;            /* filter allocated and valid */
	u32 locked:1;           /* filter is administratively locked */

	u32 pending:1;          /* filter action is pending firmware reply */
	u32 smtidx:8;           /* Source MAC Table index for smac */
	struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

	/* The filter itself.  Most of this is a straight copy of information
	 * provided by the extended ioctl.  Some fields are translated to
	 * internal forms.
	 */
	struct ch_filter_specification fs;
};

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/* Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct pci_device_id cxgb4_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x4

/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine
 * is called for both.
 */
#define CH_PCI_DEVICE_ID_FUNCTION2 0x0

#define CH_PCI_ID_TABLE_ENTRY(devid) \
		{PCI_VDEVICE(CHELSIO, (devid)), 4}

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
		{ 0, } \
	}

#include "t4_pci_id_tbl.h"

#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);

/*
 * Normally we're willing to become the firmware's Master PF but will be
 * happy if another PF has already become the Master and initialized the
 * adapter.  Setting "force_init" will cause this driver to forcibly
 * establish itself as the Master PF and initialize the adapter.
 */
static uint force_init;

module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");

/*
 * Normally if the firmware we connect to has Configuration File support, we
 * use that and only fall back to the old Driver-based initialization if the
 * Configuration File fails for some reason.  If force_old_init is set, then
 * we'll always use the old Driver-based initialization sequence.
 */
static uint force_old_init;

module_param(force_old_init, uint, 0644);
MODULE_PARM_DESC(force_old_init, "Force old initialization sequence, deprecated"
		 " parameter");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");

/*
 * Queue interrupt hold-off timer values.  Queues default to the first of
 * these upon creation.
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
		 "0..4 in microseconds, deprecated parameter");

static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
		 "thresholds 1..3 for queue interrupt packet counters, "
		 "deprecated parameter");

/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of these fields
 * via an unaligned access.
 */
static int rx_dma_offset = 2;

static bool vf_acls;

#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement, "
		 "deprecated parameter");

/* Configure the number of PCI-E Virtual Functions which are to be
 * instantiated on SR-IOV capable Physical Functions.
 */
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
#endif

/*
 * TX Queue select used to determine what algorithm to use for selecting TX
 * queue. Select between the kernel provided function (select_queue=0) or user
 * cxgb_select_queue function (select_queue=1)
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
		 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");

static unsigned int tp_vlan_pri_map = HW_TPL_FR_MT_PR_IV_P_FC;

module_param(tp_vlan_pri_map, uint, 0644);
MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration, "
		 "deprecated parameter");

static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
/* Adapter list to be accessed from atomic context */
static LIST_HEAD(adap_rcu_list);
static DEFINE_SPINLOCK(adap_rcu_lock);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };
264
265static void link_report(struct net_device *dev)
266{
267 if (!netif_carrier_ok(dev))
268 netdev_info(dev, "link down\n");
269 else {
270 static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
271
272 const char *s = "10Mbps";
273 const struct port_info *p = netdev_priv(dev);
274
275 switch (p->link_cfg.speed) {
276 case 10000:
277 s = "10Gbps";
278 break;
279 case 1000:
280 s = "1000Mbps";
281 break;
282 case 100:
283 s = "100Mbps";
284 break;
285 case 40000:
286 s = "40Gbps";
287 break;
288 }
289
290 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
291 fc[p->link_cfg.fc]);
292 }
293}
294
295#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
297static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
298{
299 struct port_info *pi = netdev_priv(dev);
300 struct adapter *adap = pi->adapter;
301 struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
302 int i;

	/* We use a simple mapping of Port TX Queue Index to DCB
	 * Priority when we're enabling DCB.
	 */
307 for (i = 0; i < pi->nqsets; i++, txq++) {
308 u32 name, value;
309 int err;
310
311 name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
312 FW_PARAMS_PARAM_X_V(
313 FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
314 FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
315 value = enable ? i : 0xffffffff;

		/* Since we can be called while atomic (from "interrupt
		 * level") we need to issue the Set Parameters Command
		 * without sleeping (timeout < 0).
		 */
321 err = t4_set_params_nosleep(adap, adap->mbox, adap->fn, 0, 1,
322 &name, &value);
323
324 if (err)
325 dev_err(adap->pdev_dev,
326 "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
327 enable ? "set" : "unset", pi->port_id, i, -err);
328 else
329 txq->dcb_prio = value;
330 }
331}
332#endif
333
334void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
335{
336 struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
339 if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
340 if (link_stat)
341 netif_carrier_on(dev);
342 else {
343#ifdef CONFIG_CHELSIO_T4_DCB
344 cxgb4_dcb_state_init(dev);
345 dcb_tx_queue_prio_enable(dev, false);
346#endif
347 netif_carrier_off(dev);
348 }
349
350 link_report(dev);
351 }
352}
353
354void t4_os_portmod_changed(const struct adapter *adap, int port_id)
355{
356 static const char *mod_str[] = {
357 NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
358 };
359
360 const struct net_device *dev = adap->port[port_id];
361 const struct port_info *pi = netdev_priv(dev);
362
363 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
364 netdev_info(dev, "port module unplugged\n");
365 else if (pi->mod_type < ARRAY_SIZE(mod_str))
366 netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
367}

/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
373static int set_addr_filters(const struct net_device *dev, bool sleep)
374{
375 u64 mhash = 0;
376 u64 uhash = 0;
377 bool free = true;
378 u16 filt_idx[7];
379 const u8 *addr[7];
380 int ret, naddr = 0;
381 const struct netdev_hw_addr *ha;
382 int uc_cnt = netdev_uc_count(dev);
383 int mc_cnt = netdev_mc_count(dev);
384 const struct port_info *pi = netdev_priv(dev);
385 unsigned int mb = pi->adapter->fn;

	/* first do the secondary unicast addresses */
388 netdev_for_each_uc_addr(ha, dev) {
389 addr[naddr++] = ha->addr;
390 if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
391 ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
392 naddr, addr, filt_idx, &uhash, sleep);
393 if (ret < 0)
394 return ret;
395
396 free = false;
397 naddr = 0;
398 }
399 }

	/* next set up the multicast addresses */
402 netdev_for_each_mc_addr(ha, dev) {
403 addr[naddr++] = ha->addr;
404 if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
405 ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
406 naddr, addr, filt_idx, &mhash, sleep);
407 if (ret < 0)
408 return ret;
409
410 free = false;
411 naddr = 0;
412 }
413 }
414
415 return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
416 uhash | mhash, sleep);
417}

int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
436{
437 int ret;
438 struct port_info *pi = netdev_priv(dev);
439
440 ret = set_addr_filters(dev, sleep_ok);
441 if (ret == 0)
442 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
443 (dev->flags & IFF_PROMISC) ? 1 : 0,
444 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
445 sleep_ok);
446 return ret;
447}

/*
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
455static int link_start(struct net_device *dev)
456{
457 int ret;
458 struct port_info *pi = netdev_priv(dev);
459 unsigned int mb = pi->adapter->fn;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
465 ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
466 !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
467 if (ret == 0) {
468 ret = t4_change_mac(pi->adapter, mb, pi->viid,
469 pi->xact_addr_filt, dev->dev_addr, true,
470 true);
471 if (ret >= 0) {
472 pi->xact_addr_filt = ret;
473 ret = 0;
474 }
475 }
476 if (ret == 0)
477 ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
478 &pi->link_cfg);
479 if (ret == 0) {
480 local_bh_disable();
481 ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
482 true, CXGB4_DCB_ENABLED);
483 local_bh_enable();
484 }
485
486 return ret;
487}
488
489int cxgb4_dcb_enabled(const struct net_device *dev)
490{
491#ifdef CONFIG_CHELSIO_T4_DCB
492 struct port_info *pi = netdev_priv(dev);
493
494 if (!pi->dcb.enabled)
495 return 0;
496
497 return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
498 (pi->dcb.state == CXGB4_DCB_STATE_HOST));
499#else
500 return 0;
501#endif
502}
503EXPORT_SYMBOL(cxgb4_dcb_enabled);

#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
507static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
508{
509 int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
510 struct net_device *dev = adap->port[port];
511 int old_dcb_enabled = cxgb4_dcb_enabled(dev);
512 int new_dcb_enabled;
513
514 cxgb4_dcb_handle_fw_update(adap, pcmd);
515 new_dcb_enabled = cxgb4_dcb_enabled(dev);

	/* If the DCB has become enabled or disabled on the port then we're
	 * going to need to set up/tear down DCB Priority parameters for the
	 * TX Queues associated with the port.
	 */
521 if (new_dcb_enabled != old_dcb_enabled)
522 dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
523}
524#endif

/* Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
529static void clear_filter(struct adapter *adap, struct filter_entry *f)
530{
	/* If the new or old filter have loopback rewriting rules then we'll
	 * need to free any existing Layer Two Table (L2T) entries of the old
	 * filter rule.  The firmware will handle freeing up any Source MAC
	 * Table (SMT) entries used for rewriting Source MAC Addresses in
	 * loopback rules.
	 */
537 if (f->l2t)
538 cxgb4_l2t_release(f->l2t);

	/* The zeroing of the filter rule below clears the filter valid,
	 * pending, locked flags, l2t pointer, etc. so it's all we need for
	 * this operation.
	 */
544 memset(f, 0, sizeof(*f));
545}

/* Handle a filter write/deletion reply.
 */
549static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
550{
551 unsigned int idx = GET_TID(rpl);
552 unsigned int nidx = idx - adap->tids.ftid_base;
553 unsigned int ret;
554 struct filter_entry *f;
555
556 if (idx >= adap->tids.ftid_base && nidx <
557 (adap->tids.nftids + adap->tids.nsftids)) {
558 idx = nidx;
559 ret = TCB_COOKIE_G(rpl->cookie);
560 f = &adap->tids.ftid_tab[idx];
561
562 if (ret == FW_FILTER_WR_FLT_DELETED) {
			/* Clear the filter when we get confirmation from the
			 * hardware that the filter has been deleted.
			 */
566 clear_filter(adap, f);
567 } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
568 dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
569 idx);
570 clear_filter(adap, f);
571 } else if (ret == FW_FILTER_WR_FLT_ADDED) {
572 f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
573 f->pending = 0;
574 f->valid = 1;
575 } else {
			/* Something went wrong.  Issue a warning about the
			 * problem and clear everything out.
			 */
579 dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
580 idx, ret);
581 clear_filter(adap, f);
582 }
583 }
584}

/* Response queue handler for the FW event queue.
 */
588static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
589 const struct pkt_gl *gl)
590{
591 u8 opcode = ((const struct rss_header *)rsp)->opcode;
592
593 rsp++;

	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 */
597 if (unlikely(opcode == CPL_FW4_MSG &&
598 ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
599 rsp++;
600 opcode = ((const struct rss_header *)rsp)->opcode;
601 rsp++;
602 if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adap->pdev_dev,
				"unexpected FW4/CPL %#x on FW event queue\n",
				opcode);
605 goto out;
606 }
607 }
608
609 if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
610 const struct cpl_sge_egr_update *p = (void *)rsp;
611 unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
612 struct sge_txq *txq;
613
614 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
615 txq->restarts++;
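		/* Ethernet TX queues map to netdev TX queues we can wake
		 * directly; offload TX queues are restarted from their
		 * resume tasklet instead.
		 */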
616 if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
617 struct sge_eth_txq *eq;
618
619 eq = container_of(txq, struct sge_eth_txq, q);
620 netif_tx_wake_queue(eq->txq);
621 } else {
622 struct sge_ofld_txq *oq;
623
624 oq = container_of(txq, struct sge_ofld_txq, q);
625 tasklet_schedule(&oq->qresume_tsk);
626 }
627 } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
628 const struct cpl_fw6_msg *p = (void *)rsp;
629
630#ifdef CONFIG_CHELSIO_T4_DCB
631 const struct fw_port_cmd *pcmd = (const void *)p->data;
632 unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
633 unsigned int action =
634 FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));
635
636 if (cmd == FW_PORT_CMD &&
637 action == FW_PORT_ACTION_GET_PORT_INFO) {
638 int port = FW_PORT_CMD_PORTID_G(
639 be32_to_cpu(pcmd->op_to_portid));
640 struct net_device *dev = q->adap->port[port];
641 int state_input = ((pcmd->u.info.dcbxdis_pkd &
642 FW_PORT_CMD_DCBXDIS_F)
643 ? CXGB4_DCB_INPUT_FW_DISABLED
644 : CXGB4_DCB_INPUT_FW_ENABLED);
645
646 cxgb4_dcb_state_fsm(dev, state_input);
647 }
648
649 if (cmd == FW_PORT_CMD &&
650 action == FW_PORT_ACTION_L2_DCB_CFG)
651 dcb_rpl(q->adap, pcmd);
652 else
653#endif
654 if (p->type == 0)
655 t4_handle_fw_rpl(q->adap, p->data);
656 } else if (opcode == CPL_L2T_WRITE_RPL) {
657 const struct cpl_l2t_write_rpl *p = (void *)rsp;
658
659 do_l2t_write_rpl(q->adap, p);
660 } else if (opcode == CPL_SET_TCB_RPL) {
661 const struct cpl_set_tcb_rpl *p = (void *)rsp;
662
663 filter_rpl(q->adap, p);
664 } else
665 dev_err(q->adap->pdev_dev,
666 "unexpected CPL %#x on FW event queue\n", opcode);
667out:
668 return 0;
669}

/**
 *	uldrx_handler - response queue handler for ULD queues
 *	@q: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the offload message
 *	@gl: the gather list of packet fragments
 *
 *	Delivers the packet to the ULD corresponding to the queue and drops
 *	it if the ULD can't handle it.
 */
680static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
681 const struct pkt_gl *gl)
682{
683 struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);

	/* FW can send CPLs encapsulated in a CPL_FW4_MSG.
	 */
687 if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
688 ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
689 rsp += 2;
690
691 if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
692 rxq->stats.nomem++;
693 return -1;
694 }
695 if (gl == NULL)
696 rxq->stats.imm++;
697 else if (gl == CXGB4_MSG_AN)
698 rxq->stats.an++;
699 else
700 rxq->stats.pkts++;
701 return 0;
702}
703
704static void disable_msi(struct adapter *adapter)
705{
706 if (adapter->flags & USING_MSIX) {
707 pci_disable_msix(adapter->pdev);
708 adapter->flags &= ~USING_MSIX;
709 } else if (adapter->flags & USING_MSI) {
710 pci_disable_msi(adapter->pdev);
711 adapter->flags &= ~USING_MSI;
712 }
713}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
718static irqreturn_t t4_nondata_intr(int irq, void *cookie)
719{
720 struct adapter *adap = cookie;
721 u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));
722
723 if (v & PFSW_F) {
724 adap->swintr = 1;
725 t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
726 }
727 if (adap->flags & MASTER_PF)
728 t4_slow_intr_handler(adap);
729 return IRQ_HANDLED;
730}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

	/* non-data interrupts */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

	/* FW events */
	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
		 adap->port[0]->name);

	/* Ethernet queues */
747 for_each_port(adap, j) {
748 struct net_device *d = adap->port[j];
749 const struct port_info *pi = netdev_priv(d);
750
751 for (i = 0; i < pi->nqsets; i++, msi_idx++)
752 snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
753 d->name, i);
754 }

	/* offload queues */
757 for_each_ofldrxq(&adap->sge, i)
758 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
759 adap->port[0]->name, i);
760
761 for_each_rdmarxq(&adap->sge, i)
762 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
763 adap->port[0]->name, i);
764
765 for_each_rdmaciq(&adap->sge, i)
766 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
767 adap->port[0]->name, i);
768}
769
770static int request_msix_queue_irqs(struct adapter *adap)
771{
772 struct sge *s = &adap->sge;
773 int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
774 int msi_index = 2;
775
776 err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
777 adap->msix_info[1].desc, &s->fw_evtq);
778 if (err)
779 return err;
780
781 for_each_ethrxq(s, ethqidx) {
782 err = request_irq(adap->msix_info[msi_index].vec,
783 t4_sge_intr_msix, 0,
784 adap->msix_info[msi_index].desc,
785 &s->ethrxq[ethqidx].rspq);
786 if (err)
787 goto unwind;
788 msi_index++;
789 }
790 for_each_ofldrxq(s, ofldqidx) {
791 err = request_irq(adap->msix_info[msi_index].vec,
792 t4_sge_intr_msix, 0,
793 adap->msix_info[msi_index].desc,
794 &s->ofldrxq[ofldqidx].rspq);
795 if (err)
796 goto unwind;
797 msi_index++;
798 }
799 for_each_rdmarxq(s, rdmaqidx) {
800 err = request_irq(adap->msix_info[msi_index].vec,
801 t4_sge_intr_msix, 0,
802 adap->msix_info[msi_index].desc,
803 &s->rdmarxq[rdmaqidx].rspq);
804 if (err)
805 goto unwind;
806 msi_index++;
807 }
808 for_each_rdmaciq(s, rdmaciqqidx) {
809 err = request_irq(adap->msix_info[msi_index].vec,
810 t4_sge_intr_msix, 0,
811 adap->msix_info[msi_index].desc,
812 &s->rdmaciq[rdmaciqqidx].rspq);
813 if (err)
814 goto unwind;
815 msi_index++;
816 }
817 return 0;
818
819unwind:
820 while (--rdmaciqqidx >= 0)
821 free_irq(adap->msix_info[--msi_index].vec,
822 &s->rdmaciq[rdmaciqqidx].rspq);
823 while (--rdmaqidx >= 0)
824 free_irq(adap->msix_info[--msi_index].vec,
825 &s->rdmarxq[rdmaqidx].rspq);
826 while (--ofldqidx >= 0)
827 free_irq(adap->msix_info[--msi_index].vec,
828 &s->ofldrxq[ofldqidx].rspq);
829 while (--ethqidx >= 0)
830 free_irq(adap->msix_info[--msi_index].vec,
831 &s->ethrxq[ethqidx].rspq);
832 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
833 return err;
834}
835
836static void free_msix_queue_irqs(struct adapter *adap)
837{
838 int i, msi_index = 2;
839 struct sge *s = &adap->sge;
840
841 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
842 for_each_ethrxq(s, i)
843 free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
844 for_each_ofldrxq(s, i)
845 free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
846 for_each_rdmarxq(s, i)
847 free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
848 for_each_rdmaciq(s, i)
849 free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
850}

/**
 *	cxgb4_write_rss - write the RSS table for a given port
 *	@pi: the port
 *	@queues: array of queue indices for RSS
 *
 *	Sets up the portion of the HW RSS table for the port's VI to
 *	distribute packets to the Rx queues in @queues.
 */
860int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
861{
862 u16 *rss;
863 int i, err;
864 const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
865
866 rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
867 if (!rss)
868 return -ENOMEM;

	/* map the queue indices to queue ids */
871 for (i = 0; i < pi->rss_size; i++, queues++)
872 rss[i] = q[*queues].rspq.abs_id;
873
874 err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
875 pi->rss_size, rss, pi->rss_size);
876 kfree(rss);
877 return err;
878}

/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS for each port.
 */
886static int setup_rss(struct adapter *adap)
887{
888 int i, err;
889
890 for_each_port(adap, i) {
891 const struct port_info *pi = adap2pinfo(adap, i);
892
893 err = cxgb4_write_rss(pi, pi->rss);
894 if (err)
895 return err;
896 }
897 return 0;
898}

/*
 * Return the channel of the ingress queue with the given qid.
 */
903static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
904{
905 qid -= p->ingr_start;
906 return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
907}

/*
 * Wait until all NAPI handlers are descheduled.
 */
912static void quiesce_rx(struct adapter *adap)
913{
914 int i;
915
916 for (i = 0; i < adap->sge.ingr_sz; i++) {
917 struct sge_rspq *q = adap->sge.ingr_map[i];
918
919 if (q && q->handler) {
920 napi_disable(&q->napi);
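			/* Wait for any in-flight busy-poll user to release
			 * the queue's NAPI lock before declaring the queue
			 * quiesced.
			 */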
921 local_bh_disable();
922 while (!cxgb_poll_lock_napi(q))
923 mdelay(1);
924 local_bh_enable();
925 }
926
927 }
928}

/* Disable interrupt and napi handler */
931static void disable_interrupts(struct adapter *adap)
932{
933 if (adap->flags & FULL_INIT_DONE) {
934 t4_intr_disable(adap);
935 if (adap->flags & USING_MSIX) {
936 free_msix_queue_irqs(adap);
937 free_irq(adap->msix_info[0].vec, adap);
938 } else {
939 free_irq(adap->pdev->irq, adap);
940 }
941 quiesce_rx(adap);
942 }
943}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
948static void enable_rx(struct adapter *adap)
949{
950 int i;
951
952 for (i = 0; i < adap->sge.ingr_sz; i++) {
953 struct sge_rspq *q = adap->sge.ingr_map[i];
954
955 if (!q)
956 continue;
957 if (q->handler) {
958 cxgb_busy_poll_init_lock(q);
959 napi_enable(&q->napi);
960 }
961
962 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
963 SEINTARM_V(q->intr_params) |
964 INGRESSQID_V(q->cntxt_id));
965 }
966}
967
968static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
969 unsigned int nq, unsigned int per_chan, int msi_idx,
970 u16 *ids)
971{
972 int i, err;
973
974 for (i = 0; i < nq; i++, q++) {
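		/* With dedicated MSI-X vectors (msi_idx > 0) each queue takes
		 * the next vector; a non-positive msi_idx means interrupts
		 * are forwarded to a common queue instead.
		 */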
975 if (msi_idx > 0)
976 msi_idx++;
977 err = t4_sge_alloc_rxq(adap, &q->rspq, false,
978 adap->port[i / per_chan],
979 msi_idx, q->fl.size ? &q->fl : NULL,
980 uldrx_handler);
981 if (err)
982 return err;
983 memset(&q->stats, 0, sizeof(q->stats));
984 if (ids)
985 ids[i] = q->rspq.abs_id;
986 }
987 return 0;
988}

/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue types per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
998static int setup_sge_queues(struct adapter *adap)
999{
1000 int err, msi_idx, i, j;
1001 struct sge *s = &adap->sge;
1002
1003 bitmap_zero(s->starving_fl, s->egr_sz);
1004 bitmap_zero(s->txq_maperr, s->egr_sz);
1005
1006 if (adap->flags & USING_MSIX)
1007 msi_idx = 1;
1008 else {
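		/* INTx/MSI: allocate an interrupt-forwarding queue and encode
		 * its absolute id as a negative msi_idx; the queues created
		 * below will forward their interrupts to it.
		 */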
1009 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
1010 NULL, NULL);
1011 if (err)
1012 return err;
1013 msi_idx = -((int)s->intrq.abs_id + 1);
1014 }

	/* Allocate the FW event queue first: the code below relies on its
	 * context id (fw_evtq.cntxt_id) when creating the TX queues.
	 */
1029 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
1030 msi_idx, NULL, fwevtq_handler);
1031 if (err) {
1032freeout: t4_free_sge_resources(adap);
1033 return err;
1034 }
1035
1036 for_each_port(adap, i) {
1037 struct net_device *dev = adap->port[i];
1038 struct port_info *pi = netdev_priv(dev);
1039 struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
1040 struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
1041
1042 for (j = 0; j < pi->nqsets; j++, q++) {
1043 if (msi_idx > 0)
1044 msi_idx++;
1045 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
1046 msi_idx, &q->fl,
1047 t4_ethrx_handler);
1048 if (err)
1049 goto freeout;
1050 q->rspq.idx = j;
1051 memset(&q->stats, 0, sizeof(q->stats));
1052 }
1053 for (j = 0; j < pi->nqsets; j++, t++) {
1054 err = t4_sge_alloc_eth_txq(adap, t, dev,
1055 netdev_get_tx_queue(dev, j),
1056 s->fw_evtq.cntxt_id);
1057 if (err)
1058 goto freeout;
1059 }
1060 }
1061
1062 j = s->ofldqsets / adap->params.nports;
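	/* The offload queue sets are spread evenly across the ports, so j is
	 * the number of offload TX queues per port.
	 */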
1063 for_each_ofldrxq(s, i) {
1064 err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i],
1065 adap->port[i / j],
1066 s->fw_evtq.cntxt_id);
1067 if (err)
1068 goto freeout;
1069 }
1070
1071#define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids) do { \
1072 err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, msi_idx, ids); \
1073 if (err) \
1074 goto freeout; \
1075 if (msi_idx > 0) \
1076 msi_idx += nq; \
1077} while (0)
1078
1079 ALLOC_OFLD_RXQS(s->ofldrxq, s->ofldqsets, j, s->ofld_rxq);
1080 ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq);
1081 j = s->rdmaciqs / adap->params.nports;
1082 ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq);
1083
1084#undef ALLOC_OFLD_RXQS
1085
1086 for_each_port(adap, i) {
		/*
		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
1091 err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
1092 s->fw_evtq.cntxt_id,
1093 s->rdmarxq[i].rspq.cntxt_id);
1094 if (err)
1095 goto freeout;
1096 }
1097
1098 t4_write_reg(adap, is_t4(adap->params.chip) ?
1099 MPS_TRC_RSS_CONTROL_A :
1100 MPS_T5_TRC_RSS_CONTROL_A,
1101 RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
1102 QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
1103 return 0;
1104}

/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
1110void *t4_alloc_mem(size_t size)
1111{
1112 void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
1113
1114 if (!p)
1115 p = vzalloc(size);
1116 return p;
1117}

/*
 * Free memory allocated through t4_alloc_mem().
 */
1122void t4_free_mem(void *addr)
1123{
1124 if (is_vmalloc_addr(addr))
1125 vfree(addr);
1126 else
1127 kfree(addr);
1128}

/* Send a Work Request to write the filter at a specified index.  We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode which will prevent any further actions against
 * it till we get a reply from the firmware on the completion status of the
 * request.
 */
1136static int set_filter_wr(struct adapter *adapter, int fidx)
1137{
1138 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1139 struct sk_buff *skb;
1140 struct fw_filter_wr *fwr;
1141 unsigned int ftid;
1142
1143 skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
1144 if (!skb)
1145 return -ENOMEM;

	/* If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
1151 if (f->fs.newdmac || f->fs.newvlan) {
		/* allocate L2T entry for new filter */
1153 f->l2t = t4_l2t_alloc_switching(adapter->l2t);
1154 if (f->l2t == NULL) {
1155 kfree_skb(skb);
1156 return -EAGAIN;
1157 }
1158 if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
1159 f->fs.eport, f->fs.dmac)) {
1160 cxgb4_l2t_release(f->l2t);
1161 f->l2t = NULL;
1162 kfree_skb(skb);
1163 return -ENOMEM;
1164 }
1165 }
1166
1167 ftid = adapter->tids.ftid_base + fidx;
1168
1169 fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
1170 memset(fwr, 0, sizeof(*fwr));
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180 fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
1181 fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr)/16));
1182 fwr->tid_to_iq =
1183 htonl(FW_FILTER_WR_TID_V(ftid) |
1184 FW_FILTER_WR_RQTYPE_V(f->fs.type) |
1185 FW_FILTER_WR_NOREPLY_V(0) |
1186 FW_FILTER_WR_IQ_V(f->fs.iq));
1187 fwr->del_filter_to_l2tix =
1188 htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
1189 FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
1190 FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
1191 FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
1192 FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
1193 FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
1194 FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
1195 FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
1196 FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
1197 f->fs.newvlan == VLAN_REWRITE) |
1198 FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
1199 f->fs.newvlan == VLAN_REWRITE) |
1200 FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
1201 FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
1202 FW_FILTER_WR_PRIO_V(f->fs.prio) |
1203 FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
1204 fwr->ethtype = htons(f->fs.val.ethtype);
1205 fwr->ethtypem = htons(f->fs.mask.ethtype);
1206 fwr->frag_to_ovlan_vldm =
1207 (FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
1208 FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
1209 FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
1210 FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
1211 FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
1212 FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
1213 fwr->smac_sel = 0;
1214 fwr->rx_chan_rx_rpl_iq =
1215 htons(FW_FILTER_WR_RX_CHAN_V(0) |
1216 FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
1217 fwr->maci_to_matchtypem =
1218 htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
1219 FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
1220 FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
1221 FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
1222 FW_FILTER_WR_PORT_V(f->fs.val.iport) |
1223 FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
1224 FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
1225 FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
1226 fwr->ptcl = f->fs.val.proto;
1227 fwr->ptclm = f->fs.mask.proto;
1228 fwr->ttyp = f->fs.val.tos;
1229 fwr->ttypm = f->fs.mask.tos;
1230 fwr->ivlan = htons(f->fs.val.ivlan);
1231 fwr->ivlanm = htons(f->fs.mask.ivlan);
1232 fwr->ovlan = htons(f->fs.val.ovlan);
1233 fwr->ovlanm = htons(f->fs.mask.ovlan);
1234 memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
1235 memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
1236 memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
1237 memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
1238 fwr->lp = htons(f->fs.val.lport);
1239 fwr->lpm = htons(f->fs.mask.lport);
1240 fwr->fp = htons(f->fs.val.fport);
1241 fwr->fpm = htons(f->fs.mask.fport);
1242 if (f->fs.newsmac)
1243 memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
1248 f->pending = 1;
1249 set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
1250 t4_ofld_send(adapter, skb);
1251 return 0;
1252}

/* Delete the filter at a specified index.
 */
1256static int del_filter_wr(struct adapter *adapter, int fidx)
1257{
1258 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1259 struct sk_buff *skb;
1260 struct fw_filter_wr *fwr;
1261 unsigned int len, ftid;
1262
1263 len = sizeof(*fwr);
1264 ftid = adapter->tids.ftid_base + fidx;
1265
1266 skb = alloc_skb(len, GFP_KERNEL);
1267 if (!skb)
1268 return -ENOMEM;
1269
1270 fwr = (struct fw_filter_wr *)__skb_put(skb, len);
1271 t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
1276 f->pending = 1;
1277 t4_mgmt_tx(adapter, skb);
1278 return 0;
1279}
1280
1281static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
1282 void *accel_priv, select_queue_fallback_t fallback)
1283{
1284 int txq;
1285
1286#ifdef CONFIG_CHELSIO_T4_DCB
	/* If a Data Center Bridging has been successfully negotiated on this
	 * link then we'll use the skb's priority to map it to a TX Queue.
	 * The skb's priority is determined via the VLAN Tag Priority Code
	 * Point field.
	 */
1292 if (cxgb4_dcb_enabled(dev)) {
1293 u16 vlan_tci;
1294 int err;
1295
1296 err = vlan_get_tag(skb, &vlan_tci);
1297 if (unlikely(err)) {
1298 if (net_ratelimit())
1299 netdev_warn(dev,
1300 "TX Packet without VLAN Tag on DCB Link\n");
1301 txq = 0;
1302 } else {
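			/* Use the VLAN PCP (the top 3 bits of the TCI) to
			 * pick the TX queue while DCB is active.
			 */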
1303 txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
1304#ifdef CONFIG_CHELSIO_T4_FCOE
1305 if (skb->protocol == htons(ETH_P_FCOE))
1306 txq = skb->priority & 0x7;
1307#endif
1308 }
1309 return txq;
1310 }
1311#endif
1312
1313 if (select_queue) {
1314 txq = (skb_rx_queue_recorded(skb)
1315 ? skb_get_rx_queue(skb)
1316 : smp_processor_id());
1317
1318 while (unlikely(txq >= dev->real_num_tx_queues))
1319 txq -= dev->real_num_tx_queues;
1320
1321 return txq;
1322 }
1323
1324 return fallback(dev, skb) % dev->real_num_tx_queues;
1325}
1326
1327static inline int is_offload(const struct adapter *adap)
1328{
1329 return adap->params.offload;
1330}
1331
1332static int closest_timer(const struct sge *s, int time)
1333{
1334 int i, delta, match = 0, min_delta = INT_MAX;
1335
1336 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
1337 delta = time - s->timer_val[i];
1338 if (delta < 0)
1339 delta = -delta;
1340 if (delta < min_delta) {
1341 min_delta = delta;
1342 match = i;
1343 }
1344 }
1345 return match;
1346}
1347
1348static int closest_thres(const struct sge *s, int thres)
1349{
1350 int i, delta, match = 0, min_delta = INT_MAX;
1351
1352 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
1353 delta = thres - s->counter_val[i];
1354 if (delta < 0)
1355 delta = -delta;
1356 if (delta < min_delta) {
1357 min_delta = delta;
1358 match = i;
1359 }
1360 }
1361 return match;
1362}

/**
 *	cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
 *	@q: the Rx queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *	one of the two needs to be enabled for the queue to generate interrupts.
 */
1373int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
1374 unsigned int us, unsigned int cnt)
1375{
1376 struct adapter *adap = q->adap;
1377
1378 if ((us | cnt) == 0)
1379 cnt = 1;
1380
1381 if (cnt) {
1382 int err;
1383 u32 v, new_idx;
1384
1385 new_idx = closest_thres(&adap->sge, cnt);
1386 if (q->desc && q->pktcnt_idx != new_idx) {
			/* the queue has already been created, update it */
1388 v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
1389 FW_PARAMS_PARAM_X_V(
1390 FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
1391 FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
1392 err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
1393 &new_idx);
1394 if (err)
1395 return err;
1396 }
1397 q->pktcnt_idx = new_idx;
1398 }

	/* A hold-off time of 0 selects timer index 6, which effectively
	 * disables the hold-off timer; otherwise map to the closest
	 * supported timer value.
	 */
1400 us = us == 0 ? 6 : closest_timer(&adap->sge, us);
1401 q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
1402 return 0;
1403}
1404
1405static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
1406{
1407 const struct port_info *pi = netdev_priv(dev);
1408 netdev_features_t changed = dev->features ^ features;
1409 int err;
1410
1411 if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
1412 return 0;
1413
1414 err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
1415 -1, -1, -1,
1416 !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
1417 if (unlikely(err))
1418 dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
1419 return err;
1420}
1421
1422static int setup_debugfs(struct adapter *adap)
1423{
1424 if (IS_ERR_OR_NULL(adap->debugfs_root))
1425 return -1;
1426
1427#ifdef CONFIG_DEBUG_FS
1428 t4_setup_debugfs(adap);
1429#endif
1430 return 0;
1431}

/* upper-layer driver support */

/*
 * Allocate an active-open TID and set it to the supplied value.
 */
1440int cxgb4_alloc_atid(struct tid_info *t, void *data)
1441{
1442 int atid = -1;
1443
1444 spin_lock_bh(&t->atid_lock);
1445 if (t->afree) {
1446 union aopen_entry *p = t->afree;
1447
1448 atid = (p - t->atid_tab) + t->atid_base;
1449 t->afree = p->next;
1450 p->data = data;
1451 t->atids_in_use++;
1452 }
1453 spin_unlock_bh(&t->atid_lock);
1454 return atid;
1455}
1456EXPORT_SYMBOL(cxgb4_alloc_atid);

/*
 * Release an active-open TID.
 */
1461void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
1462{
1463 union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
1464
1465 spin_lock_bh(&t->atid_lock);
1466 p->next = t->afree;
1467 t->afree = p;
1468 t->atids_in_use--;
1469 spin_unlock_bh(&t->atid_lock);
1470}
1471EXPORT_SYMBOL(cxgb4_free_atid);

/*
 * Allocate a server TID and set it to the supplied value.
 */
1476int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
1477{
1478 int stid;
1479
1480 spin_lock_bh(&t->stid_lock);
1481 if (family == PF_INET) {
1482 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
1483 if (stid < t->nstids)
1484 __set_bit(stid, t->stid_bmap);
1485 else
1486 stid = -1;
1487 } else {
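		/* IPv6 servers take a block of 4 consecutive stids (an
		 * order-2 bitmap region) to cover the wider TCAM entry.
		 */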
1488 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
1489 if (stid < 0)
1490 stid = -1;
1491 }
1492 if (stid >= 0) {
1493 t->stid_tab[stid].data = data;
1494 stid += t->stid_base;

		/* IPv6 requires max of 520 bits or 16 cells in TCAM
		 * This is equivalent to 4 TIDs. With CLIP enabled it needs
		 * 2 TIDs.
		 */
1499 if (family == PF_INET)
1500 t->stids_in_use++;
1501 else
1502 t->stids_in_use += 4;
1503 }
1504 spin_unlock_bh(&t->stid_lock);
1505 return stid;
1506}
1507EXPORT_SYMBOL(cxgb4_alloc_stid);

/* Allocate a server filter TID and set it to the supplied value.
 */
1511int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
1512{
1513 int stid;
1514
1515 spin_lock_bh(&t->stid_lock);
1516 if (family == PF_INET) {
1517 stid = find_next_zero_bit(t->stid_bmap,
1518 t->nstids + t->nsftids, t->nstids);
1519 if (stid < (t->nstids + t->nsftids))
1520 __set_bit(stid, t->stid_bmap);
1521 else
1522 stid = -1;
1523 } else {
1524 stid = -1;
1525 }
1526 if (stid >= 0) {
1527 t->stid_tab[stid].data = data;
1528 stid -= t->nstids;
1529 stid += t->sftid_base;
1530 t->stids_in_use++;
1531 }
1532 spin_unlock_bh(&t->stid_lock);
1533 return stid;
1534}
1535EXPORT_SYMBOL(cxgb4_alloc_sftid);

/* Release a server TID.
 */
1539void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
1540{
	/* Is it a server filter TID? */
1542 if (t->nsftids && (stid >= t->sftid_base)) {
1543 stid -= t->sftid_base;
1544 stid += t->nstids;
1545 } else {
1546 stid -= t->stid_base;
1547 }
1548
1549 spin_lock_bh(&t->stid_lock);
1550 if (family == PF_INET)
1551 __clear_bit(stid, t->stid_bmap);
1552 else
1553 bitmap_release_region(t->stid_bmap, stid, 2);
1554 t->stid_tab[stid].data = NULL;
1555 if (family == PF_INET)
1556 t->stids_in_use--;
1557 else
1558 t->stids_in_use -= 4;
1559 spin_unlock_bh(&t->stid_lock);
1560}
1561EXPORT_SYMBOL(cxgb4_free_stid);

/*
 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
 */
1566static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
1567 unsigned int tid)
1568{
1569 struct cpl_tid_release *req;
1570
1571 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
1572 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
1573 INIT_TP_WR(req, tid);
1574 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
1575}

/*
 * Queue a TID release request and if necessary schedule a work queue to
 * process it.
 */
1581static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
1582 unsigned int tid)
1583{
1584 void **p = &t->tid_tab[tid];
1585 struct adapter *adap = container_of(t, struct adapter, tids);
1586
1587 spin_lock_bh(&adap->tid_release_lock);
1588 *p = adap->tid_release_head;
	/* Low 2 bits encode the Tx channel number */
1590 adap->tid_release_head = (void **)((uintptr_t)p | chan);
1591 if (!adap->tid_release_task_busy) {
1592 adap->tid_release_task_busy = true;
1593 queue_work(adap->workq, &adap->tid_release_task);
1594 }
1595 spin_unlock_bh(&adap->tid_release_lock);
1596}

/*
 * Process the list of pending TID release requests.
 */
1601static void process_tid_release_list(struct work_struct *work)
1602{
1603 struct sk_buff *skb;
1604 struct adapter *adap;
1605
1606 adap = container_of(work, struct adapter, tid_release_task);
1607
1608 spin_lock_bh(&adap->tid_release_lock);
1609 while (adap->tid_release_head) {
1610 void **p = adap->tid_release_head;
1611 unsigned int chan = (uintptr_t)p & 3;
1612 p = (void *)p - chan;
1613
1614 adap->tid_release_head = *p;
1615 *p = NULL;
1616 spin_unlock_bh(&adap->tid_release_lock);
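
		/* The release WR must eventually go out, so retry the skb
		 * allocation, sleeping between attempts, until it succeeds.
		 */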
1617
1618 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
1619 GFP_KERNEL)))
1620 schedule_timeout_uninterruptible(1);
1621
1622 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
1623 t4_ofld_send(adap, skb);
1624 spin_lock_bh(&adap->tid_release_lock);
1625 }
1626 adap->tid_release_task_busy = false;
1627 spin_unlock_bh(&adap->tid_release_lock);
1628}

/*
 * Release a TID and inform HW.  If we are unable to allocate the release
 * message we defer to a work queue.
 */
1634void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
1635{
1636 void *old;
1637 struct sk_buff *skb;
1638 struct adapter *adap = container_of(t, struct adapter, tids);
1639
1640 old = t->tid_tab[tid];
1641 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
1642 if (likely(skb)) {
1643 t->tid_tab[tid] = NULL;
1644 mk_tid_release(skb, chan, tid);
1645 t4_ofld_send(adap, skb);
1646 } else
1647 cxgb4_queue_tid_release(t, chan, tid);
1648 if (old)
1649 atomic_dec(&t->tids_in_use);
1650}
1651EXPORT_SYMBOL(cxgb4_remove_tid);

/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
1656static int tid_init(struct tid_info *t)
1657{
1658 size_t size;
1659 unsigned int stid_bmap_size;
1660 unsigned int natids = t->natids;
1661 struct adapter *adap = container_of(t, struct adapter, tids);
1662
1663 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
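	/* All of the TID tables below share a single allocation, carved up
	 * into tids, atids, stids, the stid bitmap and the filter table.
	 */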
1664 size = t->ntids * sizeof(*t->tid_tab) +
1665 natids * sizeof(*t->atid_tab) +
1666 t->nstids * sizeof(*t->stid_tab) +
1667 t->nsftids * sizeof(*t->stid_tab) +
1668 stid_bmap_size * sizeof(long) +
1669 t->nftids * sizeof(*t->ftid_tab) +
1670 t->nsftids * sizeof(*t->ftid_tab);
1671
1672 t->tid_tab = t4_alloc_mem(size);
1673 if (!t->tid_tab)
1674 return -ENOMEM;
1675
1676 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
1677 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
1678 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
1679 t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
1680 spin_lock_init(&t->stid_lock);
1681 spin_lock_init(&t->atid_lock);
1682
1683 t->stids_in_use = 0;
1684 t->afree = NULL;
1685 t->atids_in_use = 0;
1686 atomic_set(&t->tids_in_use, 0);

	/* Setup the free list */
1689 if (natids) {
1690 while (--natids)
1691 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
1692 t->afree = t->atid_tab;
1693 }
1694 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
	/* Reserve stid 0 for T4/T5 adapters */
1696 if (!t->stid_base &&
1697 (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
1698 __set_bit(0, t->stid_bmap);
1699
1700 return 0;
1701}

/**
 *	cxgb4_create_server - create an IP server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IP address to bind server to
 *	@sport: the server's TCP port
 *	@vlan: the VLAN header information
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IP server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
1714int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
1715 __be32 sip, __be16 sport, __be16 vlan,
1716 unsigned int queue)
1717{
1718 unsigned int chan;
1719 struct sk_buff *skb;
1720 struct adapter *adap;
1721 struct cpl_pass_open_req *req;
1722 int ret;
1723
1724 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
1725 if (!skb)
1726 return -ENOMEM;
1727
1728 adap = netdev2adap(dev);
1729 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
1730 INIT_TP_WR(req, 0);
1731 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
1732 req->local_port = sport;
1733 req->peer_port = htons(0);
1734 req->local_ip = sip;
1735 req->peer_ip = htonl(0);
1736 chan = rxq_to_chan(&adap->sge, queue);
1737 req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
1738 req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
1739 SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
1740 ret = t4_mgmt_tx(adap, skb);
1741 return net_xmit_eval(ret);
1742}
1743EXPORT_SYMBOL(cxgb4_create_server);

/*	cxgb4_create_server6 - create an IPv6 server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IPv6 address to bind server to
 *	@sport: the server's TCP port
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IPv6 server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
1755int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
1756 const struct in6_addr *sip, __be16 sport,
1757 unsigned int queue)
1758{
1759 unsigned int chan;
1760 struct sk_buff *skb;
1761 struct adapter *adap;
1762 struct cpl_pass_open_req6 *req;
1763 int ret;
1764
1765 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
1766 if (!skb)
1767 return -ENOMEM;
1768
1769 adap = netdev2adap(dev);
1770 req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
1771 INIT_TP_WR(req, 0);
1772 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
1773 req->local_port = sport;
1774 req->peer_port = htons(0);
1775 req->local_ip_hi = *(__be64 *)(sip->s6_addr);
1776 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
1777 req->peer_ip_hi = cpu_to_be64(0);
1778 req->peer_ip_lo = cpu_to_be64(0);
1779 chan = rxq_to_chan(&adap->sge, queue);
1780 req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
1781 req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
1782 SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
1783 ret = t4_mgmt_tx(adap, skb);
1784 return net_xmit_eval(ret);
1785}
1786EXPORT_SYMBOL(cxgb4_create_server6);
1787
1788int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
1789 unsigned int queue, bool ipv6)
1790{
1791 struct sk_buff *skb;
1792 struct adapter *adap;
1793 struct cpl_close_listsvr_req *req;
1794 int ret;
1795
1796 adap = netdev2adap(dev);
1797
1798 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
1799 if (!skb)
1800 return -ENOMEM;
1801
1802 req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
1803 INIT_TP_WR(req, 0);
1804 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
1805 req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
1806 LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
1807 ret = t4_mgmt_tx(adap, skb);
1808 return net_xmit_eval(ret);
1809}
1810EXPORT_SYMBOL(cxgb4_remove_server);

/**
 *	cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
 *	@mtus: the HW MTU table
 *	@mtu: the target MTU
 *	@idx: index of selected entry in the MTU table
 *
 *	Returns the index and the value in the HW MTU table that is closest to
 *	but does not exceed @mtu, unless @mtu is smaller than any table entry.
 */
1822unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
1823 unsigned int *idx)
1824{
1825 unsigned int i = 0;
1826
1827 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
1828 ++i;
1829 if (idx)
1830 *idx = i;
1831 return mtus[i];
1832}
1833EXPORT_SYMBOL(cxgb4_best_mtu);

/**
 *	cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
 *	@mtus: the HW MTU table
 *	@header_size: Header Size
 *	@data_size_max: maximum Data Segment Size
 *	@data_size_align: desired Data Segment Size Alignment (2^N)
 *	@mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
 *
 *	Similar to cxgb4_best_mtu() but instead of searching the Hardware
 *	MTU Table based solely on a Maximum MTU parameter, we break that
 *	parameter up into a Header Size and Maximum Data Segment Size, and
 *	provide a desired Data Segment Size Alignment.  If we find an MTU in
 *	the Hardware MTU Table which will result in a Data Segment Size with
 *	the requested alignment _and_ that MTU isn't "too far" from the
 *	closest MTU, then we'll return that rather than the closest MTU.
 */
1851unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
1852 unsigned short header_size,
1853 unsigned short data_size_max,
1854 unsigned short data_size_align,
1855 unsigned int *mtu_idxp)
1856{
1857 unsigned short max_mtu = header_size + data_size_max;
1858 unsigned short data_size_align_mask = data_size_align - 1;
1859 int mtu_idx, aligned_mtu_idx;
	/* Scan the MTU Table till we find an MTU which is larger than our
	 * Maximum MTU or we reach the end of the table.  Along the way,
	 * record the last MTU found, if any, which will result in a Data
	 * Segment Length matching the requested alignment.
	 */
	for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
		unsigned short data_size = mtus[mtu_idx] - header_size;

		/* If this MTU minus the Header Size would result in a
		 * Data Segment Size of the desired alignment, remember it.
		 */
		if ((data_size & data_size_align_mask) == 0)
			aligned_mtu_idx = mtu_idx;

		/* If we're not at the end of the Hardware MTU Table and the
		 * next element of that table is larger than our Maximum MTU,
		 * drop out of the loop.
		 */
		if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
			break;
	}

	/* If we fell out of the loop because we ran to the end of the
	 * Hardware MTU Table, then we just have to use the last [largest]
	 * entry.
	 */
	if (mtu_idx == NMTUS)
		mtu_idx--;

	/* If we found an MTU which resulted in the requested Data Segment
	 * Length alignment and that's "not far" from the largest MTU which is
	 * less than or equal to the maximum MTU, then use that.
	 */
	if (aligned_mtu_idx >= 0 &&
	    mtu_idx - aligned_mtu_idx <= 1)
		mtu_idx = aligned_mtu_idx;

	/* If the caller has passed in an MTU Index pointer, pass the MTU
	 * Index back.  Return the MTU value.
	 */
	if (mtu_idxp)
		*mtu_idxp = mtu_idx;
	return mtus[mtu_idx];
1903}
1904EXPORT_SYMBOL(cxgb4_best_aligned_mtu);

/**
 *	cxgb4_port_chan - get the HW channel of a port
 *	@dev: the net device for the port
 *
 *	Return the HW Tx channel of the given port.
 */
1912unsigned int cxgb4_port_chan(const struct net_device *dev)
1913{
1914 return netdev2pinfo(dev)->tx_chan;
1915}
1916EXPORT_SYMBOL(cxgb4_port_chan);
1917
1918unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
1919{
1920 struct adapter *adap = netdev2adap(dev);
1921 u32 v1, v2, lp_count, hp_count;
1922
1923 v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
1924 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
1925 if (is_t4(adap->params.chip)) {
1926 lp_count = LP_COUNT_G(v1);
1927 hp_count = HP_COUNT_G(v1);
1928 } else {
1929 lp_count = LP_COUNT_T5_G(v1);
1930 hp_count = HP_COUNT_T5_G(v2);
1931 }
1932 return lpfifo ? lp_count : hp_count;
1933}
1934EXPORT_SYMBOL(cxgb4_dbfifo_count);

/**
 *	cxgb4_port_viid - get the VI id of a port
 *	@dev: the net device for the port
 *
 *	Return the VI id of the given port.
 */
1942unsigned int cxgb4_port_viid(const struct net_device *dev)
1943{
1944 return netdev2pinfo(dev)->viid;
1945}
1946EXPORT_SYMBOL(cxgb4_port_viid);

/**
 *	cxgb4_port_idx - get the index of a port
 *	@dev: the net device for the port
 *
 *	Return the index of the given port.
 */
1954unsigned int cxgb4_port_idx(const struct net_device *dev)
1955{
1956 return netdev2pinfo(dev)->port_id;
1957}
1958EXPORT_SYMBOL(cxgb4_port_idx);
1959
1960void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
1961 struct tp_tcp_stats *v6)
1962{
1963 struct adapter *adap = pci_get_drvdata(pdev);
1964
1965 spin_lock(&adap->stats_lock);
1966 t4_tp_get_tcp_stats(adap, v4, v6);
1967 spin_unlock(&adap->stats_lock);
1968}
1969EXPORT_SYMBOL(cxgb4_get_tcp_stats);
1970
1971void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
1972 const unsigned int *pgsz_order)
1973{
1974 struct adapter *adap = netdev2adap(dev);
1975
1976 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
1977 t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
1978 HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
1979 HPZ3_V(pgsz_order[3]));
1980}
1981EXPORT_SYMBOL(cxgb4_iscsi_init);
1982
1983int cxgb4_flush_eq_cache(struct net_device *dev)
1984{
1985 struct adapter *adap = netdev2adap(dev);
1986 int ret;
1987
1988 ret = t4_fwaddrspace_write(adap, adap->mbox,
1989 0xe1000000 + SGE_CTXT_CMD_A, 0x20000000);
1990 return ret;
1991}
1992EXPORT_SYMBOL(cxgb4_flush_eq_cache);
1993
1994static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
1995{
1996 u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
1997 __be64 indices;
1998 int ret;
1999
2000 spin_lock(&adap->win0_lock);
2001 ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
2002 sizeof(indices), (__be32 *)&indices,
2003 T4_MEMORY_READ);
2004 spin_unlock(&adap->win0_lock);
2005 if (!ret) {
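		/* The queue context holds the cidx and pidx packed into one
		 * big-endian doubleword; extract the two 16-bit fields.
		 */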
2006 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
2007 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
2008 }
2009 return ret;
2010}
2011
2012int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
2013 u16 size)
2014{
2015 struct adapter *adap = netdev2adap(dev);
2016 u16 hw_pidx, hw_cidx;
2017 int ret;
2018
2019 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
2020 if (ret)
2021 goto out;
2022
2023 if (pidx != hw_pidx) {
2024 u16 delta;
2025 u32 val;
2026
2027 if (pidx >= hw_pidx)
2028 delta = pidx - hw_pidx;
2029 else
2030 delta = size - hw_pidx + pidx;
2031
2032 if (is_t4(adap->params.chip))
2033 val = PIDX_V(delta);
2034 else
2035 val = PIDX_T5_V(delta);
2036 wmb();
2037 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
2038 QID_V(qid) | val);
2039 }
2040out:
2041 return ret;
2042}
2043EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
2044
2045void cxgb4_disable_db_coalescing(struct net_device *dev)
2046{
2047 struct adapter *adap;
2048
2049 adap = netdev2adap(dev);
2050 t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F,
2051 NOCOALESCE_F);
2052}
2053EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
2054
2055void cxgb4_enable_db_coalescing(struct net_device *dev)
2056{
2057 struct adapter *adap;
2058
2059 adap = netdev2adap(dev);
2060 t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F, 0);
2061}
2062EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
2063
2064int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
2065{
2066 struct adapter *adap;
2067 u32 offset, memtype, memaddr;
2068 u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
2069 u32 edc0_end, edc1_end, mc0_end, mc1_end;
2070 int ret;
2071
2072 adap = netdev2adap(dev);
2073
2074 offset = ((stag >> 8) * 32) + adap->vres.stag.start;

	/* Figure out where the offset lands in the Memory Type/Address
	 * scheme.  This code assumes that the memory is laid out starting at
	 * offset 0 with no breaks as: EDC0, EDC1, MC0, MC1.  All cards have
	 * both EDC0 and EDC1.  Some cards will have neither MC0 nor MC1,
	 * most cards have MC0, and some have both MC0 and MC1.
	 */
2082 size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
2083 edc0_size = EDRAM0_SIZE_G(size) << 20;
2084 size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
2085 edc1_size = EDRAM1_SIZE_G(size) << 20;
2086 size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
2087 mc0_size = EXT_MEM0_SIZE_G(size) << 20;
2088
2089 edc0_end = edc0_size;
2090 edc1_end = edc0_end + edc1_size;
2091 mc0_end = edc1_end + mc0_size;
2092
2093 if (offset < edc0_end) {
2094 memtype = MEM_EDC0;
2095 memaddr = offset;
2096 } else if (offset < edc1_end) {
2097 memtype = MEM_EDC1;
2098 memaddr = offset - edc0_end;
2099 } else {
2100 if (offset < mc0_end) {
2101 memtype = MEM_MC0;
2102 memaddr = offset - edc1_end;
2103 } else if (is_t4(adap->params.chip)) {
			/* T4 only has a single memory channel */
2105 goto err;
2106 } else {
2107 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
2108 mc1_size = EXT_MEM1_SIZE_G(size) << 20;
2109 mc1_end = mc0_end + mc1_size;
2110 if (offset < mc1_end) {
2111 memtype = MEM_MC1;
2112 memaddr = offset - mc0_end;
2113 } else {
				/* offset beyond the end of any memory */
2115 goto err;
2116 }
2117 }
2118 }
2119
2120 spin_lock(&adap->win0_lock);
2121 ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
2122 spin_unlock(&adap->win0_lock);
2123 return ret;
2124
2125err:
2126 dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
2127 stag, offset);
2128 return -EINVAL;
2129}
2130EXPORT_SYMBOL(cxgb4_read_tpte);
2131
2132u64 cxgb4_read_sge_timestamp(struct net_device *dev)
2133{
2134 u32 hi, lo;
2135 struct adapter *adap;
2136
2137 adap = netdev2adap(dev);
2138 lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
2139 hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));
2140
2141 return ((u64)hi << 32) | (u64)lo;
2142}
2143EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
2144
2145int cxgb4_bar2_sge_qregs(struct net_device *dev,
2146 unsigned int qid,
2147 enum cxgb4_bar2_qtype qtype,
2148 u64 *pbar2_qoffset,
2149 unsigned int *pbar2_qid)
2150{
2151 return cxgb4_t4_bar2_sge_qregs(netdev2adap(dev),
2152 qid,
2153 (qtype == CXGB4_BAR2_QTYPE_EGRESS
2154 ? T4_BAR2_QTYPE_EGRESS
2155 : T4_BAR2_QTYPE_INGRESS),
2156 pbar2_qoffset,
2157 pbar2_qid);
2158}
2159EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);
2160
2161static struct pci_driver cxgb4_driver;
2162
2163static void check_neigh_update(struct neighbour *neigh)
2164{
2165 const struct device *parent;
2166 const struct net_device *netdev = neigh->dev;
2167
2168 if (netdev->priv_flags & IFF_802_1Q_VLAN)
2169 netdev = vlan_dev_real_dev(netdev);
2170 parent = netdev->dev.parent;
2171 if (parent && parent->driver == &cxgb4_driver.driver)
2172 t4_l2t_update(dev_get_drvdata(parent), neigh);
2173}
2174
2175static int netevent_cb(struct notifier_block *nb, unsigned long event,
2176 void *data)
2177{
2178 switch (event) {
2179 case NETEVENT_NEIGH_UPDATE:
2180 check_neigh_update(data);
2181 break;
2182 case NETEVENT_REDIRECT:
2183 default:
2184 break;
2185 }
2186 return 0;
2187}
2188
2189static bool netevent_registered;
2190static struct notifier_block cxgb4_netevent_nb = {
2191 .notifier_call = netevent_cb
2192};
2193
2194static void drain_db_fifo(struct adapter *adap, int usecs)
2195{
2196 u32 v1, v2, lp_count, hp_count;
2197
2198 do {
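		/* Poll the doorbell FIFO occupancy counters until both the
		 * low- and high-priority FIFOs have drained.
		 */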
2199 v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
2200 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
2201 if (is_t4(adap->params.chip)) {
2202 lp_count = LP_COUNT_G(v1);
2203 hp_count = HP_COUNT_G(v1);
2204 } else {
2205 lp_count = LP_COUNT_T5_G(v1);
2206 hp_count = HP_COUNT_T5_G(v2);
2207 }
2208
2209 if (lp_count == 0 && hp_count == 0)
2210 break;
2211 set_current_state(TASK_UNINTERRUPTIBLE);
2212 schedule_timeout(usecs_to_jiffies(usecs));
2213 } while (1);
2214}
2215
2216static void disable_txq_db(struct sge_txq *q)
2217{
2218 unsigned long flags;
2219
2220 spin_lock_irqsave(&q->db_lock, flags);
2221 q->db_disabled = 1;
2222 spin_unlock_irqrestore(&q->db_lock, flags);
2223}
2224
2225static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
2226{
2227 spin_lock_irq(&q->db_lock);
2228 if (q->db_pidx_inc) {
		/* Make sure that all writes to the TX Descriptors are
		 * committed before we tell the hardware about them.
		 */
2232 wmb();
2233 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
2234 QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
2235 q->db_pidx_inc = 0;
2236 }
2237 q->db_disabled = 0;
2238 spin_unlock_irq(&q->db_lock);
2239}
2240
2241static void disable_dbs(struct adapter *adap)
2242{
2243 int i;
2244
2245 for_each_ethrxq(&adap->sge, i)
2246 disable_txq_db(&adap->sge.ethtxq[i].q);
2247 for_each_ofldrxq(&adap->sge, i)
2248 disable_txq_db(&adap->sge.ofldtxq[i].q);
2249 for_each_port(adap, i)
2250 disable_txq_db(&adap->sge.ctrlq[i].q);
2251}
2252
2253static void enable_dbs(struct adapter *adap)
2254{
2255 int i;
2256
2257 for_each_ethrxq(&adap->sge, i)
2258 enable_txq_db(adap, &adap->sge.ethtxq[i].q);
2259 for_each_ofldrxq(&adap->sge, i)
2260 enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
2261 for_each_port(adap, i)
2262 enable_txq_db(adap, &adap->sge.ctrlq[i].q);
2263}
2264
2265static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
2266{
2267 if (adap->uld_handle[CXGB4_ULD_RDMA])
2268 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
2269 cmd);
2270}
2271
2272static void process_db_full(struct work_struct *work)
2273{
2274 struct adapter *adap;
2275
2276 adap = container_of(work, struct adapter, db_full_task);
2277
2278 drain_db_fifo(adap, dbfifo_drain_delay);
2279 enable_dbs(adap);
2280 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
2281 t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
2282 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
2283 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
2284}
2285
2286static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
2287{
2288 u16 hw_pidx, hw_cidx;
2289 int ret;
2290
2291 spin_lock_irq(&q->db_lock);
2292 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
2293 if (ret)
2294 goto out;
2295 if (q->db_pidx != hw_pidx) {
2296 u16 delta;
2297 u32 val;
2298
2299 if (q->db_pidx >= hw_pidx)
2300 delta = q->db_pidx - hw_pidx;
2301 else
2302 delta = q->size - hw_pidx + q->db_pidx;
2303
2304 if (is_t4(adap->params.chip))
2305 val = PIDX_V(delta);
2306 else
2307 val = PIDX_T5_V(delta);
2308 wmb();
2309 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
2310 QID_V(q->cntxt_id) | val);
2311 }
2312out:
2313 q->db_disabled = 0;
2314 q->db_pidx_inc = 0;
2315 spin_unlock_irq(&q->db_lock);
2316 if (ret)
2317 CH_WARN(adap, "DB drop recovery failed.\n");
2318}
2319static void recover_all_queues(struct adapter *adap)
2320{
2321 int i;
2322
2323 for_each_ethrxq(&adap->sge, i)
2324 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
2325 for_each_ofldrxq(&adap->sge, i)
2326 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
2327 for_each_port(adap, i)
2328 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
2329}
2330
2331static void process_db_drop(struct work_struct *work)
2332{
2333 struct adapter *adap;
2334
2335 adap = container_of(work, struct adapter, db_drop_task);
2336
2337 if (is_t4(adap->params.chip)) {
2338 drain_db_fifo(adap, dbfifo_drain_delay);
2339 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
2340 drain_db_fifo(adap, dbfifo_drain_delay);
2341 recover_all_queues(adap);
2342 drain_db_fifo(adap, dbfifo_drain_delay);
2343 enable_dbs(adap);
2344 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
2345 } else {
2346 u32 dropped_db = t4_read_reg(adap, 0x010ac);
2347 u16 qid = (dropped_db >> 15) & 0x1ffff;
2348 u16 pidx_inc = dropped_db & 0x1fff;
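
		/* On T5 and later, recover by re-issuing the lost PIDX
		 * increment through the queue's BAR2 doorbell.
		 */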
2349 u64 bar2_qoffset;
2350 unsigned int bar2_qid;
2351 int ret;
2352
2353 ret = cxgb4_t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
2354 &bar2_qoffset, &bar2_qid);
2355 if (ret)
2356 dev_err(adap->pdev_dev, "doorbell drop recovery: "
2357 "qid=%d, pidx_inc=%d\n", qid, pidx_inc);
2358 else
2359 writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
2360 adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);

		/* Re-enable BAR2 WC */
2363 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
2364 }
2365
2366 t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
2367}
2368
2369void t4_db_full(struct adapter *adap)
2370{
2371 if (is_t4(adap->params.chip)) {
2372 disable_dbs(adap);
2373 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
2374 t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
2375 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
2376 queue_work(adap->workq, &adap->db_full_task);
2377 }
2378}
2379
2380void t4_db_dropped(struct adapter *adap)
2381{
2382 if (is_t4(adap->params.chip)) {
2383 disable_dbs(adap);
2384 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
2385 }
2386 queue_work(adap->workq, &adap->db_drop_task);
2387}
2388
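/* Fill a cxgb4_lld_info structure with this adapter's resources and hand it
 * to the given Upper Layer Driver via its "add" method.
 */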
2389static void uld_attach(struct adapter *adap, unsigned int uld)
2390{
2391 void *handle;
2392 struct cxgb4_lld_info lli;
2393 unsigned short i;
2394
2395 lli.pdev = adap->pdev;
2396 lli.pf = adap->fn;
2397 lli.l2t = adap->l2t;
2398 lli.tids = &adap->tids;
2399 lli.ports = adap->port;
2400 lli.vr = &adap->vres;
2401 lli.mtus = adap->params.mtus;
2402 if (uld == CXGB4_ULD_RDMA) {
2403 lli.rxq_ids = adap->sge.rdma_rxq;
2404 lli.ciq_ids = adap->sge.rdma_ciq;
2405 lli.nrxq = adap->sge.rdmaqs;
2406 lli.nciq = adap->sge.rdmaciqs;
2407 } else if (uld == CXGB4_ULD_ISCSI) {
2408 lli.rxq_ids = adap->sge.ofld_rxq;
2409 lli.nrxq = adap->sge.ofldqsets;
2410 }
2411 lli.ntxq = adap->sge.ofldqsets;
2412 lli.nchan = adap->params.nports;
2413 lli.nports = adap->params.nports;
2414 lli.wr_cred = adap->params.ofldq_wr_cred;
2415 lli.adapter_type = adap->params.chip;
2416 lli.iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
2417 lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
2418 lli.udb_density = 1 << adap->params.sge.eq_qpp;
2419 lli.ucq_density = 1 << adap->params.sge.iq_qpp;
2420 lli.filt_mode = adap->params.tp.vlan_pri_map;
2421
2422 for (i = 0; i < NCHAN; i++)
2423 lli.tx_modq[i] = i;
2424 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
2425 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
2426 lli.fw_vers = adap->params.fw_vers;
2427 lli.dbfifo_int_thresh = dbfifo_int_thresh;
2428 lli.sge_ingpadboundary = adap->sge.fl_align;
2429 lli.sge_egrstatuspagesize = adap->sge.stat_len;
2430 lli.sge_pktshift = adap->sge.pktshift;
2431 lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
2432 lli.max_ordird_qp = adap->params.max_ordird_qp;
2433 lli.max_ird_adapter = adap->params.max_ird_adapter;
2434 lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
2435
2436 handle = ulds[uld].add(&lli);
2437 if (IS_ERR(handle)) {
2438 dev_warn(adap->pdev_dev,
2439 "could not attach to the %s driver, error %ld\n",
2440 uld_str[uld], PTR_ERR(handle));
2441 return;
2442 }
2443
2444 adap->uld_handle[uld] = handle;
2445
2446 if (!netevent_registered) {
2447 register_netevent_notifier(&cxgb4_netevent_nb);
2448 netevent_registered = true;
2449 }
2450
2451 if (adap->flags & FULL_INIT_DONE)
2452 ulds[uld].state_change(handle, CXGB4_STATE_UP);
2453}
2454
2455static void attach_ulds(struct adapter *adap)
2456{
2457 unsigned int i;
2458
2459 spin_lock(&adap_rcu_lock);
2460 list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
2461 spin_unlock(&adap_rcu_lock);
2462
2463 mutex_lock(&uld_mutex);
2464 list_add_tail(&adap->list_node, &adapter_list);
2465 for (i = 0; i < CXGB4_ULD_MAX; i++)
2466 if (ulds[i].add)
2467 uld_attach(adap, i);
2468 mutex_unlock(&uld_mutex);
2469}
2470
2471static void detach_ulds(struct adapter *adap)
2472{
2473 unsigned int i;
2474
2475 mutex_lock(&uld_mutex);
2476 list_del(&adap->list_node);
2477 for (i = 0; i < CXGB4_ULD_MAX; i++)
2478 if (adap->uld_handle[i]) {
2479 ulds[i].state_change(adap->uld_handle[i],
2480 CXGB4_STATE_DETACH);
2481 adap->uld_handle[i] = NULL;
2482 }
2483 if (netevent_registered && list_empty(&adapter_list)) {
2484 unregister_netevent_notifier(&cxgb4_netevent_nb);
2485 netevent_registered = false;
2486 }
2487 mutex_unlock(&uld_mutex);
2488
2489 spin_lock(&adap_rcu_lock);
2490 list_del_rcu(&adap->rcu_node);
2491 spin_unlock(&adap_rcu_lock);
2492}
2493
2494static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
2495{
2496 unsigned int i;
2497
2498 mutex_lock(&uld_mutex);
2499 for (i = 0; i < CXGB4_ULD_MAX; i++)
2500 if (adap->uld_handle[i])
2501 ulds[i].state_change(adap->uld_handle[i], new_state);
2502 mutex_unlock(&uld_mutex);
2503}
2504
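/**
 *	cxgb4_register_uld - register an upper-layer driver
 *	@type: the ULD type
 *	@p: the ULD methods
 *
 *	Registers an upper-layer driver with this driver and notifies the ULD
 *	about any presently available devices that support its type.  Returns
 *	%-EBUSY if a ULD of the same type is already registered.
 */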
2514int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
2515{
2516 int ret = 0;
2517 struct adapter *adap;
2518
2519 if (type >= CXGB4_ULD_MAX)
2520 return -EINVAL;
2521 mutex_lock(&uld_mutex);
2522 if (ulds[type].add) {
2523 ret = -EBUSY;
2524 goto out;
2525 }
2526 ulds[type] = *p;
2527 list_for_each_entry(adap, &adapter_list, list_node)
2528 uld_attach(adap, type);
2529out: mutex_unlock(&uld_mutex);
2530 return ret;
2531}
2532EXPORT_SYMBOL(cxgb4_register_uld);
2533
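/**
 *	cxgb4_unregister_uld - unregister an upper-layer driver
 *	@type: the ULD type
 *
 *	Unregisters an existing upper-layer driver.
 */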
2540int cxgb4_unregister_uld(enum cxgb4_uld type)
2541{
2542 struct adapter *adap;
2543
2544 if (type >= CXGB4_ULD_MAX)
2545 return -EINVAL;
2546 mutex_lock(&uld_mutex);
2547 list_for_each_entry(adap, &adapter_list, list_node)
2548 adap->uld_handle[type] = NULL;
2549 ulds[type].add = NULL;
2550 mutex_unlock(&uld_mutex);
2551 return 0;
2552}
2553EXPORT_SYMBOL(cxgb4_unregister_uld);
2554
2555#if IS_ENABLED(CONFIG_IPV6)
2556static int cxgb4_inet6addr_handler(struct notifier_block *this,
2557 unsigned long event, void *data)
2558{
2559 struct inet6_ifaddr *ifa = data;
2560 struct net_device *event_dev = ifa->idev->dev;
2561 const struct device *parent = NULL;
2562#if IS_ENABLED(CONFIG_BONDING)
2563 struct adapter *adap;
2564#endif
2565 if (event_dev->priv_flags & IFF_802_1Q_VLAN)
2566 event_dev = vlan_dev_real_dev(event_dev);
2567#if IS_ENABLED(CONFIG_BONDING)
2568 if (event_dev->flags & IFF_MASTER) {
2569 list_for_each_entry(adap, &adapter_list, list_node) {
2570 switch (event) {
2571 case NETDEV_UP:
2572 cxgb4_clip_get(adap->port[0],
2573 (const u32 *)ifa, 1);
2574 break;
2575 case NETDEV_DOWN:
2576 cxgb4_clip_release(adap->port[0],
2577 (const u32 *)ifa, 1);
2578 break;
2579 default:
2580 break;
2581 }
2582 }
2583 return NOTIFY_OK;
2584 }
2585#endif
2586
2587 if (event_dev)
2588 parent = event_dev->dev.parent;
2589
2590 if (parent && parent->driver == &cxgb4_driver.driver) {
2591 switch (event) {
2592 case NETDEV_UP:
2593 cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
2594 break;
2595 case NETDEV_DOWN:
2596 cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
2597 break;
2598 default:
2599 break;
2600 }
2601 }
2602 return NOTIFY_OK;
2603}
2604
2605static bool inet6addr_registered;
2606static struct notifier_block cxgb4_inet6addr_notifier = {
2607 .notifier_call = cxgb4_inet6addr_handler
2608};
2609
2610static void update_clip(const struct adapter *adap)
2611{
2612 int i;
2613 struct net_device *dev;
2614 int ret;
2615
2616 rcu_read_lock();
2617
2618 for (i = 0; i < MAX_NPORTS; i++) {
2619 dev = adap->port[i];
2620 ret = 0;
2621
2622 if (dev)
2623 ret = cxgb4_update_root_dev_clip(dev);
2624
2625 if (ret < 0)
2626 break;
2627 }
2628 rcu_read_unlock();
2629}
2630#endif
2631
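/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */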
2642static int cxgb_up(struct adapter *adap)
2643{
2644 int err;
2645
2646 err = setup_sge_queues(adap);
2647 if (err)
2648 goto out;
2649 err = setup_rss(adap);
2650 if (err)
2651 goto freeq;
2652
2653 if (adap->flags & USING_MSIX) {
2654 name_msix_vecs(adap);
2655 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
2656 adap->msix_info[0].desc, adap);
2657 if (err)
2658 goto irq_err;
2659
2660 err = request_msix_queue_irqs(adap);
2661 if (err) {
2662 free_irq(adap->msix_info[0].vec, adap);
2663 goto irq_err;
2664 }
2665 } else {
2666 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
2667 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
2668 adap->port[0]->name, adap);
2669 if (err)
2670 goto irq_err;
2671 }
2672 enable_rx(adap);
2673 t4_sge_start(adap);
2674 t4_intr_enable(adap);
2675 adap->flags |= FULL_INIT_DONE;
2676 notify_ulds(adap, CXGB4_STATE_UP);
2677#if IS_ENABLED(CONFIG_IPV6)
2678 update_clip(adap);
2679#endif
2680 out:
2681 return err;
2682 irq_err:
2683 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
2684 freeq:
2685 t4_free_sge_resources(adap);
2686 goto out;
2687}
2688
2689static void cxgb_down(struct adapter *adapter)
2690{
2691 cancel_work_sync(&adapter->tid_release_task);
2692 cancel_work_sync(&adapter->db_full_task);
2693 cancel_work_sync(&adapter->db_drop_task);
2694 adapter->tid_release_task_busy = false;
2695 adapter->tid_release_head = NULL;
2696
2697 t4_sge_stop(adapter);
2698 t4_free_sge_resources(adapter);
2699 adapter->flags &= ~FULL_INIT_DONE;
2700}
2701
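/*
 * net_device operations
 */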
2705static int cxgb_open(struct net_device *dev)
2706{
2707 int err;
2708 struct port_info *pi = netdev_priv(dev);
2709 struct adapter *adapter = pi->adapter;
2710
2711 netif_carrier_off(dev);
2712
2713 if (!(adapter->flags & FULL_INIT_DONE)) {
2714 err = cxgb_up(adapter);
2715 if (err < 0)
2716 return err;
2717 }
2718
2719 err = link_start(dev);
2720 if (!err)
2721 netif_tx_start_all_queues(dev);
2722 return err;
2723}
2724
2725static int cxgb_close(struct net_device *dev)
2726{
2727 struct port_info *pi = netdev_priv(dev);
2728 struct adapter *adapter = pi->adapter;
2729
2730 netif_tx_stop_all_queues(dev);
2731 netif_carrier_off(dev);
2732 return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
2733}
2734
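/* Return an error number if the indicated filter isn't writable: a locked
 * filter may not be modified and one with a pending operation is busy.
 */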
2737static int writable_filter(struct filter_entry *f)
2738{
2739 if (f->locked)
2740 return -EPERM;
2741 if (f->pending)
2742 return -EBUSY;
2743
2744 return 0;
2745}
2746
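/* Delete the filter at the specified index (if valid).  The checks for all
 * the common problems with doing this, such as the filter being locked or
 * pending in another operation, are done via writable_filter().
 */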
2751static int delete_filter(struct adapter *adapter, unsigned int fidx)
2752{
2753 struct filter_entry *f;
2754 int ret;
2755
2756 if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
2757 return -EINVAL;
2758
2759 f = &adapter->tids.ftid_tab[fidx];
2760 ret = writable_filter(f);
2761 if (ret)
2762 return ret;
2763 if (f->valid)
2764 return del_filter_wr(adapter, fidx);
2765
2766 return 0;
2767}
2768
2769int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
2770 __be32 sip, __be16 sport, __be16 vlan,
2771 unsigned int queue, unsigned char port, unsigned char mask)
2772{
2773 int ret;
2774 struct filter_entry *f;
2775 struct adapter *adap;
2776 int i;
2777 u8 *val;
2778
2779 adap = netdev2adap(dev);
2780
2781
2782 stid -= adap->tids.sftid_base;
2783 stid += adap->tids.nftids;
2784
2785
2786
2787 f = &adap->tids.ftid_tab[stid];
2788 ret = writable_filter(f);
2789 if (ret)
2790 return ret;
2791
2792
2793
2794
2795 if (f->valid)
2796 clear_filter(adap, f);
2797
2798
2799 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
2800 f->fs.val.lport = cpu_to_be16(sport);
2801 f->fs.mask.lport = ~0;
2802 val = (u8 *)&sip;
2803 if ((val[0] | val[1] | val[2] | val[3]) != 0) {
2804 for (i = 0; i < 4; i++) {
2805 f->fs.val.lip[i] = val[i];
2806 f->fs.mask.lip[i] = ~0;
2807 }
2808 if (adap->params.tp.vlan_pri_map & PORT_F) {
2809 f->fs.val.iport = port;
2810 f->fs.mask.iport = mask;
2811 }
2812 }
2813
2814 if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
2815 f->fs.val.proto = IPPROTO_TCP;
2816 f->fs.mask.proto = ~0;
2817 }
2818
2819 f->fs.dirsteer = 1;
2820 f->fs.iq = queue;
2821
2822 f->locked = 1;
2823 f->fs.rpttid = 1;
2824
2825 ret = set_filter_wr(adap, stid);
2826 if (ret) {
2827 clear_filter(adap, f);
2828 return ret;
2829 }
2830
2831 return 0;
2832}
2833EXPORT_SYMBOL(cxgb4_create_server_filter);
2834
2835int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
2836 unsigned int queue, bool ipv6)
2837{
2838 int ret;
2839 struct filter_entry *f;
2840 struct adapter *adap;
2841
2842 adap = netdev2adap(dev);
2843
2844
2845 stid -= adap->tids.sftid_base;
2846 stid += adap->tids.nftids;
2847
2848 f = &adap->tids.ftid_tab[stid];
2849
2850 f->locked = 0;
2851
2852 ret = delete_filter(adap, stid);
2853 if (ret)
2854 return ret;
2855
2856 return 0;
2857}
2858EXPORT_SYMBOL(cxgb4_remove_server_filter);
2859
2860static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
2861 struct rtnl_link_stats64 *ns)
2862{
2863 struct port_stats stats;
2864 struct port_info *p = netdev_priv(dev);
2865 struct adapter *adapter = p->adapter;
2866
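	/* Block retrieving statistics during EEH error recovery.  Otherwise,
	 * the recovery might fail and the PCI device will be removed
	 * permanently.
	 */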
2871 spin_lock(&adapter->stats_lock);
2872 if (!netif_device_present(dev)) {
2873 spin_unlock(&adapter->stats_lock);
2874 return ns;
2875 }
2876 t4_get_port_stats(adapter, p->tx_chan, &stats);
2877 spin_unlock(&adapter->stats_lock);
2878
2879 ns->tx_bytes = stats.tx_octets;
2880 ns->tx_packets = stats.tx_frames;
2881 ns->rx_bytes = stats.rx_octets;
2882 ns->rx_packets = stats.rx_frames;
2883 ns->multicast = stats.rx_mcast_frames;
2884
2885
2886 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
2887 stats.rx_runt;
2888 ns->rx_over_errors = 0;
2889 ns->rx_crc_errors = stats.rx_fcs_err;
2890 ns->rx_frame_errors = stats.rx_symbol_err;
2891 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
2892 stats.rx_ovflow2 + stats.rx_ovflow3 +
2893 stats.rx_trunc0 + stats.rx_trunc1 +
2894 stats.rx_trunc2 + stats.rx_trunc3;
2895 ns->rx_missed_errors = 0;
2896
2897
2898 ns->tx_aborted_errors = 0;
2899 ns->tx_carrier_errors = 0;
2900 ns->tx_fifo_errors = 0;
2901 ns->tx_heartbeat_errors = 0;
2902 ns->tx_window_errors = 0;
2903
2904 ns->tx_errors = stats.tx_error_frames;
2905 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
2906 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
2907 return ns;
2908}
2909
2910static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2911{
2912 unsigned int mbox;
2913 int ret = 0, prtad, devad;
2914 struct port_info *pi = netdev_priv(dev);
2915 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
2916
2917 switch (cmd) {
2918 case SIOCGMIIPHY:
2919 if (pi->mdio_addr < 0)
2920 return -EOPNOTSUPP;
2921 data->phy_id = pi->mdio_addr;
2922 break;
2923 case SIOCGMIIREG:
2924 case SIOCSMIIREG:
2925 if (mdio_phy_id_is_c45(data->phy_id)) {
2926 prtad = mdio_phy_id_prtad(data->phy_id);
2927 devad = mdio_phy_id_devad(data->phy_id);
2928 } else if (data->phy_id < 32) {
2929 prtad = data->phy_id;
2930 devad = 0;
2931 data->reg_num &= 0x1f;
2932 } else
2933 return -EINVAL;
2934
2935 mbox = pi->adapter->fn;
2936 if (cmd == SIOCGMIIREG)
2937 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
2938 data->reg_num, &data->val_out);
2939 else
2940 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
2941 data->reg_num, data->val_in);
2942 break;
2943 default:
2944 return -EOPNOTSUPP;
2945 }
2946 return ret;
2947}
2948
2949static void cxgb_set_rxmode(struct net_device *dev)
2950{
2951
2952 set_rxmode(dev, -1, false);
2953}
2954
2955static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2956{
2957 int ret;
2958 struct port_info *pi = netdev_priv(dev);
2959
	if (new_mtu < 81 || new_mtu > MAX_MTU)         /* accommodate SACK */
		return -EINVAL;
2962 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
2963 -1, -1, -1, true);
2964 if (!ret)
2965 dev->mtu = new_mtu;
2966 return ret;
2967}
2968
2969static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2970{
2971 int ret;
2972 struct sockaddr *addr = p;
2973 struct port_info *pi = netdev_priv(dev);
2974
2975 if (!is_valid_ether_addr(addr->sa_data))
2976 return -EADDRNOTAVAIL;
2977
2978 ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
2979 pi->xact_addr_filt, addr->sa_data, true, true);
2980 if (ret < 0)
2981 return ret;
2982
2983 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2984 pi->xact_addr_filt = ret;
2985 return 0;
2986}
2987
2988#ifdef CONFIG_NET_POLL_CONTROLLER
2989static void cxgb_netpoll(struct net_device *dev)
2990{
2991 struct port_info *pi = netdev_priv(dev);
2992 struct adapter *adap = pi->adapter;
2993
2994 if (adap->flags & USING_MSIX) {
2995 int i;
2996 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
2997
2998 for (i = pi->nqsets; i; i--, rx++)
2999 t4_sge_intr_msix(0, &rx->rspq);
3000 } else
3001 t4_intr_handler(adap)(0, adap);
3002}
3003#endif
3004
3005static const struct net_device_ops cxgb4_netdev_ops = {
3006 .ndo_open = cxgb_open,
3007 .ndo_stop = cxgb_close,
3008 .ndo_start_xmit = t4_eth_xmit,
3009 .ndo_select_queue = cxgb_select_queue,
3010 .ndo_get_stats64 = cxgb_get_stats,
3011 .ndo_set_rx_mode = cxgb_set_rxmode,
3012 .ndo_set_mac_address = cxgb_set_mac_addr,
3013 .ndo_set_features = cxgb_set_features,
3014 .ndo_validate_addr = eth_validate_addr,
3015 .ndo_do_ioctl = cxgb_ioctl,
3016 .ndo_change_mtu = cxgb_change_mtu,
3017#ifdef CONFIG_NET_POLL_CONTROLLER
3018 .ndo_poll_controller = cxgb_netpoll,
3019#endif
3020#ifdef CONFIG_CHELSIO_T4_FCOE
3021 .ndo_fcoe_enable = cxgb_fcoe_enable,
3022 .ndo_fcoe_disable = cxgb_fcoe_disable,
3023#endif
3024#ifdef CONFIG_NET_RX_BUSY_POLL
3025 .ndo_busy_poll = cxgb_busy_poll,
3026#endif
3027
3028};
3029
3030void t4_fatal_err(struct adapter *adap)
3031{
3032 t4_set_reg_field(adap, SGE_CONTROL_A, GLOBALENABLE_F, 0);
3033 t4_intr_disable(adap);
3034 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
3035}
3036
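/* Return the specified PCI-E Configuration Space register from our Physical
 * Function.  We try first via a Firmware LDST Command since we prefer to let
 * the firmware own all of these registers, but if that fails we go for it
 * directly ourselves.
 */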
3042static u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
3043{
3044 struct fw_ldst_cmd ldst_cmd;
3045 u32 val;
3046 int ret;
3047
3048
3049
3050
3051 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
3052 ldst_cmd.op_to_addrspace =
3053 htonl(FW_CMD_OP_V(FW_LDST_CMD) |
3054 FW_CMD_REQUEST_F |
3055 FW_CMD_READ_F |
3056 FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE));
3057 ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
3058 ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
3059 ldst_cmd.u.pcie.ctrl_to_fn =
3060 (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->fn));
3061 ldst_cmd.u.pcie.r = reg;
3062 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
3063 &ldst_cmd);
3064
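	/* If the LDST Command succeeded, extract the returned register
	 * value.  Otherwise read it directly ourselves.
	 */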
3068 if (ret == 0)
3069 val = ntohl(ldst_cmd.u.pcie.data[0]);
3070 else
3071 t4_hw_pci_read_cfg4(adap, reg, &val);
3072
3073 return val;
3074}
3075
3076static void setup_memwin(struct adapter *adap)
3077{
3078 u32 mem_win0_base, mem_win1_base, mem_win2_base, mem_win2_aperture;
3079
3080 if (is_t4(adap->params.chip)) {
3081 u32 bar0;
3082
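		/* Truncation intentional: we only read the bottom 32 bits of
		 * the 64-bit BAR0.  We use the hardware backdoor mechanism
		 * to read BAR0 rather than pci_resource_start() because we
		 * could be operating inside a Virtual Machine which is
		 * trapping our accesses to our Configuration Space; the
		 * Memory Window decoders must be programmed with the actual
		 * addresses which will be coming across the PCI-E link.
		 */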
3092 bar0 = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_0);
3093 bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
3094 adap->t4_bar0 = bar0;
3095
3096 mem_win0_base = bar0 + MEMWIN0_BASE;
3097 mem_win1_base = bar0 + MEMWIN1_BASE;
3098 mem_win2_base = bar0 + MEMWIN2_BASE;
3099 mem_win2_aperture = MEMWIN2_APERTURE;
3100 } else {
3101
3102 mem_win0_base = MEMWIN0_BASE;
3103 mem_win1_base = MEMWIN1_BASE;
3104 mem_win2_base = MEMWIN2_BASE_T5;
3105 mem_win2_aperture = MEMWIN2_APERTURE_T5;
3106 }
3107 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 0),
3108 mem_win0_base | BIR_V(0) |
3109 WINDOW_V(ilog2(MEMWIN0_APERTURE) - 10));
3110 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 1),
3111 mem_win1_base | BIR_V(0) |
3112 WINDOW_V(ilog2(MEMWIN1_APERTURE) - 10));
3113 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2),
3114 mem_win2_base | BIR_V(0) |
3115 WINDOW_V(ilog2(mem_win2_aperture) - 10));
3116 t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2));
3117}
3118
3119static void setup_memwin_rdma(struct adapter *adap)
3120{
3121 if (adap->vres.ocq.size) {
3122 u32 start;
3123 unsigned int sz_kb;
3124
3125 start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
3126 start &= PCI_BASE_ADDRESS_MEM_MASK;
3127 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
3128 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
3129 t4_write_reg(adap,
3130 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
3131 start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
3132 t4_write_reg(adap,
3133 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
3134 adap->vres.ocq.start);
3135 t4_read_reg(adap,
3136 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
3137 }
3138}
3139
3140static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
3141{
3142 u32 v;
3143 int ret;
3144
3145
3146 memset(c, 0, sizeof(*c));
3147 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3148 FW_CMD_REQUEST_F | FW_CMD_READ_F);
3149 c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
3150 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
3151 if (ret < 0)
3152 return ret;
3153
3154
3155 if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
3156 if (!vf_acls)
3157 c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
3158 else
3159 c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
	} else if (vf_acls) {
		dev_err(adap->pdev_dev, "virtualization ACLs not supported\n");
		return -EINVAL;
3163 }
3164 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3165 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
3166 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
3167 if (ret < 0)
3168 return ret;
3169
3170 ret = t4_config_glbl_rss(adap, adap->fn,
3171 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
3172 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
3173 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
3174 if (ret < 0)
3175 return ret;
3176
3177 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, adap->sge.egr_sz, 64,
3178 MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
3179 FW_CMD_CAP_PF);
3180 if (ret < 0)
3181 return ret;
3182
3183 t4_sge_init(adap);
3184
3185
3186 t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
3187 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
3188 t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
3189 v = t4_read_reg(adap, TP_PIO_DATA_A);
3190 t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);
3191
3192
3193 adap->params.tp.tx_modq_map = 0xE4;
3194 t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
3195 TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));
3196
3197
3198 v = 0x84218421;
3199 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3200 &v, 1, TP_TX_SCHED_HDR_A);
3201 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3202 &v, 1, TP_TX_SCHED_FIFO_A);
3203 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3204 &v, 1, TP_TX_SCHED_PCMD_A);
3205
3206#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16
3207 if (is_offload(adap)) {
3208 t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
3209 TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3210 TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3211 TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3212 TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
3213 t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
3214 TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3215 TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3216 TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3217 TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
3218 }
3219
3220
3221 return t4_early_init(adap, adap->fn);
3222}
3223
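/*
 * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
 */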
3227#define MAX_ATIDS 8192U
3228
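/* Tweak configuration based on system architecture, module parameters, etc.
 * Most of these have defaults assigned to them by Firmware Configuration
 * Files (if we're using them) but need to be set explicitly when using
 * hard-coded configuration; and even with Firmware Configuration Files we
 * want the ability to adjust these values.
 */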
3245static int adap_init0_tweaks(struct adapter *adapter)
3246{
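	/* Fix up various Host-Dependent Parameters like Page Size, Cache
	 * Line Size, etc.  The firmware default is for a 4KB Page Size and
	 * 64B Cache Line Size.
	 */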
3252 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
3253
3254
3255
3256
3257 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
3258 dev_err(&adapter->pdev->dev,
3259 "Ignoring illegal rx_dma_offset=%d, using 2\n",
3260 rx_dma_offset);
3261 rx_dma_offset = 2;
3262 }
3263 t4_set_reg_field(adapter, SGE_CONTROL_A,
3264 PKTSHIFT_V(PKTSHIFT_M),
3265 PKTSHIFT_V(rx_dma_offset));
3266
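	/* Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
	 * adds the pseudo header itself.
	 */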
3271 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
3272 CSUM_HAS_PSEUDO_HDR_F, 0);
3273
3274 return 0;
3275}
3276
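/* Attempt to initialize the adapter via a Firmware Configuration File. */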
3280static int adap_init0_config(struct adapter *adapter, int reset)
3281{
3282 struct fw_caps_config_cmd caps_cmd;
3283 const struct firmware *cf;
3284 unsigned long mtype = 0, maddr = 0;
3285 u32 finiver, finicsum, cfcsum;
3286 int ret;
3287 int config_issued = 0;
3288 char *fw_config_file, fw_config_file_path[256];
3289 char *config_name = NULL;
3290
3291
3292
3293
3294 if (reset) {
3295 ret = t4_fw_reset(adapter, adapter->mbox,
3296 PIORSTMODE_F | PIORST_F);
3297 if (ret < 0)
3298 goto bye;
3299 }
3300
3301
3302
3303
3304
3305
3306 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
3307 case CHELSIO_T4:
3308 fw_config_file = FW4_CFNAME;
3309 break;
3310 case CHELSIO_T5:
3311 fw_config_file = FW5_CFNAME;
3312 break;
3313 default:
3314 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
3315 adapter->pdev->device);
3316 ret = -EINVAL;
3317 goto bye;
3318 }
3319
3320 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
3321 if (ret < 0) {
3322 config_name = "On FLASH";
3323 mtype = FW_MEMTYPE_CF_FLASH;
3324 maddr = t4_flash_cfg_addr(adapter);
3325 } else {
3326 u32 params[7], val[7];
3327
3328 sprintf(fw_config_file_path,
3329 "/lib/firmware/%s", fw_config_file);
3330 config_name = fw_config_file_path;
3331
3332 if (cf->size >= FLASH_CFG_MAX_SIZE)
3333 ret = -ENOMEM;
3334 else {
3335 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3336 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
3337 ret = t4_query_params(adapter, adapter->mbox,
3338 adapter->fn, 0, 1, params, val);
3339 if (ret == 0) {
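				/* For t4_memory_rw() below, addresses and
				 * lengths must be multiples of 4 bytes.  If
				 * the Configuration File isn't a multiple of
				 * 4 bytes in length we write the trailing
				 * bytes separately, zero-padded, since we
				 * can't assume the bytes following the file
				 * are zeroed out.
				 */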
3350 size_t resid = cf->size & 0x3;
3351 size_t size = cf->size & ~0x3;
3352 __be32 *data = (__be32 *)cf->data;
3353
3354 mtype = FW_PARAMS_PARAM_Y_G(val[0]);
3355 maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;
3356
3357 spin_lock(&adapter->win0_lock);
3358 ret = t4_memory_rw(adapter, 0, mtype, maddr,
3359 size, data, T4_MEMORY_WRITE);
3360 if (ret == 0 && resid != 0) {
3361 union {
3362 __be32 word;
3363 char buf[4];
3364 } last;
3365 int i;
3366
3367 last.word = data[size >> 2];
3368 for (i = resid; i < 4; i++)
3369 last.buf[i] = 0;
3370 ret = t4_memory_rw(adapter, 0, mtype,
3371 maddr + size,
3372 4, &last.word,
3373 T4_MEMORY_WRITE);
3374 }
3375 spin_unlock(&adapter->win0_lock);
3376 }
3377 }
3378
3379 release_firmware(cf);
3380 if (ret)
3381 goto bye;
3382 }
3383
3384
3385
3386
3387
3388
3389
3390 memset(&caps_cmd, 0, sizeof(caps_cmd));
3391 caps_cmd.op_to_write =
3392 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3393 FW_CMD_REQUEST_F |
3394 FW_CMD_READ_F);
3395 caps_cmd.cfvalid_to_len16 =
3396 htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
3397 FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
3398 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
3399 FW_LEN16(caps_cmd));
3400 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3401 &caps_cmd);
3402
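	/* If the CAPS_CONFIG command failed with an ENOENT (for a Firmware
	 * Configuration File in FLASH), our last gasp effort is to use the
	 * Firmware Configuration File which is embedded in the firmware.
	 */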
3409 if (ret == -ENOENT) {
3410 memset(&caps_cmd, 0, sizeof(caps_cmd));
3411 caps_cmd.op_to_write =
3412 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3413 FW_CMD_REQUEST_F |
3414 FW_CMD_READ_F);
3415 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
3416 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
3417 sizeof(caps_cmd), &caps_cmd);
3418 config_name = "Firmware Default";
3419 }
3420
3421 config_issued = 1;
3422 if (ret < 0)
3423 goto bye;
3424
3425 finiver = ntohl(caps_cmd.finiver);
3426 finicsum = ntohl(caps_cmd.finicsum);
3427 cfcsum = ntohl(caps_cmd.cfcsum);
3428 if (finicsum != cfcsum)
		dev_warn(adapter->pdev_dev,
			 "Configuration File checksum mismatch: [fini] csum=%#x, computed csum=%#x\n",
			 finicsum, cfcsum);
3432
3433
3434
3435
3436 caps_cmd.op_to_write =
3437 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3438 FW_CMD_REQUEST_F |
3439 FW_CMD_WRITE_F);
3440 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
3441 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3442 NULL);
3443 if (ret < 0)
3444 goto bye;
3445
3446
3447
3448
3449
3450 ret = adap_init0_tweaks(adapter);
3451 if (ret < 0)
3452 goto bye;
3453
3454
3455
3456
3457
3458 ret = t4_fw_initialize(adapter, adapter->mbox);
3459 if (ret < 0)
3460 goto bye;
3461
3462
3463
3464
	dev_info(adapter->pdev_dev,
		 "Successfully configured using Firmware Configuration File \"%s\", version %#x, computed checksum %#x\n",
		 config_name, finiver, cfcsum);
3468 return 0;
3469
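	/* Something bad happened.  Return the error.  (If the "error" is that
	 * there's no Configuration File on the adapter we don't want to issue
	 * a warning since this is fairly common.)
	 */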
3475bye:
3476 if (config_issued && ret != -ENOENT)
3477 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
3478 config_name, -ret);
3479 return ret;
3480}
3481
3482static struct fw_info fw_info_array[] = {
3483 {
3484 .chip = CHELSIO_T4,
3485 .fs_name = FW4_CFNAME,
3486 .fw_mod_name = FW4_FNAME,
3487 .fw_hdr = {
3488 .chip = FW_HDR_CHIP_T4,
3489 .fw_ver = __cpu_to_be32(FW_VERSION(T4)),
3490 .intfver_nic = FW_INTFVER(T4, NIC),
3491 .intfver_vnic = FW_INTFVER(T4, VNIC),
3492 .intfver_ri = FW_INTFVER(T4, RI),
3493 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
3494 .intfver_fcoe = FW_INTFVER(T4, FCOE),
3495 },
3496 }, {
3497 .chip = CHELSIO_T5,
3498 .fs_name = FW5_CFNAME,
3499 .fw_mod_name = FW5_FNAME,
3500 .fw_hdr = {
3501 .chip = FW_HDR_CHIP_T5,
3502 .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
3503 .intfver_nic = FW_INTFVER(T5, NIC),
3504 .intfver_vnic = FW_INTFVER(T5, VNIC),
3505 .intfver_ri = FW_INTFVER(T5, RI),
3506 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
3507 .intfver_fcoe = FW_INTFVER(T5, FCOE),
3508 },
3509 }
3510};
3511
3512static struct fw_info *find_fw_info(int chip)
3513{
3514 int i;
3515
3516 for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
3517 if (fw_info_array[i].chip == chip)
3518 return &fw_info_array[i];
3519 }
3520 return NULL;
3521}
3522
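/* Phase 0 of initialization: contact the firmware, obtain our configuration
 * and perform basic initialization.
 */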
3526static int adap_init0(struct adapter *adap)
3527{
3528 int ret;
3529 u32 v, port_vec;
3530 enum dev_state state;
3531 u32 params[7], val[7];
3532 struct fw_caps_config_cmd caps_cmd;
3533 int reset = 1;
3534
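	/* Grab Firmware Device Log parameters as early as possible so we
	 * have access to it for debugging, etc.
	 */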
3538 ret = t4_init_devlog_params(adap);
3539 if (ret < 0)
3540 return ret;
3541
3542
3543 ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
3544 if (ret < 0) {
3545 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
3546 ret);
3547 return ret;
3548 }
3549 if (ret == adap->mbox)
3550 adap->flags |= MASTER_PF;
3551
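	/* If we're the Master PF Driver and the device is uninitialized,
	 * then let's consider upgrading the firmware.  We always read the
	 * firmware and TP Microcode versions first so they can be reported.
	 */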
3559 t4_get_fw_version(adap, &adap->params.fw_vers);
3560 t4_get_tp_version(adap, &adap->params.tp_vers);
3561 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
3562 struct fw_info *fw_info;
3563 struct fw_hdr *card_fw;
3564 const struct firmware *fw;
3565 const u8 *fw_data = NULL;
3566 unsigned int fw_size = 0;
3567
3568
3569
3570
3571 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
3572 if (fw_info == NULL) {
3573 dev_err(adap->pdev_dev,
3574 "unable to get firmware info for chip %d.\n",
3575 CHELSIO_CHIP_VERSION(adap->params.chip));
3576 return -EINVAL;
3577 }
3578
3579
3580
3581
3582 card_fw = t4_alloc_mem(sizeof(*card_fw));
3583
3584
3585 ret = request_firmware(&fw, fw_info->fw_mod_name,
3586 adap->pdev_dev);
3587 if (ret < 0) {
3588 dev_err(adap->pdev_dev,
3589 "unable to load firmware image %s, error %d\n",
3590 fw_info->fw_mod_name, ret);
3591 } else {
3592 fw_data = fw->data;
3593 fw_size = fw->size;
3594 }
3595
3596
3597 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
3598 state, &reset);
3599
3600
3601 release_firmware(fw);
3602 t4_free_mem(card_fw);
3603
3604 if (ret < 0)
3605 goto bye;
3606 }
3607
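	/* Grab VPD parameters.  This should be done after we establish a
	 * connection to the firmware since some of the VPD parameters
	 * (notably the Core Clock frequency) are retrieved via requests to
	 * the firmware.  On the other hand, we need these fairly early on
	 * so we do this right after getting ahold of the firmware.
	 */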
3615 ret = get_vpd_params(adap, &adap->params.vpd);
3616 if (ret < 0)
3617 goto bye;
3618
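	/* Find out what ports are available to us.  Note that we need to do
	 * this before calling adap_init0_config() since it needs nports and
	 * portvec.
	 */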
3624 v =
3625 FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3626 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
3627 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
3628 if (ret < 0)
3629 goto bye;
3630
3631 adap->params.nports = hweight32(port_vec);
3632 adap->params.portvec = port_vec;
3633
3634
3635
3636
3637 if (state == DEV_STATE_INIT) {
		dev_info(adap->pdev_dev,
			 "Coming up as %s: Adapter already initialized\n",
			 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
	} else {
		dev_info(adap->pdev_dev,
			 "Coming up as MASTER: Initializing adapter\n");
3644
3645
3646
3647
3648 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3649 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
3650 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
3651 params, val);
3652
3653
3654
3655
3656 if (ret < 0) {
			dev_err(adap->pdev_dev,
				"firmware doesn't support Firmware Configuration Files\n");
3659 goto bye;
3660 }
3661
3662
3663
3664
3665
3666 ret = adap_init0_config(adap, reset);
3667 if (ret == -ENOENT) {
			dev_err(adap->pdev_dev,
				"no Configuration File present on adapter.\n");
3670 goto bye;
3671 }
3672 if (ret < 0) {
			dev_err(adap->pdev_dev,
				"could not initialize adapter, error %d\n",
				-ret);
3675 goto bye;
3676 }
3677 }
3678
3679
3680
3681
3682
3683 ret = t4_sge_init(adap);
3684 if (ret < 0)
3685 goto bye;
3686
3687 if (is_bypass_device(adap->pdev->device))
3688 adap->params.bypass = 1;
3689
3690
3691
3692
3693#define FW_PARAM_DEV(param) \
3694 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
3695 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))
3696
3697#define FW_PARAM_PFVF(param) \
3698 FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
3699 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \
3700 FW_PARAMS_PARAM_Y_V(0) | \
3701 FW_PARAMS_PARAM_Z_V(0)
3702
3703 params[0] = FW_PARAM_PFVF(EQ_START);
3704 params[1] = FW_PARAM_PFVF(L2T_START);
3705 params[2] = FW_PARAM_PFVF(L2T_END);
3706 params[3] = FW_PARAM_PFVF(FILTER_START);
3707 params[4] = FW_PARAM_PFVF(FILTER_END);
3708 params[5] = FW_PARAM_PFVF(IQFLINT_START);
3709 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
3710 if (ret < 0)
3711 goto bye;
3712 adap->sge.egr_start = val[0];
3713 adap->l2t_start = val[1];
3714 adap->l2t_end = val[2];
3715 adap->tids.ftid_base = val[3];
3716 adap->tids.nftids = val[4] - val[3] + 1;
3717 adap->sge.ingr_start = val[5];
3718
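	/* qids (ingress/egress) returned from firmware can be anywhere
	 * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
	 * Hence the driver needs to allocate memory for this range to
	 * store the queue id to queue mapping.
	 */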
3725 params[0] = FW_PARAM_PFVF(EQ_END);
3726 params[1] = FW_PARAM_PFVF(IQFLINT_END);
3727 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
3728 if (ret < 0)
3729 goto bye;
3730 adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
3731 adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;
3732
3733 adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
3734 sizeof(*adap->sge.egr_map), GFP_KERNEL);
3735 if (!adap->sge.egr_map) {
3736 ret = -ENOMEM;
3737 goto bye;
3738 }
3739
3740 adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
3741 sizeof(*adap->sge.ingr_map), GFP_KERNEL);
3742 if (!adap->sge.ingr_map) {
3743 ret = -ENOMEM;
3744 goto bye;
3745 }
3746
3747
3748
3749
3750 adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
3751 sizeof(long), GFP_KERNEL);
3752 if (!adap->sge.starving_fl) {
3753 ret = -ENOMEM;
3754 goto bye;
3755 }
3756
3757 adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
3758 sizeof(long), GFP_KERNEL);
3759 if (!adap->sge.txq_maperr) {
3760 ret = -ENOMEM;
3761 goto bye;
3762 }
3763
3764 params[0] = FW_PARAM_PFVF(CLIP_START);
3765 params[1] = FW_PARAM_PFVF(CLIP_END);
3766 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
3767 if (ret < 0)
3768 goto bye;
3769 adap->clipt_start = val[0];
3770 adap->clipt_end = val[1];
3771
3772
3773 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
3774 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
3775 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
3776
3777
3778
3779 if ((val[0] != val[1]) && (ret >= 0)) {
3780 adap->flags |= FW_OFLD_CONN;
3781 adap->tids.aftid_base = val[0];
3782 adap->tids.aftid_end = val[1];
3783 }
3784
3785
3786
3787
3788
3789
3790 params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
3791 val[0] = 1;
3792 (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
3793
3794
3795
3796
3797
3798
3799
3800 if (is_t4(adap->params.chip)) {
3801 adap->params.ulptx_memwrite_dsgl = false;
3802 } else {
3803 params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
3804 ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
3805 1, params, val);
3806 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
3807 }
3808
3809
3810
3811
3812
3813 memset(&caps_cmd, 0, sizeof(caps_cmd));
3814 caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3815 FW_CMD_REQUEST_F | FW_CMD_READ_F);
3816 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
3817 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
3818 &caps_cmd);
3819 if (ret < 0)
3820 goto bye;
3821
3822 if (caps_cmd.ofldcaps) {
3823
3824 params[0] = FW_PARAM_DEV(NTID);
3825 params[1] = FW_PARAM_PFVF(SERVER_START);
3826 params[2] = FW_PARAM_PFVF(SERVER_END);
3827 params[3] = FW_PARAM_PFVF(TDDP_START);
3828 params[4] = FW_PARAM_PFVF(TDDP_END);
3829 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
3830 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
3831 params, val);
3832 if (ret < 0)
3833 goto bye;
3834 adap->tids.ntids = val[0];
3835 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
3836 adap->tids.stid_base = val[1];
3837 adap->tids.nstids = val[2] - val[1] + 1;
3838
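		/* Set up the server filter region: when offloaded connections
		 * via firmware work requests are enabled, divide the filter
		 * region into two parts, with regular filters getting the
		 * first third and server filters (used to redirect SYN
		 * packets to the offload queue) getting the remainder.
		 */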
3847 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
3848 adap->tids.sftid_base = adap->tids.ftid_base +
3849 DIV_ROUND_UP(adap->tids.nftids, 3);
3850 adap->tids.nsftids = adap->tids.nftids -
3851 DIV_ROUND_UP(adap->tids.nftids, 3);
3852 adap->tids.nftids = adap->tids.sftid_base -
3853 adap->tids.ftid_base;
3854 }
3855 adap->vres.ddp.start = val[3];
3856 adap->vres.ddp.size = val[4] - val[3] + 1;
3857 adap->params.ofldq_wr_cred = val[5];
3858
3859 adap->params.offload = 1;
3860 }
3861 if (caps_cmd.rdmacaps) {
3862 params[0] = FW_PARAM_PFVF(STAG_START);
3863 params[1] = FW_PARAM_PFVF(STAG_END);
3864 params[2] = FW_PARAM_PFVF(RQ_START);
3865 params[3] = FW_PARAM_PFVF(RQ_END);
3866 params[4] = FW_PARAM_PFVF(PBL_START);
3867 params[5] = FW_PARAM_PFVF(PBL_END);
3868 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
3869 params, val);
3870 if (ret < 0)
3871 goto bye;
3872 adap->vres.stag.start = val[0];
3873 adap->vres.stag.size = val[1] - val[0] + 1;
3874 adap->vres.rq.start = val[2];
3875 adap->vres.rq.size = val[3] - val[2] + 1;
3876 adap->vres.pbl.start = val[4];
3877 adap->vres.pbl.size = val[5] - val[4] + 1;
3878
3879 params[0] = FW_PARAM_PFVF(SQRQ_START);
3880 params[1] = FW_PARAM_PFVF(SQRQ_END);
3881 params[2] = FW_PARAM_PFVF(CQ_START);
3882 params[3] = FW_PARAM_PFVF(CQ_END);
3883 params[4] = FW_PARAM_PFVF(OCQ_START);
3884 params[5] = FW_PARAM_PFVF(OCQ_END);
3885 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params,
3886 val);
3887 if (ret < 0)
3888 goto bye;
3889 adap->vres.qp.start = val[0];
3890 adap->vres.qp.size = val[1] - val[0] + 1;
3891 adap->vres.cq.start = val[2];
3892 adap->vres.cq.size = val[3] - val[2] + 1;
3893 adap->vres.ocq.start = val[4];
3894 adap->vres.ocq.size = val[5] - val[4] + 1;
3895
3896 params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
3897 params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
3898 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params,
3899 val);
3900 if (ret < 0) {
3901 adap->params.max_ordird_qp = 8;
3902 adap->params.max_ird_adapter = 32 * adap->tids.ntids;
3903 ret = 0;
3904 } else {
3905 adap->params.max_ordird_qp = val[0];
3906 adap->params.max_ird_adapter = val[1];
3907 }
3908 dev_info(adap->pdev_dev,
3909 "max_ordird_qp %d max_ird_adapter %d\n",
3910 adap->params.max_ordird_qp,
3911 adap->params.max_ird_adapter);
3912 }
3913 if (caps_cmd.iscsicaps) {
3914 params[0] = FW_PARAM_PFVF(ISCSI_START);
3915 params[1] = FW_PARAM_PFVF(ISCSI_END);
3916 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
3917 params, val);
3918 if (ret < 0)
3919 goto bye;
3920 adap->vres.iscsi.start = val[0];
3921 adap->vres.iscsi.size = val[1] - val[0] + 1;
3922 }
3923#undef FW_PARAM_PFVF
3924#undef FW_PARAM_DEV
3925
3926
3927
3928
3929
3930
3931 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
3932 if (state != DEV_STATE_INIT) {
3933 int i;
3934
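		/* The default MTU Table contains values 1492 and 1500.
		 * However, for TCP, it's better to have two values which are
		 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
		 * This allows us to have a TCP Data Payload which is a
		 * multiple of 8 regardless of what combination of TCP Options
		 * are in use (always a multiple of 4 bytes) which is
		 * important for performance reasons.  For instance, if no
		 * options are in use, then we have a 20-byte IP header and a
		 * 20-byte TCP header.  In this case, a 1500-byte MSS would
		 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
		 * which is not a multiple of 8.  So using an MSS of 1488 in
		 * this case results in a TCP Data Payload of 1448 bytes which
		 * is a multiple of 8.  On the other hand, if 12-byte TCP Time
		 * Stamps are in use, then a 1500-byte MSS would result in a
		 * TCP Data Payload of 1500 - 52 == 1448 bytes which is a
		 * multiple of 8.  So, we just replace 1492 with 1488.
		 */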
3952 for (i = 0; i < NMTUS; i++)
3953 if (adap->params.mtus[i] == 1492) {
3954 adap->params.mtus[i] = 1488;
3955 break;
3956 }
3957
3958 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
3959 adap->params.b_wnd);
3960 }
3961 t4_init_sge_params(adap);
3962 t4_init_tp_params(adap);
3963 adap->flags |= FW_OK;
3964 return 0;
3965
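	/* Something bad happened.  If a command timed out or failed with EIO
	 * then the firmware is no longer operating within its spec, or
	 * something catastrophic happened to the hardware or firmware, so
	 * don't issue a farewell command in those cases.
	 */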
3971bye:
3972 kfree(adap->sge.egr_map);
3973 kfree(adap->sge.ingr_map);
3974 kfree(adap->sge.starving_fl);
3975 kfree(adap->sge.txq_maperr);
3976 if (ret != -ETIMEDOUT && ret != -EIO)
3977 t4_fw_bye(adap, adap->mbox);
3978 return ret;
3979}
3980
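/* EEH callbacks */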
3983static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
3984 pci_channel_state_t state)
3985{
3986 int i;
3987 struct adapter *adap = pci_get_drvdata(pdev);
3988
3989 if (!adap)
3990 goto out;
3991
3992 rtnl_lock();
3993 adap->flags &= ~FW_OK;
3994 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
3995 spin_lock(&adap->stats_lock);
3996 for_each_port(adap, i) {
3997 struct net_device *dev = adap->port[i];
3998
3999 netif_device_detach(dev);
4000 netif_carrier_off(dev);
4001 }
4002 spin_unlock(&adap->stats_lock);
4003 disable_interrupts(adap);
4004 if (adap->flags & FULL_INIT_DONE)
4005 cxgb_down(adap);
4006 rtnl_unlock();
4007 if ((adap->flags & DEV_ENABLED)) {
4008 pci_disable_device(pdev);
4009 adap->flags &= ~DEV_ENABLED;
4010 }
4011out: return state == pci_channel_io_perm_failure ?
4012 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
4013}
4014
4015static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
4016{
4017 int i, ret;
4018 struct fw_caps_config_cmd c;
4019 struct adapter *adap = pci_get_drvdata(pdev);
4020
4021 if (!adap) {
4022 pci_restore_state(pdev);
4023 pci_save_state(pdev);
4024 return PCI_ERS_RESULT_RECOVERED;
4025 }
4026
4027 if (!(adap->flags & DEV_ENABLED)) {
4028 if (pci_enable_device(pdev)) {
			dev_err(&pdev->dev,
				"Cannot reenable PCI device after reset\n");
4031 return PCI_ERS_RESULT_DISCONNECT;
4032 }
4033 adap->flags |= DEV_ENABLED;
4034 }
4035
4036 pci_set_master(pdev);
4037 pci_restore_state(pdev);
4038 pci_save_state(pdev);
4039 pci_cleanup_aer_uncorrect_error_status(pdev);
4040
4041 if (t4_wait_dev_ready(adap->regs) < 0)
4042 return PCI_ERS_RESULT_DISCONNECT;
4043 if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
4044 return PCI_ERS_RESULT_DISCONNECT;
4045 adap->flags |= FW_OK;
4046 if (adap_init1(adap, &c))
4047 return PCI_ERS_RESULT_DISCONNECT;
4048
4049 for_each_port(adap, i) {
4050 struct port_info *p = adap2pinfo(adap, i);
4051
4052 ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
4053 NULL, NULL);
4054 if (ret < 0)
4055 return PCI_ERS_RESULT_DISCONNECT;
4056 p->viid = ret;
4057 p->xact_addr_filt = -1;
4058 }
4059
4060 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
4061 adap->params.b_wnd);
4062 setup_memwin(adap);
4063 if (cxgb_up(adap))
4064 return PCI_ERS_RESULT_DISCONNECT;
4065 return PCI_ERS_RESULT_RECOVERED;
4066}
4067
4068static void eeh_resume(struct pci_dev *pdev)
4069{
4070 int i;
4071 struct adapter *adap = pci_get_drvdata(pdev);
4072
4073 if (!adap)
4074 return;
4075
4076 rtnl_lock();
4077 for_each_port(adap, i) {
4078 struct net_device *dev = adap->port[i];
4079
4080 if (netif_running(dev)) {
4081 link_start(dev);
4082 cxgb_set_rxmode(dev);
4083 }
4084 netif_device_attach(dev);
4085 }
4086 rtnl_unlock();
4087}
4088
4089static const struct pci_error_handlers cxgb4_eeh = {
4090 .error_detected = eeh_err_detected,
4091 .slot_reset = eeh_slot_reset,
4092 .resume = eeh_resume,
4093};
4094
4095static inline bool is_x_10g_port(const struct link_config *lc)
4096{
4097 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
4098 (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
4099}
4100
4101static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
4102 unsigned int us, unsigned int cnt,
4103 unsigned int size, unsigned int iqe_size)
4104{
4105 q->adap = adap;
4106 cxgb4_set_rspq_intr_params(q, us, cnt);
4107 q->iqe_len = iqe_size;
4108 q->size = size;
4109}
4110
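/*
 * Perform default configuration of DMA queues depending on the number and
 * type of ports we found and the number of available CPUs.  Most settings
 * can be modified by the admin prior to actual use.
 */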
4116static void cfg_queues(struct adapter *adap)
4117{
4118 struct sge *s = &adap->sge;
4119 int i, n10g = 0, qidx = 0;
4120#ifndef CONFIG_CHELSIO_T4_DCB
4121 int q10g = 0;
4122#endif
4123 int ciq_size;
4124
4125 for_each_port(adap, i)
4126 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
4127#ifdef CONFIG_CHELSIO_T4_DCB
4128
4129
4130
4131
4132 if (adap->params.nports * 8 > MAX_ETH_QSETS) {
4133 dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
4134 MAX_ETH_QSETS, adap->params.nports * 8);
4135 BUG_ON(1);
4136 }
4137
4138 for_each_port(adap, i) {
4139 struct port_info *pi = adap2pinfo(adap, i);
4140
4141 pi->first_qset = qidx;
4142 pi->nqsets = 8;
4143 qidx += pi->nqsets;
4144 }
4145#else
4146
4147
4148
4149
4150 if (n10g)
4151 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
4152 if (q10g > netif_get_num_default_rss_queues())
4153 q10g = netif_get_num_default_rss_queues();
4154
4155 for_each_port(adap, i) {
4156 struct port_info *pi = adap2pinfo(adap, i);
4157
4158 pi->first_qset = qidx;
4159 pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
4160 qidx += pi->nqsets;
4161 }
4162#endif
4163
4164 s->ethqsets = qidx;
4165 s->max_ethqsets = qidx;
4166
4167 if (is_offload(adap)) {
4168
4169
4170
4171
4172
4173 if (n10g) {
4174 i = min_t(int, ARRAY_SIZE(s->ofldrxq),
4175 num_online_cpus());
4176 s->ofldqsets = roundup(i, adap->params.nports);
4177 } else
4178 s->ofldqsets = adap->params.nports;
4179
4180 s->rdmaqs = adap->params.nports;
4181
4182
4183
4184
4185
4186
4187 s->rdmaciqs = min_t(int, MAX_RDMA_CIQS, num_online_cpus());
4188 s->rdmaciqs = (s->rdmaciqs / adap->params.nports) *
4189 adap->params.nports;
4190 s->rdmaciqs = max_t(int, s->rdmaciqs, adap->params.nports);
4191 }
4192
4193 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
4194 struct sge_eth_rxq *r = &s->ethrxq[i];
4195
4196 init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
4197 r->fl.size = 72;
4198 }
4199
4200 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
4201 s->ethtxq[i].q.size = 1024;
4202
4203 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
4204 s->ctrlq[i].q.size = 512;
4205
4206 for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
4207 s->ofldtxq[i].q.size = 1024;
4208
4209 for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
4210 struct sge_ofld_rxq *r = &s->ofldrxq[i];
4211
4212 init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
4213 r->rspq.uld = CXGB4_ULD_ISCSI;
4214 r->fl.size = 72;
4215 }
4216
4217 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
4218 struct sge_ofld_rxq *r = &s->rdmarxq[i];
4219
4220 init_rspq(adap, &r->rspq, 5, 1, 511, 64);
4221 r->rspq.uld = CXGB4_ULD_RDMA;
4222 r->fl.size = 72;
4223 }
4224
4225 ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
4226 if (ciq_size > SGE_MAX_IQ_SIZE) {
4227 CH_WARN(adap, "CIQ size too small for available IQs\n");
4228 ciq_size = SGE_MAX_IQ_SIZE;
4229 }
4230
4231 for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
4232 struct sge_ofld_rxq *r = &s->rdmaciq[i];
4233
4234 init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
4235 r->rspq.uld = CXGB4_ULD_RDMA;
4236 }
4237
4238 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
4239 init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
4240}
4241
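/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */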
4246static void reduce_ethqs(struct adapter *adap, int n)
4247{
4248 int i;
4249 struct port_info *pi;
4250
4251 while (n < adap->sge.ethqsets)
4252 for_each_port(adap, i) {
4253 pi = adap2pinfo(adap, i);
4254 if (pi->nqsets > 1) {
4255 pi->nqsets--;
4256 adap->sge.ethqsets--;
4257 if (adap->sge.ethqsets <= n)
4258 break;
4259 }
4260 }
4261
4262 n = 0;
4263 for_each_port(adap, i) {
4264 pi = adap2pinfo(adap, i);
4265 pi->first_qset = n;
4266 n += pi->nqsets;
4267 }
4268}
4269
4270
4271#define EXTRA_VECS 2
4272
4273static int enable_msix(struct adapter *adap)
4274{
4275 int ofld_need = 0;
4276 int i, want, need, allocated;
4277 struct sge *s = &adap->sge;
4278 unsigned int nchan = adap->params.nports;
4279 struct msix_entry *entries;
4280
4281 entries = kmalloc(sizeof(*entries) * (MAX_INGQ + 1),
4282 GFP_KERNEL);
4283 if (!entries)
4284 return -ENOMEM;
4285
4286 for (i = 0; i < MAX_INGQ + 1; ++i)
4287 entries[i].entry = i;
4288
4289 want = s->max_ethqsets + EXTRA_VECS;
4290 if (is_offload(adap)) {
4291 want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
4292
4293 ofld_need = 3 * nchan;
4294 }
4295#ifdef CONFIG_CHELSIO_T4_DCB
4296
4297
4298
4299 need = 8 * adap->params.nports + EXTRA_VECS + ofld_need;
4300#else
4301 need = adap->params.nports + EXTRA_VECS + ofld_need;
4302#endif
4303 allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
4304 if (allocated < 0) {
		dev_info(adap->pdev_dev,
			 "not enough MSI-X vectors left, not using MSI-X\n");
4307 kfree(entries);
4308 return allocated;
4309 }
4310
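	/* Distribute available vectors to the various queue groups.
	 * Every group gets its minimum requirement and NIC gets top
	 * priority for leftovers.
	 */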
4315 i = allocated - EXTRA_VECS - ofld_need;
4316 if (i < s->max_ethqsets) {
4317 s->max_ethqsets = i;
4318 if (i < s->ethqsets)
4319 reduce_ethqs(adap, i);
4320 }
4321 if (is_offload(adap)) {
4322 if (allocated < want) {
4323 s->rdmaqs = nchan;
4324 s->rdmaciqs = nchan;
4325 }
4326
4327
4328 i = allocated - EXTRA_VECS - s->max_ethqsets -
4329 s->rdmaqs - s->rdmaciqs;
4330 s->ofldqsets = (i / nchan) * nchan;
4331 }
4332 for (i = 0; i < allocated; ++i)
4333 adap->msix_info[i].vec = entries[i].vector;
4334
4335 kfree(entries);
4336 return 0;
4337}
4338
4339#undef EXTRA_VECS
4340
4341static int init_rss(struct adapter *adap)
4342{
4343 unsigned int i, j;
4344
4345 for_each_port(adap, i) {
4346 struct port_info *pi = adap2pinfo(adap, i);
4347
4348 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
4349 if (!pi->rss)
4350 return -ENOMEM;
4351 for (j = 0; j < pi->rss_size; j++)
4352 pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
4353 }
4354 return 0;
4355}
4356
4357static void print_port_info(const struct net_device *dev)
4358{
4359 char buf[80];
4360 char *bufp = buf;
4361 const char *spd = "";
4362 const struct port_info *pi = netdev_priv(dev);
4363 const struct adapter *adap = pi->adapter;
4364
4365 if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
4366 spd = " 2.5 GT/s";
4367 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
4368 spd = " 5 GT/s";
4369 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
4370 spd = " 8 GT/s";
4371
4372 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
4373 bufp += sprintf(bufp, "100/");
4374 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
4375 bufp += sprintf(bufp, "1000/");
4376 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
4377 bufp += sprintf(bufp, "10G/");
4378 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
4379 bufp += sprintf(bufp, "40G/");
4380 if (bufp != buf)
4381 --bufp;
4382 sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
4383
4384 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
4385 adap->params.vpd.id,
4386 CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
4387 is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
4388 (adap->flags & USING_MSIX) ? " MSI-X" :
4389 (adap->flags & USING_MSI) ? " MSI" : "");
4390 netdev_info(dev, "S/N: %s, P/N: %s\n",
4391 adap->params.vpd.sn, adap->params.vpd.pn);
4392}
4393
4394static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
4395{
4396 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
4397}
4398
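/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 */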
4406static void free_some_resources(struct adapter *adapter)
4407{
4408 unsigned int i;
4409
4410 t4_free_mem(adapter->l2t);
4411 t4_free_mem(adapter->tids.tid_tab);
4412 kfree(adapter->sge.egr_map);
4413 kfree(adapter->sge.ingr_map);
4414 kfree(adapter->sge.starving_fl);
4415 kfree(adapter->sge.txq_maperr);
4416 disable_msi(adapter);
4417
4418 for_each_port(adapter, i)
4419 if (adapter->port[i]) {
4420 kfree(adap2pinfo(adapter, i)->rss);
4421 free_netdev(adapter->port[i]);
4422 }
4423 if (adapter->flags & FW_OK)
4424 t4_fw_bye(adapter, adapter->fn);
4425}
4426
4427#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
4428#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
4429 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
4430#define SEGMENT_SIZE 128
4431
4432static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
4433{
4434 int func, i, err, s_qpp, qpp, num_seg;
4435 struct port_info *pi;
4436 bool highdma = false;
4437 struct adapter *adapter = NULL;
4438 void __iomem *regs;
4439
4440 printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
4441
4442 err = pci_request_regions(pdev, KBUILD_MODNAME);
4443 if (err) {
4444
4445 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
4446 return err;
4447 }
4448
4449 err = pci_enable_device(pdev);
4450 if (err) {
4451 dev_err(&pdev->dev, "cannot enable PCI device\n");
4452 goto out_release_regions;
4453 }
4454
4455 regs = pci_ioremap_bar(pdev, 0);
4456 if (!regs) {
4457 dev_err(&pdev->dev, "cannot map device registers\n");
4458 err = -ENOMEM;
4459 goto out_disable_device;
4460 }
4461
4462 err = t4_wait_dev_ready(regs);
4463 if (err < 0)
4464 goto out_unmap_bar0;
4465
4466
4467 func = SOURCEPF_G(readl(regs + PL_WHOAMI_A));
4468 if (func != ent->driver_data) {
4469 iounmap(regs);
4470 pci_disable_device(pdev);
4471 pci_save_state(pdev);
4472 goto sriov;
4473 }
4474
4475 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4476 highdma = true;
4477 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4478 if (err) {
			dev_err(&pdev->dev,
				"unable to obtain 64-bit DMA for coherent allocations\n");
4481 goto out_unmap_bar0;
4482 }
4483 } else {
4484 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4485 if (err) {
4486 dev_err(&pdev->dev, "no usable DMA configuration\n");
4487 goto out_unmap_bar0;
4488 }
4489 }
4490
4491 pci_enable_pcie_error_reporting(pdev);
4492 enable_pcie_relaxed_ordering(pdev);
4493 pci_set_master(pdev);
4494 pci_save_state(pdev);
4495
4496 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
4497 if (!adapter) {
4498 err = -ENOMEM;
4499 goto out_unmap_bar0;
4500 }
4501
4502 adapter->workq = create_singlethread_workqueue("cxgb4");
4503 if (!adapter->workq) {
4504 err = -ENOMEM;
4505 goto out_free_adapter;
4506 }
4507
4508
4509 adapter->flags |= DEV_ENABLED;
4510
4511 adapter->regs = regs;
4512 adapter->pdev = pdev;
4513 adapter->pdev_dev = &pdev->dev;
4514 adapter->mbox = func;
4515 adapter->fn = func;
4516 adapter->msg_enable = dflt_msg_enable;
4517 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
4518
4519 spin_lock_init(&adapter->stats_lock);
4520 spin_lock_init(&adapter->tid_release_lock);
4521 spin_lock_init(&adapter->win0_lock);
4522
4523 INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
4524 INIT_WORK(&adapter->db_full_task, process_db_full);
4525 INIT_WORK(&adapter->db_drop_task, process_db_drop);
4526
4527 err = t4_prep_adapter(adapter);
4528 if (err)
4529 goto out_free_adapter;
4530
4531
4532 if (!is_t4(adapter->params.chip)) {
4533 s_qpp = (QUEUESPERPAGEPF0_S +
4534 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
4535 adapter->fn);
4536 qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
4537 SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
4538 num_seg = PAGE_SIZE / SEGMENT_SIZE;
4539
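		/* Each segment is 128 B.  Write coalescing is enabled only
		 * when the SGE_EGRESS_QUEUES_PER_PAGE_PF register value for
		 * the queue is no larger than the number of segments that
		 * fit in a page.
		 */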
4545 if (qpp > num_seg) {
4546 dev_err(&pdev->dev,
4547 "Incorrect number of egress queues per page\n");
4548 err = -EINVAL;
4549 goto out_free_adapter;
4550 }
4551 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
4552 pci_resource_len(pdev, 2));
4553 if (!adapter->bar2) {
4554 dev_err(&pdev->dev, "cannot map device bar2 region\n");
4555 err = -ENOMEM;
4556 goto out_free_adapter;
4557 }
4558 }
4559
4560 setup_memwin(adapter);
4561 err = adap_init0(adapter);
4562 setup_memwin_rdma(adapter);
4563 if (err)
4564 goto out_unmap_bar;
4565
4566 for_each_port(adapter, i) {
4567 struct net_device *netdev;
4568
4569 netdev = alloc_etherdev_mq(sizeof(struct port_info),
4570 MAX_ETH_QSETS);
4571 if (!netdev) {
4572 err = -ENOMEM;
4573 goto out_free_dev;
4574 }
4575
4576 SET_NETDEV_DEV(netdev, &pdev->dev);
4577
4578 adapter->port[i] = netdev;
4579 pi = netdev_priv(netdev);
4580 pi->adapter = adapter;
4581 pi->xact_addr_filt = -1;
4582 pi->port_id = i;
4583 netdev->irq = pdev->irq;
4584
4585 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
4586 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4587 NETIF_F_RXCSUM | NETIF_F_RXHASH |
4588 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
4589 if (highdma)
4590 netdev->hw_features |= NETIF_F_HIGHDMA;
4591 netdev->features |= netdev->hw_features;
4592 netdev->vlan_features = netdev->features & VLAN_FEAT;
4593
4594 netdev->priv_flags |= IFF_UNICAST_FLT;
4595
4596 netdev->netdev_ops = &cxgb4_netdev_ops;
4597#ifdef CONFIG_CHELSIO_T4_DCB
4598 netdev->dcbnl_ops = &cxgb4_dcb_ops;
4599 cxgb4_dcb_state_init(netdev);
4600#endif
4601 cxgb4_set_ethtool_ops(netdev);
4602 }
4603
4604 pci_set_drvdata(pdev, adapter);
4605
4606 if (adapter->flags & FW_OK) {
4607 err = t4_port_init(adapter, func, func, 0);
4608 if (err)
4609 goto out_free_dev;
4610 }
4611
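	/* Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */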
4616 cfg_queues(adapter);
4617
4618 adapter->l2t = t4_init_l2t();
4619 if (!adapter->l2t) {
4620
4621 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
4622 adapter->params.offload = 0;
4623 }
4624
4625#if IS_ENABLED(CONFIG_IPV6)
4626 adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
4627 adapter->clipt_end);
4628 if (!adapter->clipt) {
4629
4630
4631
4632 dev_warn(&pdev->dev,
4633 "could not allocate Clip table, continuing\n");
4634 adapter->params.offload = 0;
4635 }
4636#endif
4637 if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
4638 dev_warn(&pdev->dev, "could not allocate TID table, "
4639 "continuing\n");
4640 adapter->params.offload = 0;
4641 }
4642
4643
4644 if (msi > 1 && enable_msix(adapter) == 0)
4645 adapter->flags |= USING_MSIX;
4646 else if (msi > 0 && pci_enable_msi(pdev) == 0)
4647 adapter->flags |= USING_MSI;
4648
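	/* Allocate the per-port RSS indirection tables */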
	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	/* The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_offload(adapter))
		attach_ulds(adapter);

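	/* Instantiate SR-IOV Virtual Functions if any were requested via the
	 * num_vf module parameter for this physical function.
	 */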
sriov:
#ifdef CONFIG_PCI_IOV
	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
			dev_info(&pdev->dev,
				 "instantiated %u virtual functions\n",
				 num_vf[func]);
#endif
	return 0;

 out_free_dev:
	free_some_resources(adapter);
 out_unmap_bar:
	if (!is_t4(adapter->params.chip))
		iounmap(adapter->bar2);
 out_free_adapter:
	if (adapter->workq)
		destroy_workqueue(adapter->workq);

	kfree(adapter);
 out_unmap_bar0:
	iounmap(regs);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	return err;
}

static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

#ifdef CONFIG_PCI_IOV
	pci_disable_sriov(pdev);
#endif

	if (adapter) {
		int i;

		/* Tear down per-adapter Work Queue first since it can contain
		 * references to our adapter data structure.
		 */
		destroy_workqueue(adapter->workq);

		if (is_offload(adapter))
			detach_ulds(adapter);

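		/* Stop all adapter interrupts before taking the ports down */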
		disable_interrupts(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		debugfs_remove_recursive(adapter->debugfs_root);

		/* If we allocated filters, free up the state associated
		 * with any that are still valid.
		 */
		if (adapter->tids.ftid_tab) {
			struct filter_entry *f = &adapter->tids.ftid_tab[0];

			for (i = 0; i < (adapter->tids.nftids +
					adapter->tids.nsftids); i++, f++)
				if (f->valid)
					clear_filter(adapter, f);
		}

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		free_some_resources(adapter);
#if IS_ENABLED(CONFIG_IPV6)
		t4_cleanup_clip_tbl(adapter);
#endif
		iounmap(adapter->regs);
		if (!is_t4(adapter->params.chip))
			iounmap(adapter->bar2);
		pci_disable_pcie_error_reporting(pdev);
		if (adapter->flags & DEV_ENABLED) {
			pci_disable_device(pdev);
			adapter->flags &= ~DEV_ENABLED;
		}
		pci_release_regions(pdev);
		synchronize_rcu();
		kfree(adapter);
	} else
		pci_release_regions(pdev);
}

static struct pci_driver cxgb4_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe    = init_one,
	.remove   = remove_one,
	.shutdown = remove_one,
	.err_handler = &cxgb4_eeh,
};

static int __init cxgb4_init_module(void)
{
	int ret;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warn("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0) {
		debugfs_remove(cxgb4_debugfs_root);
		return ret;
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (!inet6addr_registered) {
		register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		inet6addr_registered = true;
	}
#endif

	return ret;
}

static void __exit cxgb4_cleanup_module(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (inet6addr_registered) {
		unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		inet6addr_registered = false;
	}
#endif
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);