/*
 * cxgb4_main.c: Chelsio T4/T5/T6 Ethernet driver for Linux -- main file.
 *
 * Copyright (c) Chelsio Communications, Inc.  Dual BSD/GPL licensed;
 * see MODULE_LICENSE() below.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/bonding.h>
#include <asm/uaccess.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4fw_version.h"
#include "cxgb4_dcb.h"
#include "cxgb4_debugfs.h"
#include "clip_tbl.h"
#include "l2t.h"

char cxgb4_driver_name[] = KBUILD_MODNAME;

#ifdef DRV_VERSION
#undef DRV_VERSION
#endif
#define DRV_VERSION "2.0.0-ko"
const char cxgb4_driver_version[] = DRV_VERSION;
#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"

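/* Host shadow copy of an ingress filter entry.  This is in host native
 * format and doesn't match the ordering or bit order of the hardware or
 * the firmware command.  The use of bit-field structure elements is purely
 * a reminder that the field size limitations apply even though normal
 * integer elements could have been used.
 */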
struct filter_entry {
	/* Administrative fields for the filter.
	 */
	u32 valid:1;            /* filter allocated and valid */
	u32 locked:1;           /* filter is administratively locked */

	u32 pending:1;          /* filter action is pending firmware reply */
	u32 smtidx:8;           /* Source MAC Table index for smac */
	struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

	/* The filter itself.  Most of this is a straight copy of information
	 * provided by the extended ioctl.  Additional fields are translated
	 * to/from the internal representation.
	 */
	struct ch_filter_specification fs;
};

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/* Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct pci_device_id cxgb4_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x4

/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine
 * is called for both.
 */
#define CH_PCI_DEVICE_ID_FUNCTION2 0x0

#define CH_PCI_ID_TABLE_ENTRY(devid) \
		{PCI_VDEVICE(CHELSIO, (devid)), 4}

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
		{ 0, } \
	}

#include "t4_pci_id_tbl.h"

#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW6_FNAME "cxgb4/t6fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"
#define FW6_CFNAME "cxgb4/t6-config.txt"
#define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
#define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
#define PHY_AQ1202_DEVICEID 0x4409
#define PHY_BCM84834_DEVICEID 0x4486

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
MODULE_FIRMWARE(FW6_FNAME);

/* Deprecated parameter: forcibly become Master PF and initialize the
 * adapter.
 */
static uint force_init;

module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter, "
		 "deprecated parameter");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
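
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */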
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
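
/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * traps when an application attempts an unaligned memory access.
 */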
static int rx_dma_offset = 2;

#ifdef CONFIG_PCI_IOV
/* Configure the number of PCI-E Virtual Functions which are to be
 * instantiated on SR-IOV Capable Physical Functions.
 */
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
#endif

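/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue.  Select between the kernel provided function (select_queue=0) or the
 * driver's cxgb_select_queue function (select_queue=1).
 */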
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
		 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");

static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);

static LIST_HEAD(adap_rcu_list);
static DEFINE_SPINLOCK(adap_rcu_lock);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *const uld_str[] = { "RDMA", "iSCSI", "iSCSIT" };

static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s;
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		case 10000:
			s = "10Gbps";
			break;
		case 1000:
			s = "1000Mbps";
			break;
		case 100:
			s = "100Mbps";
			break;
		case 40000:
			s = "40Gbps";
			break;
		default:
			pr_info("%s: unsupported speed: %d\n",
				dev->name, p->link_cfg.speed);
			return;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}

#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
	int i;

	/* We use a simple mapping of Port TX Queue Index to DCB
	 * Priority when we're enabling DCB.
	 */
	for (i = 0; i < pi->nqsets; i++, txq++) {
		u32 name, value;
		int err;

		name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			FW_PARAMS_PARAM_X_V(
				FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
			FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
		value = enable ? i : 0xffffffff;

		/* Since we can be called while atomic (from "interrupt
		 * level") we need to issue the Set Parameters Command
		 * without sleeping (timeout < 0).
		 */
		err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
					    &name, &value,
					    -FW_CMD_MAX_TIMEOUT);

		if (err)
			dev_err(adap->pdev_dev,
				"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
				enable ? "set" : "unset", pi->port_id, i, -err);
		else
			txq->dcb_prio = value;
	}
}
#endif /* CONFIG_CHELSIO_T4_DCB */

void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Only report changes on ports that are up and whose carrier state
	 * actually changed.
	 */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else {
#ifdef CONFIG_CHELSIO_T4_DCB
			cxgb4_dcb_state_init(dev);
			dcb_tx_queue_prio_enable(dev, false);
#endif
			netif_carrier_off(dev);
		}

		link_report(dev);
	}
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}

int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");

static inline int cxgb4_set_addr_hash(struct port_info *pi)
{
	struct adapter *adap = pi->adapter;
	u64 vec = 0;
	bool ucast = false;
	struct hash_mac_addr *entry;

	/* Calculate the hash vector for the updated list and program it */
	list_for_each_entry(entry, &adap->mac_hlist, list) {
		ucast |= is_unicast_ether_addr(entry->addr);
		vec |= (1ULL << hash_mac_addr(entry->addr));
	}
	return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast,
				vec, false);
}

static int cxgb4_mac_sync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = false;
	bool ucast = is_unicast_ether_addr(mac_addr);
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *new_entry;

	ret = t4_alloc_mac_filt(adap, adap->mbox, pi->viid, free, 1, maclist,
				NULL, ucast ? &uhash : &mhash, false);
	if (ret < 0)
		goto out;
	/* if hash != 0, then add the addr to the hash addr list
	 * so that in the end we will calculate the hash for the
	 * whole list and program it
	 */
	if (uhash || mhash) {
		new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
		if (!new_entry)
			return -ENOMEM;
		ether_addr_copy(new_entry->addr, mac_addr);
		list_add_tail(&new_entry->list, &adap->mac_hlist);
		ret = cxgb4_set_addr_hash(pi);
	}
out:
	return ret < 0 ? ret : 0;
}

static int cxgb4_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *entry, *tmp;

	/* If the MAC address to be removed is in the hash addr
	 * list, delete it from the list and update hash vector
	 */
	list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) {
		if (ether_addr_equal(entry->addr, mac_addr)) {
			list_del(&entry->list);
			kfree(entry);
			return cxgb4_set_addr_hash(pi);
		}
	}

	ret = t4_free_mac_filt(adap, adap->mbox, pi->viid, 1, maclist, false);
	return ret < 0 ? -EINVAL : 0;
}

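/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */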
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (!(dev->flags & IFF_PROMISC)) {
		__dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
		if (!(dev->flags & IFF_ALLMULTI))
			__dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
	}

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu,
			     (dev->flags & IFF_PROMISC) ? 1 : 0,
			     (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
			     sleep_ok);
}

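/**
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */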
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->pf;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (ret == 0) {
		ret = t4_change_mac(pi->adapter, mb, pi->viid,
				    pi->xact_addr_filt, dev->dev_addr, true,
				    true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}
	if (ret == 0)
		ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0) {
		local_bh_disable();
		ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
					  true, CXGB4_DCB_ENABLED);
		local_bh_enable();
	}

	return ret;
}

int cxgb4_dcb_enabled(const struct net_device *dev)
{
#ifdef CONFIG_CHELSIO_T4_DCB
	struct port_info *pi = netdev_priv(dev);

	if (!pi->dcb.enabled)
		return 0;

	return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
		(pi->dcb.state == CXGB4_DCB_STATE_HOST));
#else
	return 0;
#endif
}
EXPORT_SYMBOL(cxgb4_dcb_enabled);

#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
	int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
	struct net_device *dev = adap->port[port];
	int old_dcb_enabled = cxgb4_dcb_enabled(dev);
	int new_dcb_enabled;

	cxgb4_dcb_handle_fw_update(adap, pcmd);
	new_dcb_enabled = cxgb4_dcb_enabled(dev);

	/* If the DCB has become enabled or disabled on the port then we're
	 * going to need to set up/tear down DCB Priority parameters for the
	 * TX Queues associated with the port.
	 */
	if (new_dcb_enabled != old_dcb_enabled)
		dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
}
#endif /* CONFIG_CHELSIO_T4_DCB */

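/* Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */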
static void clear_filter(struct adapter *adap, struct filter_entry *f)
{
	/* If the new or old filter have loopback rewriting rules then we'll
	 * need to free any existing Layer Two Table (L2T) entries of the old
	 * filter rule.  The firmware will handle freeing up any Source MAC
	 * Table (SMT) entries used for rewriting Source MAC Addresses in
	 * loopback rules.
	 */
	if (f->l2t)
		cxgb4_l2t_release(f->l2t);

	/* The zeroing of the filter rule below clears the filter valid,
	 * pending, locked flags, l2t pointer, etc. so it's all we need for
	 * this operation.
	 */
	memset(f, 0, sizeof(*f));
}

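/* Handle a filter write/deletion reply.
 */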
static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
	unsigned int idx = GET_TID(rpl);
	unsigned int nidx = idx - adap->tids.ftid_base;
	unsigned int ret;
	struct filter_entry *f;

	if (idx >= adap->tids.ftid_base && nidx <
	   (adap->tids.nftids + adap->tids.nsftids)) {
		idx = nidx;
		ret = TCB_COOKIE_G(rpl->cookie);
		f = &adap->tids.ftid_tab[idx];

		if (ret == FW_FILTER_WR_FLT_DELETED) {
			/* Clear the filter when we get confirmation from the
			 * hardware that the filter has been deleted.
			 */
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
			dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
				idx);
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_FLT_ADDED) {
			f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
			f->pending = 0;  /* asynchronous setup completed */
			f->valid = 1;
		} else {
			/* Something went wrong.  Issue a warning about the
			 * problem and clear everything out.
			 */
			dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
				idx, ret);
			clear_filter(adap, f);
		}
	}
}

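/* Response queue handler for the FW event queue.
 */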
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;                                          /* skip RSS header */

	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 */
	if (unlikely(opcode == CPL_FW4_MSG &&
	   ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
		rsp++;
		opcode = ((const struct rss_header *)rsp)->opcode;
		rsp++;
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n",
				opcode);
			goto out;
		}
	}

	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		txq->restarts++;
		if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			netif_tx_wake_queue(eq->txq);
		} else {
			struct sge_ofld_txq *oq;

			oq = container_of(txq, struct sge_ofld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
		const struct fw_port_cmd *pcmd = (const void *)p->data;
		unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
		unsigned int action =
			FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_GET_PORT_INFO) {
			int port = FW_PORT_CMD_PORTID_G(
					be32_to_cpu(pcmd->op_to_portid));
			struct net_device *dev = q->adap->port[port];
			int state_input = ((pcmd->u.info.dcbxdis_pkd &
					    FW_PORT_CMD_DCBXDIS_F)
					   ? CXGB4_DCB_INPUT_FW_DISABLED
					   : CXGB4_DCB_INPUT_FW_ENABLED);

			cxgb4_dcb_state_fsm(dev, state_input);
		}

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_L2_DCB_CFG)
			dcb_rpl(q->adap, pcmd);
		else
#endif
			if (p->type == 0)
				t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
out:
	return 0;
}

/* Flush the aggregated lro sessions */
static void uldrx_flush_handler(struct sge_rspq *q)
{
	if (ulds[q->uld].lro_flush)
		ulds[q->uld].lro_flush(&q->lro_mgr);
}
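
/**
 *	uldrx_handler - response queue handler for ULD queues
 *	@q: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the offload message
 *	@gl: the gather list of packet fragments
 *
 *	Deliver an ingress offload packet to a ULD.  All processing is done by
 *	the ULD, we just maintain statistics.
 */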
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
	int ret;

	/* FW can send CPLs encapsulated in a CPL_FW4_MSG.
	 */
	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
		rsp += 2;

	if (q->flush_handler)
		ret = ulds[q->uld].lro_rx_handler(q->adap->uld_handle[q->uld],
						  rsp, gl, &q->lro_mgr,
						  &q->napi);
	else
		ret = ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld],
					      rsp, gl);

	if (ret) {
		rxq->stats.nomem++;
		return -1;
	}

	if (gl == NULL)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}

static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;
	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));

	if (v & PFSW_F) {
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
	}
	if (adap->flags & MASTER_PF)
		t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}

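/*
 * Name the MSI-X interrupts.
 */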
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

	/* non-data interrupts */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

	/* FW events */
	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
		 adap->port[0]->name);

	/* Ethernet queues */
	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++)
			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
				 d->name, i);
	}

	/* offload queues */
	for_each_iscsirxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iscsi%d",
			 adap->port[0]->name, i);

	for_each_iscsitrxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iSCSIT%d",
			 adap->port[0]->name, i);

	for_each_rdmarxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
			 adap->port[0]->name, i);

	for_each_rdmaciq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
			 adap->port[0]->name, i);
}

static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err, ethqidx, iscsiqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
	int iscsitqidx = 0;
	int msi_index = 2;

	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
			  adap->msix_info[1].desc, &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_iscsirxq(s, iscsiqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->iscsirxq[iscsiqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_iscsitrxq(s, iscsitqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->iscsitrxq[iscsitqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_rdmarxq(s, rdmaqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmarxq[rdmaqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_rdmaciq(s, rdmaciqqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmaciq[rdmaciqqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	return 0;

unwind:
	while (--rdmaciqqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmaciq[rdmaciqqidx].rspq);
	while (--rdmaqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmarxq[rdmaqidx].rspq);
	while (--iscsitqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->iscsitrxq[iscsitqidx].rspq);
	while (--iscsiqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->iscsirxq[iscsiqidx].rspq);
	while (--ethqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ethrxq[ethqidx].rspq);
	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
	int i, msi_index = 2;
	struct sge *s = &adap->sge;

	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	for_each_ethrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
	for_each_iscsirxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec,
			 &s->iscsirxq[i].rspq);
	for_each_iscsitrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec,
			 &s->iscsitrxq[i].rspq);
	for_each_rdmarxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
	for_each_rdmaciq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
}

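/**
 *	cxgb4_write_rss - write the RSS table for a given port
 *	@pi: the port
 *	@queues: array of queue indices for RSS
 *
 *	Sets up the portion of the HW RSS table for the port's VI to distribute
 *	packets to the Rx queues in @queues.  Should never be called before
 *	setting up the RSS table.
 */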
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
{
	u16 *rss;
	int i, err;
	struct adapter *adapter = pi->adapter;
	const struct sge_eth_rxq *rxq;

	rxq = &adapter->sge.ethrxq[pi->first_qset];
	rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = rxq[*queues].rspq.abs_id;

	err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
				  pi->rss_size, rss, pi->rss_size);
	/* If Tunnel All Lookup isn't specified in the global RSS
	 * Configuration, then we need to specify a default Ingress
	 * Queue for any ingress packets which aren't hashed.  We'll
	 * use our first ingress queue.
	 */
	if (!err)
		err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
				       FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_UDPEN_F,
				       rss[0]);
	kfree(rss);
	return err;
}

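/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS for each port.
 */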
static int setup_rss(struct adapter *adap)
{
	int i, j, err;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		/* Fill default values with equal distribution */
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = j % pi->nqsets;

		err = cxgb4_write_rss(pi, pi->rss);
		if (err)
			return err;
	}
	return 0;
}

/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
	qid -= p->ingr_start;
	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (q && q->handler) {
			napi_disable(&q->napi);
			local_bh_disable();
			while (!cxgb_poll_lock_napi(q))
				mdelay(1);
			local_bh_enable();
		}
	}
}

/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{
	if (adap->flags & FULL_INIT_DONE) {
		t4_intr_disable(adap);
		if (adap->flags & USING_MSIX) {
			free_msix_queue_irqs(adap);
			free_irq(adap->msix_info[0].vec, adap);
		} else {
			free_irq(adap->pdev->irq, adap);
		}
		quiesce_rx(adap);
	}
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;
		if (q->handler) {
			cxgb_busy_poll_init_lock(q);
			napi_enable(&q->napi);
		}
		/* 0-increment GTS to start the timer and enable interrupts */
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
			     SEINTARM_V(q->intr_params) |
			     INGRESSQID_V(q->cntxt_id));
	}
}

static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
			   unsigned int nq, unsigned int per_chan, int msi_idx,
			   u16 *ids, bool lro)
{
	int i, err;

	for (i = 0; i < nq; i++, q++) {
		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
				       adap->port[i / per_chan],
				       msi_idx, q->fl.size ? &q->fl : NULL,
				       uldrx_handler,
				       lro ? uldrx_flush_handler : NULL,
				       0);
		if (err)
			return err;
		memset(&q->stats, 0, sizeof(q->stats));
		if (ids)
			ids[i] = q->rspq.abs_id;
	}
	return 0;
}

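/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */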
static int setup_sge_queues(struct adapter *adap)
{
	int err, msi_idx, i, j;
	struct sge *s = &adap->sge;

	bitmap_zero(s->starving_fl, s->egr_sz);
	bitmap_zero(s->txq_maperr, s->egr_sz);

	if (adap->flags & USING_MSIX)
		msi_idx = 1;         /* vector 0 is for non-queue interrupts */
	else {
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL, NULL, -1);
		if (err)
			return err;
		msi_idx = -((int)s->intrq.abs_id + 1);
	}

	/* Allocate the firmware event queue.  If this or any of the queue
	 * allocations below fails, everything allocated so far is freed via
	 * the "freeout" label.
	 */
	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       msi_idx, NULL, fwevtq_handler, NULL, -1);
	if (err) {
freeout:	t4_free_sge_resources(adap);
		return err;
	}

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (msi_idx > 0)
				msi_idx++;
			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       msi_idx, &q->fl,
					       t4_ethrx_handler,
					       NULL,
					       t4_get_mps_bg_map(adap,
								 pi->tx_chan));
			if (err)
				goto freeout;
			q->rspq.idx = j;
			memset(&q->stats, 0, sizeof(q->stats));
		}
		for (j = 0; j < pi->nqsets; j++, t++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
					netdev_get_tx_queue(dev, j),
					s->fw_evtq.cntxt_id);
			if (err)
				goto freeout;
		}
	}

	j = s->iscsiqsets / adap->params.nports; /* iscsi queues per channel */
	for_each_iscsirxq(s, i) {
		err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i],
					    adap->port[i / j],
					    s->fw_evtq.cntxt_id);
		if (err)
			goto freeout;
	}

#define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids, lro) do { \
	err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, msi_idx, ids, lro); \
	if (err) \
		goto freeout; \
	if (msi_idx > 0) \
		msi_idx += nq; \
} while (0)

	ALLOC_OFLD_RXQS(s->iscsirxq, s->iscsiqsets, j, s->iscsi_rxq, false);
	ALLOC_OFLD_RXQS(s->iscsitrxq, s->niscsitq, j, s->iscsit_rxq, true);
	ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq, false);
	j = s->rdmaciqs / adap->params.nports; /* rdmaq queues per channel */
	ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq, false);

#undef ALLOC_OFLD_RXQS

	for_each_port(adap, i) {
		/*
		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id,
					    s->rdmarxq[i].rspq.cntxt_id);
		if (err)
			goto freeout;
	}

	t4_write_reg(adap, is_t4(adap->params.chip) ?
				MPS_TRC_RSS_CONTROL_A :
				MPS_T5_TRC_RSS_CONTROL_A,
		     RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
	return 0;
}

/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!p)
		p = vzalloc(size);
	return p;
}

/*
 * Free memory allocated through t4_alloc_mem().
 */
void t4_free_mem(void *addr)
{
	kvfree(addr);
}

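/* Send a Work Request to write the filter at a specified index.  We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode which will prevent any further actions against
 * it till we get a reply from the firmware on the completion status of the
 * request.
 */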
static int set_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int ftid;

	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newdmac || f->fs.newvlan) {
		/* allocate L2T entry for new filter */
		f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
						f->fs.eport, f->fs.dmac);
		if (f->l2t == NULL) {
			kfree_skb(skb);
			return -ENOMEM;
		}
	}

	ftid = adapter->tids.ftid_base + fidx;

	fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
	memset(fwr, 0, sizeof(*fwr));

	/* It would be nice to put most of the following in t4_hw.c but most
	 * of the work is translating the cxgbtool ch_filter_specification
	 * into the Work Request and the definition of that structure is
	 * currently in cxgbtool.h which isn't appropriate to pull into the
	 * driver's public header.  So for now it's defined here in the main
	 * driver file.
	 */
	fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
	fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr)/16));
	fwr->tid_to_iq =
		htonl(FW_FILTER_WR_TID_V(ftid) |
		      FW_FILTER_WR_RQTYPE_V(f->fs.type) |
		      FW_FILTER_WR_NOREPLY_V(0) |
		      FW_FILTER_WR_IQ_V(f->fs.iq));
	fwr->del_filter_to_l2tix =
		htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
		      FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
		      FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
		      FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
		      FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
		      FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
		      FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
		      FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
		      FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
					     f->fs.newvlan == VLAN_REWRITE) |
		      FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
					    f->fs.newvlan == VLAN_REWRITE) |
		      FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
		      FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
		      FW_FILTER_WR_PRIO_V(f->fs.prio) |
		      FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htons(f->fs.val.ethtype);
	fwr->ethtypem = htons(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
		(FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
		 FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
		 FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
		 FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
		 FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
		 FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
	fwr->smac_sel = 0;
	fwr->rx_chan_rx_rpl_iq =
		htons(FW_FILTER_WR_RX_CHAN_V(0) |
		      FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
	fwr->maci_to_matchtypem =
		htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
		      FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
		      FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
		      FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
		      FW_FILTER_WR_PORT_V(f->fs.val.iport) |
		      FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
		      FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
		      FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htons(f->fs.val.ivlan);
	fwr->ivlanm = htons(f->fs.mask.ivlan);
	fwr->ovlan = htons(f->fs.val.ovlan);
	fwr->ovlanm = htons(f->fs.mask.ovlan);
	memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
	memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
	memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
	memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
	fwr->lp = htons(f->fs.val.lport);
	fwr->lpm = htons(f->fs.mask.lport);
	fwr->fp = htons(f->fs.val.fport);
	fwr->fpm = htons(f->fs.mask.fport);
	if (f->fs.newsmac)
		memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	t4_ofld_send(adapter, skb);
	return 0;
}

/* Delete the filter at a specified index.
 */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int len, ftid;

	len = sizeof(*fwr);
	ftid = adapter->tids.ftid_base + fidx;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	fwr = (struct fw_filter_wr *)__skb_put(skb, len);
	t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	t4_mgmt_tx(adapter, skb);
	return 0;
}

static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
			     void *accel_priv, select_queue_fallback_t fallback)
{
	int txq;

#ifdef CONFIG_CHELSIO_T4_DCB
	/* If a Data Center Bridging has been successfully negotiated on this
	 * link then we'll use the skb's priority to map it to a TX Queue.
	 * The skb's priority is determined via the VLAN Tag Priority Code
	 * Point field.
	 */
	if (cxgb4_dcb_enabled(dev)) {
		u16 vlan_tci;
		int err;

		err = vlan_get_tag(skb, &vlan_tci);
		if (unlikely(err)) {
			if (net_ratelimit())
				netdev_warn(dev,
					    "TX Packet without VLAN Tag on DCB Link\n");
			txq = 0;
		} else {
			txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
#ifdef CONFIG_CHELSIO_T4_FCOE
			if (skb->protocol == htons(ETH_P_FCOE))
				txq = skb->priority & 0x7;
#endif /* CONFIG_CHELSIO_T4_FCOE */
		}
		return txq;
	}
#endif /* CONFIG_CHELSIO_T4_DCB */

	if (select_queue) {
		txq = (skb_rx_queue_recorded(skb)
			? skb_get_rx_queue(skb)
			: smp_processor_id());

		while (unlikely(txq >= dev->real_num_tx_queues))
			txq -= dev->real_num_tx_queues;

		return txq;
	}

	return fallback(dev, skb) % dev->real_num_tx_queues;
}

static int closest_timer(const struct sge *s, int time)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

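/**
 *	cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
 *	@q: the Rx queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *	one of the two needs to be enabled for the queue to generate interrupts.
 */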
int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
			       unsigned int us, unsigned int cnt)
{
	struct adapter *adap = q->adap;

	if ((us | cnt) == 0)
		cnt = 1;

	if (cnt) {
		int err;
		u32 v, new_idx;

		new_idx = closest_thres(&adap->sge, cnt);
		if (q->desc && q->pktcnt_idx != new_idx) {
			/* the queue has already been created, update it */
			v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X_V(
					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
			err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
					    &v, &new_idx);
			if (err)
				return err;
		}
		q->pktcnt_idx = new_idx;
	}

	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
	q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
	return 0;
}

static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
	const struct port_info *pi = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;
	int err;

	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
		return 0;

	err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1,
			    -1, -1, -1,
			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (unlikely(err))
		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
	return err;
}

static int setup_debugfs(struct adapter *adap)
{
	if (IS_ERR_OR_NULL(adap->debugfs_root))
		return -1;

#ifdef CONFIG_DEBUG_FS
	t4_setup_debugfs(adap);
#endif
	return 0;
}

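/*
 * Allocate an active-open TID and set it to the supplied value.
 */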
int cxgb4_alloc_atid(struct tid_info *t, void *data)
{
	int atid = -1;

	spin_lock_bh(&t->atid_lock);
	if (t->afree) {
		union aopen_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->data = data;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}
EXPORT_SYMBOL(cxgb4_alloc_atid);

/*
 * Release an active-open TID.
 */
void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
{
	union aopen_entry *p = &t->atid_tab[atid - t->atid_base];

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);
}
EXPORT_SYMBOL(cxgb4_free_atid);

/*
 * Allocate a server TID and set it to the supplied value.
 */
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
		if (stid < t->nstids)
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1);
		if (stid < 0)
			stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid += t->stid_base;
		/* IPv6 requires max of 520 bits or 16 cells in TCAM
		 * This is equivalent to 4 TIDs. With CLIP enabled it
		 * needs 2 TIDs.
		 */
		if (family == PF_INET)
			t->stids_in_use++;
		else
			t->stids_in_use += 2;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_stid);

/* Allocate a server filter TID and set it to the supplied value.
 */
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_next_zero_bit(t->stid_bmap,
					  t->nstids + t->nsftids, t->nstids);
		if (stid < (t->nstids + t->nsftids))
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid -= t->nstids;
		stid += t->sftid_base;
		t->sftids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_sftid);

/* Release a server TID.
 */
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
	/* Is it a server filter TID? */
	if (t->nsftids && (stid >= t->sftid_base)) {
		stid -= t->sftid_base;
		stid += t->nstids;
	} else {
		stid -= t->stid_base;
	}

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET)
		__clear_bit(stid, t->stid_bmap);
	else
		bitmap_release_region(t->stid_bmap, stid, 1);
	t->stid_tab[stid].data = NULL;
	if (stid < t->nstids) {
		if (family == PF_INET)
			t->stids_in_use--;
		else
			t->stids_in_use -= 2;
	} else {
		t->sftids_in_use--;
	}
	spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);

/*
 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
 */
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
			   unsigned int tid)
{
	struct cpl_tid_release *req;

	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}

/*
 * Queue a TID release request and if necessary schedule a work queue to
 * process it.
 */
static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
				    unsigned int tid)
{
	void **p = &t->tid_tab[tid];
	struct adapter *adap = container_of(t, struct adapter, tids);

	spin_lock_bh(&adap->tid_release_lock);
	*p = adap->tid_release_head;
	/* Low 2 bits encode the Tx channel number */
	adap->tid_release_head = (void **)((uintptr_t)p | chan);
	if (!adap->tid_release_task_busy) {
		adap->tid_release_task_busy = true;
		queue_work(adap->workq, &adap->tid_release_task);
	}
	spin_unlock_bh(&adap->tid_release_lock);
}

/*
 * Process the list of pending TID release requests.
 */
static void process_tid_release_list(struct work_struct *work)
{
	struct sk_buff *skb;
	struct adapter *adap;

	adap = container_of(work, struct adapter, tid_release_task);

	spin_lock_bh(&adap->tid_release_lock);
	while (adap->tid_release_head) {
		void **p = adap->tid_release_head;
		unsigned int chan = (uintptr_t)p & 3;
		p = (void *)p - chan;

		adap->tid_release_head = *p;
		*p = NULL;
		spin_unlock_bh(&adap->tid_release_lock);

		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
					 GFP_KERNEL)))
			schedule_timeout_uninterruptible(1);

		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
		t4_ofld_send(adap, skb);
		spin_lock_bh(&adap->tid_release_lock);
	}
	adap->tid_release_task_busy = false;
	spin_unlock_bh(&adap->tid_release_lock);
}

/*
 * Release a TID and inform HW.  If we are unable to allocate the release
 * message we defer to a work queue.
 */
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
{
	struct sk_buff *skb;
	struct adapter *adap = container_of(t, struct adapter, tids);

	WARN_ON(tid >= t->ntids);

	if (t->tid_tab[tid]) {
		t->tid_tab[tid] = NULL;
		if (t->hash_base && (tid >= t->hash_base))
			atomic_dec(&t->hash_tids_in_use);
		else
			atomic_dec(&t->tids_in_use);
	}

	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
	if (likely(skb)) {
		mk_tid_release(skb, chan, tid);
		t4_ofld_send(adap, skb);
	} else
		cxgb4_queue_tid_release(t, chan, tid);
}
EXPORT_SYMBOL(cxgb4_remove_tid);

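/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */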
static int tid_init(struct tid_info *t)
{
	size_t size;
	unsigned int stid_bmap_size;
	unsigned int natids = t->natids;
	struct adapter *adap = container_of(t, struct adapter, tids);

	stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
	size = t->ntids * sizeof(*t->tid_tab) +
	       natids * sizeof(*t->atid_tab) +
	       t->nstids * sizeof(*t->stid_tab) +
	       t->nsftids * sizeof(*t->stid_tab) +
	       stid_bmap_size * sizeof(long) +
	       t->nftids * sizeof(*t->ftid_tab) +
	       t->nsftids * sizeof(*t->ftid_tab);

	t->tid_tab = t4_alloc_mem(size);
	if (!t->tid_tab)
		return -ENOMEM;

	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
	t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);

	t->stids_in_use = 0;
	t->sftids_in_use = 0;
	t->afree = NULL;
	t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);
	atomic_set(&t->hash_tids_in_use, 0);

	/* Setup the free list for atid_tab and clear the stid bitmap. */
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}
	bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
	/* Reserve stid 0 for T4/T5 adapters */
	if (!t->stid_base &&
	    (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5))
		__set_bit(0, t->stid_bmap);

	return 0;
}

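/**
 *	cxgb4_create_server - create an IP server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IP address to bind server to
 *	@sport: the server's TCP port
 *	@vlan: the VLAN header information
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IP server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */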
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
			__be32 sip, __be16 sport, __be16 vlan,
			unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip = sip;
	req->peer_ip = htonl(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server);

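/**
 *	cxgb4_create_server6 - create an IPv6 server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IPv6 address to bind server to
 *	@sport: the server's TCP port
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IPv6 server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */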
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
			 const struct in6_addr *sip, __be16 sport,
			 unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req6 *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
	req->peer_ip_hi = cpu_to_be64(0);
	req->peer_ip_lo = cpu_to_be64(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server6);

int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
			unsigned int queue, bool ipv6)
{
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_close_listsvr_req *req;
	int ret;

	adap = netdev2adap(dev);

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
				LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_remove_server);

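/**
 *	cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
 *	@mtus: the HW MTU table
 *	@mtu: the target MTU
 *	@idx: index of selected entry in the MTU table
 *
 *	Returns the index and the value in the HW MTU table that is closest to
 *	the given MTU.
 */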
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
			    unsigned int *idx)
{
	unsigned int i = 0;

	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;
	return mtus[i];
}
EXPORT_SYMBOL(cxgb4_best_mtu);

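/**
 *	cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
 *	@mtus: the HW MTU table
 *	@header_size: Header Size
 *	@data_size_max: maximum Data Segment Size
 *	@data_size_align: desired Data Segment Size Alignment (2^N)
 *	@mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
 *
 *	Similar to cxgb4_best_mtu() but instead of searching the Hardware
 *	MTU Table based solely on a Maximum MTU parameter, we break that
 *	parameter up into a Header Size and Maximum Data Segment Size, and
 *	provide a desired Data Segment Size Alignment.  If we find an MTU in
 *	the Hardware MTU Table which will result in a Data Segment Size with
 *	the requested alignment _and_ that MTU isn't "too far" from the
 *	closest MTU, then we'll return that rather than the closest MTU.
 */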
unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
				    unsigned short header_size,
				    unsigned short data_size_max,
				    unsigned short data_size_align,
				    unsigned int *mtu_idxp)
{
	unsigned short max_mtu = header_size + data_size_max;
	unsigned short data_size_align_mask = data_size_align - 1;
	int mtu_idx, aligned_mtu_idx;

	/* Scan the MTU Table till we find an MTU which is larger than our
	 * Maximum MTU or we reach the end of the table.  Along the way,
	 * record the last MTU found, if any, which will result in a Data
	 * Size being aligned as requested.
	 */
	for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
		unsigned short data_size = mtus[mtu_idx] - header_size;

		/* If this MTU minus the Header Size would result in a
		 * Data Size which is aligned as requested, remember it.
		 */
		if ((data_size & data_size_align_mask) == 0)
			aligned_mtu_idx = mtu_idx;

		/* If we're not at the end of the Hardware MTU Table and the
		 * next element is larger than our Maximum MTU, drop out of
		 * the loop.
		 */
		if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
			break;
	}

	/* If we fell out of the loop because we ran to the end of the table,
	 * then we just have to use the last [largest] entry.
	 */
	if (mtu_idx == NMTUS)
		mtu_idx--;

	/* If we found an MTU which resulted in the requested Data Size
	 * Alignment and that's "not far" from the largest MTU which is less
	 * than or equal to the maximum MTU, then use that.
	 */
	if (aligned_mtu_idx >= 0 &&
	    mtu_idx - aligned_mtu_idx <= 1)
		mtu_idx = aligned_mtu_idx;

	/* If the caller has passed in an MTU Index pointer, pass the
	 * MTU Index back.  Return the MTU value.
	 */
	if (mtu_idxp)
		*mtu_idxp = mtu_idx;
	return mtus[mtu_idx];
}
EXPORT_SYMBOL(cxgb4_best_aligned_mtu);

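/**
 *	cxgb4_tp_smt_idx - Get the Source Mac Table index for this VI
 *	@chip: chip type
 *	@viid: VI id of the given port
 *
 *	Return the SMT index for this VI.
 */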
unsigned int cxgb4_tp_smt_idx(enum chip_type chip, unsigned int viid)
{
	/* In T4/T5, SMT contains 256 SMAC entries organized in
	 * 128 rows of 2 entries each.
	 * In T6, SMT contains 256 SMAC entries in 256 rows.
	 */
	if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
		return ((viid & 0x7f) << 1);
	else
		return (viid & 0x7f);
}
EXPORT_SYMBOL(cxgb4_tp_smt_idx);

/**
 *	cxgb4_port_chan - get the HW channel of a port
 *	@dev: the net device for the port
 *
 *	Return the HW Tx channel of the given port.
 */
unsigned int cxgb4_port_chan(const struct net_device *dev)
{
	return netdev2pinfo(dev)->tx_chan;
}
EXPORT_SYMBOL(cxgb4_port_chan);

unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
{
	struct adapter *adap = netdev2adap(dev);
	u32 v1, v2, lp_count, hp_count;

	v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
	v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
	if (is_t4(adap->params.chip)) {
		lp_count = LP_COUNT_G(v1);
		hp_count = HP_COUNT_G(v1);
	} else {
		lp_count = LP_COUNT_T5_G(v1);
		hp_count = HP_COUNT_T5_G(v2);
	}
	return lpfifo ? lp_count : hp_count;
}
EXPORT_SYMBOL(cxgb4_dbfifo_count);

/**
 *	cxgb4_port_viid - get the VI id of a port
 *	@dev: the net device for the port
 *
 *	Return the VI id of the given port.
 */
unsigned int cxgb4_port_viid(const struct net_device *dev)
{
	return netdev2pinfo(dev)->viid;
}
EXPORT_SYMBOL(cxgb4_port_viid);

/**
 *	cxgb4_port_idx - get the index of a port
 *	@dev: the net device for the port
 *
 *	Return the index of the given port.
 */
unsigned int cxgb4_port_idx(const struct net_device *dev)
{
	return netdev2pinfo(dev)->port_id;
}
EXPORT_SYMBOL(cxgb4_port_idx);

void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	struct adapter *adap = pci_get_drvdata(pdev);

	spin_lock(&adap->stats_lock);
	t4_tp_get_tcp_stats(adap, v4, v6);
	spin_unlock(&adap->stats_lock);
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);

void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
		      const unsigned int *pgsz_order)
{
	struct adapter *adap = netdev2adap(dev);

	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
	t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
		     HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
		     HPZ3_V(pgsz_order[3]));
}
EXPORT_SYMBOL(cxgb4_iscsi_init);

int cxgb4_flush_eq_cache(struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);

	return t4_sge_ctxt_flush(adap, adap->mbox);
}
EXPORT_SYMBOL(cxgb4_flush_eq_cache);

static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
{
	u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
	__be64 indices;
	int ret;

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
			   sizeof(indices), (__be32 *)&indices,
			   T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	if (!ret) {
		*cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
		*pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
	}
	return ret;
}

int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
			u16 size)
{
	struct adapter *adap = netdev2adap(dev);
	u16 hw_pidx, hw_cidx;
	int ret;

	ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;

	if (pidx != hw_pidx) {
		u16 delta;
		u32 val;

		if (pidx >= hw_pidx)
			delta = pidx - hw_pidx;
		else
			delta = size - hw_pidx + pidx;

		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(qid) | val);
	}
out:
	return ret;
}
EXPORT_SYMBOL(cxgb4_sync_txq_pidx);

int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
{
	struct adapter *adap;
	u32 offset, memtype, memaddr;
	u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
	u32 edc0_end, edc1_end, mc0_end, mc1_end;
	int ret;

	adap = netdev2adap(dev);

	offset = ((stag >> 8) * 32) + adap->vres.stag.start;

	/* Figure out where the offset lands in the Memory Type/Address scheme.
	 * This code assumes that the memory is laid out starting at offset 0
	 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
	 * and EDC1.  Some cards will have neither MC0 nor MC1, most cards have
	 * MC0, and some have both MC0 and MC1.
	 */
	size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
	edc0_size = EDRAM0_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
	edc1_size = EDRAM1_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
	mc0_size = EXT_MEM0_SIZE_G(size) << 20;

	edc0_end = edc0_size;
	edc1_end = edc0_end + edc1_size;
	mc0_end = edc1_end + mc0_size;

	if (offset < edc0_end) {
		memtype = MEM_EDC0;
		memaddr = offset;
	} else if (offset < edc1_end) {
		memtype = MEM_EDC1;
		memaddr = offset - edc0_end;
	} else {
		if (offset < mc0_end) {
			memtype = MEM_MC0;
			memaddr = offset - edc1_end;
		} else if (is_t5(adap->params.chip)) {
			size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			mc1_size = EXT_MEM1_SIZE_G(size) << 20;
			mc1_end = mc0_end + mc1_size;
			if (offset < mc1_end) {
				memtype = MEM_MC1;
				memaddr = offset - mc0_end;
			} else {
				/* offset beyond the end of any memory */
				goto err;
			}
		} else {
			/* offset beyond the end of any memory */
			goto err;
		}
	}

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	return ret;

err:
	dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
		stag, offset);
	return -EINVAL;
}
EXPORT_SYMBOL(cxgb4_read_tpte);

u64 cxgb4_read_sge_timestamp(struct net_device *dev)
{
	u32 hi, lo;
	struct adapter *adap;

	adap = netdev2adap(dev);
	lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
	hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));

	return ((u64)hi << 32) | (u64)lo;
}
EXPORT_SYMBOL(cxgb4_read_sge_timestamp);

int cxgb4_bar2_sge_qregs(struct net_device *dev,
			 unsigned int qid,
			 enum cxgb4_bar2_qtype qtype,
			 int user,
			 u64 *pbar2_qoffset,
			 unsigned int *pbar2_qid)
{
	return t4_bar2_sge_qregs(netdev2adap(dev),
				 qid,
				 (qtype == CXGB4_BAR2_QTYPE_EGRESS
				  ? T4_BAR2_QTYPE_EGRESS
				  : T4_BAR2_QTYPE_INGRESS),
				 user,
				 pbar2_qoffset,
				 pbar2_qid);
}
EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);

static struct pci_driver cxgb4_driver;

static void check_neigh_update(struct neighbour *neigh)
{
	const struct device *parent;
	const struct net_device *netdev = neigh->dev;

	if (netdev->priv_flags & IFF_802_1Q_VLAN)
		netdev = vlan_dev_real_dev(netdev);
	parent = netdev->dev.parent;
	if (parent && parent->driver == &cxgb4_driver.driver)
		t4_l2t_update(dev_get_drvdata(parent), neigh);
}

static int netevent_cb(struct notifier_block *nb, unsigned long event,
		       void *data)
{
	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		check_neigh_update(data);
		break;
	case NETEVENT_REDIRECT:
	default:
		break;
	}
	return 0;
}

static bool netevent_registered;
static struct notifier_block cxgb4_netevent_nb = {
	.notifier_call = netevent_cb
};

static void drain_db_fifo(struct adapter *adap, int usecs)
{
	u32 v1, v2, lp_count, hp_count;

	do {
		v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
		v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
		if (is_t4(adap->params.chip)) {
			lp_count = LP_COUNT_G(v1);
			hp_count = HP_COUNT_G(v1);
		} else {
			lp_count = LP_COUNT_T5_G(v1);
			hp_count = HP_COUNT_T5_G(v2);
		}

		if (lp_count == 0 && hp_count == 0)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(usecs));
	} while (1);
}

static void disable_txq_db(struct sge_txq *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->db_lock, flags);
	q->db_disabled = 1;
	spin_unlock_irqrestore(&q->db_lock, flags);
}

static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
{
	spin_lock_irq(&q->db_lock);
	if (q->db_pidx_inc) {
		/* Make sure that all writes to the TX Descriptors
		 * are committed before we tell HW about them.
		 */
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
		q->db_pidx_inc = 0;
	}
	q->db_disabled = 0;
	spin_unlock_irq(&q->db_lock);
}

static void disable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ethtxq[i].q);
	for_each_iscsirxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		disable_txq_db(&adap->sge.ctrlq[i].q);
}

static void enable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		enable_txq_db(adap, &adap->sge.ethtxq[i].q);
	for_each_iscsirxq(&adap->sge, i)
		enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		enable_txq_db(adap, &adap->sge.ctrlq[i].q);
}

static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
{
	if (adap->uld_handle[CXGB4_ULD_RDMA])
		ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
					     cmd);
}

static void process_db_full(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_full_task);

	drain_db_fifo(adap, dbfifo_drain_delay);
	enable_dbs(adap);
	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
	else
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
}

2349static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
2350{
2351 u16 hw_pidx, hw_cidx;
2352 int ret;
2353
2354 spin_lock_irq(&q->db_lock);
2355 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
2356 if (ret)
2357 goto out;
2358 if (q->db_pidx != hw_pidx) {
2359 u16 delta;
2360 u32 val;
2361
2362 if (q->db_pidx >= hw_pidx)
2363 delta = q->db_pidx - hw_pidx;
2364 else
2365 delta = q->size - hw_pidx + q->db_pidx;
2366
2367 if (is_t4(adap->params.chip))
2368 val = PIDX_V(delta);
2369 else
2370 val = PIDX_T5_V(delta);
2371 wmb();
2372 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
2373 QID_V(q->cntxt_id) | val);
2374 }
2375out:
2376 q->db_disabled = 0;
2377 q->db_pidx_inc = 0;
2378 spin_unlock_irq(&q->db_lock);
2379 if (ret)
2380 CH_WARN(adap, "DB drop recovery failed.\n");
2381}

static void recover_all_queues(struct adapter *adap)
2383{
2384 int i;
2385
2386 for_each_ethrxq(&adap->sge, i)
2387 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
2388 for_each_iscsirxq(&adap->sge, i)
2389 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
2390 for_each_port(adap, i)
2391 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
2392}
2393
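/* Worker for doorbell-drop recovery.  On T4 the only safe recovery is to
 * drain the doorbell FIFO, notify the RDMA ULD, and resynchronize every
 * egress queue's producer index.  On T5 the dropped doorbell's QID and
 * PIDX increment are latched in a status register (raw offset 0x010ac
 * below), so the lost doorbell can simply be re-issued through the
 * queue's BAR2 window.
 */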
2394static void process_db_drop(struct work_struct *work)
2395{
2396 struct adapter *adap;
2397
2398 adap = container_of(work, struct adapter, db_drop_task);
2399
2400 if (is_t4(adap->params.chip)) {
2401 drain_db_fifo(adap, dbfifo_drain_delay);
2402 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
2403 drain_db_fifo(adap, dbfifo_drain_delay);
2404 recover_all_queues(adap);
2405 drain_db_fifo(adap, dbfifo_drain_delay);
2406 enable_dbs(adap);
2407 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
2408 } else if (is_t5(adap->params.chip)) {
2409 u32 dropped_db = t4_read_reg(adap, 0x010ac);
2410 u16 qid = (dropped_db >> 15) & 0x1ffff;
2411 u16 pidx_inc = dropped_db & 0x1fff;
2412 u64 bar2_qoffset;
2413 unsigned int bar2_qid;
2414 int ret;
2415
2416 ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
2417 0, &bar2_qoffset, &bar2_qid);
2418 if (ret)
			dev_err(adap->pdev_dev,
				"doorbell drop recovery: qid=%d, pidx_inc=%d\n",
				qid, pidx_inc);
2421 else
2422 writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
2423 adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);
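
		/* Re-enable BAR2 write combining ("WC"), addressed here by
		 * raw register offset.
		 */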
2426 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
2427 }
2428
2429 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
2430 t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
2431}
2432
2433void t4_db_full(struct adapter *adap)
2434{
2435 if (is_t4(adap->params.chip)) {
2436 disable_dbs(adap);
2437 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
2438 t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
2439 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
2440 queue_work(adap->workq, &adap->db_full_task);
2441 }
2442}
2443
2444void t4_db_dropped(struct adapter *adap)
2445{
2446 if (is_t4(adap->params.chip)) {
2447 disable_dbs(adap);
2448 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
2449 }
2450 queue_work(adap->workq, &adap->db_drop_task);
2451}
2452
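/* Build a cxgb4_lld_info structure describing this adapter's resources and
 * hand it to the ULD's ->add() callback.  On the first successful attach we
 * also register the netevent notifier, and if the adapter is already fully
 * initialized the ULD is immediately told that the adapter is up.
 */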
2453static void uld_attach(struct adapter *adap, unsigned int uld)
2454{
2455 void *handle;
2456 struct cxgb4_lld_info lli;
2457 unsigned short i;
2458
2459 lli.pdev = adap->pdev;
2460 lli.pf = adap->pf;
2461 lli.l2t = adap->l2t;
2462 lli.tids = &adap->tids;
2463 lli.ports = adap->port;
2464 lli.vr = &adap->vres;
2465 lli.mtus = adap->params.mtus;
2466 if (uld == CXGB4_ULD_RDMA) {
2467 lli.rxq_ids = adap->sge.rdma_rxq;
2468 lli.ciq_ids = adap->sge.rdma_ciq;
2469 lli.nrxq = adap->sge.rdmaqs;
2470 lli.nciq = adap->sge.rdmaciqs;
2471 } else if (uld == CXGB4_ULD_ISCSI) {
2472 lli.rxq_ids = adap->sge.iscsi_rxq;
2473 lli.nrxq = adap->sge.iscsiqsets;
2474 } else if (uld == CXGB4_ULD_ISCSIT) {
2475 lli.rxq_ids = adap->sge.iscsit_rxq;
2476 lli.nrxq = adap->sge.niscsitq;
2477 }
2478 lli.ntxq = adap->sge.iscsiqsets;
2479 lli.nchan = adap->params.nports;
2480 lli.nports = adap->params.nports;
2481 lli.wr_cred = adap->params.ofldq_wr_cred;
2482 lli.adapter_type = adap->params.chip;
2483 lli.iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
2484 lli.iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
2485 lli.iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
2486 lli.iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
2487 lli.iscsi_ppm = &adap->iscsi_ppm;
2488 lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
2489 lli.udb_density = 1 << adap->params.sge.eq_qpp;
2490 lli.ucq_density = 1 << adap->params.sge.iq_qpp;
2491 lli.filt_mode = adap->params.tp.vlan_pri_map;
2492
2493 for (i = 0; i < NCHAN; i++)
2494 lli.tx_modq[i] = i;
2495 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
2496 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
2497 lli.fw_vers = adap->params.fw_vers;
2498 lli.dbfifo_int_thresh = dbfifo_int_thresh;
2499 lli.sge_ingpadboundary = adap->sge.fl_align;
2500 lli.sge_egrstatuspagesize = adap->sge.stat_len;
2501 lli.sge_pktshift = adap->sge.pktshift;
2502 lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
2503 lli.max_ordird_qp = adap->params.max_ordird_qp;
2504 lli.max_ird_adapter = adap->params.max_ird_adapter;
2505 lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
2506 lli.nodeid = dev_to_node(adap->pdev_dev);
2507
2508 handle = ulds[uld].add(&lli);
2509 if (IS_ERR(handle)) {
2510 dev_warn(adap->pdev_dev,
2511 "could not attach to the %s driver, error %ld\n",
2512 uld_str[uld], PTR_ERR(handle));
2513 return;
2514 }
2515
2516 adap->uld_handle[uld] = handle;
2517
2518 if (!netevent_registered) {
2519 register_netevent_notifier(&cxgb4_netevent_nb);
2520 netevent_registered = true;
2521 }
2522
2523 if (adap->flags & FULL_INIT_DONE)
2524 ulds[uld].state_change(handle, CXGB4_STATE_UP);
2525}
2526
2527static void attach_ulds(struct adapter *adap)
2528{
2529 unsigned int i;
2530
2531 spin_lock(&adap_rcu_lock);
2532 list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
2533 spin_unlock(&adap_rcu_lock);
2534
2535 mutex_lock(&uld_mutex);
2536 list_add_tail(&adap->list_node, &adapter_list);
2537 for (i = 0; i < CXGB4_ULD_MAX; i++)
2538 if (ulds[i].add)
2539 uld_attach(adap, i);
2540 mutex_unlock(&uld_mutex);
2541}
2542
2543static void detach_ulds(struct adapter *adap)
2544{
2545 unsigned int i;
2546
2547 mutex_lock(&uld_mutex);
2548 list_del(&adap->list_node);
2549 for (i = 0; i < CXGB4_ULD_MAX; i++)
2550 if (adap->uld_handle[i]) {
2551 ulds[i].state_change(adap->uld_handle[i],
2552 CXGB4_STATE_DETACH);
2553 adap->uld_handle[i] = NULL;
2554 }
2555 if (netevent_registered && list_empty(&adapter_list)) {
2556 unregister_netevent_notifier(&cxgb4_netevent_nb);
2557 netevent_registered = false;
2558 }
2559 mutex_unlock(&uld_mutex);
2560
2561 spin_lock(&adap_rcu_lock);
2562 list_del_rcu(&adap->rcu_node);
2563 spin_unlock(&adap_rcu_lock);
2564}
2565
2566static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
2567{
2568 unsigned int i;
2569
2570 mutex_lock(&uld_mutex);
2571 for (i = 0; i < CXGB4_ULD_MAX; i++)
2572 if (adap->uld_handle[i])
2573 ulds[i].state_change(adap->uld_handle[i], new_state);
2574 mutex_unlock(&uld_mutex);
2575}
2585
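/**
 *	cxgb4_register_uld - register an upper-layer driver
 *	@type: the ULD type
 *	@p: the ULD methods
 *
 *	Registers an upper-layer driver with this driver and attaches it to
 *	every adapter currently in the adapter list.  Returns %-EBUSY if a
 *	ULD of the same type is already registered.
 *
 *	A minimal usage sketch (hypothetical ULD code; "my_uld_info" and its
 *	callbacks are illustrative only):
 *
 *		static struct cxgb4_uld_info my_uld_info = {
 *			.add = my_add,
 *			.rx_handler = my_rx_handler,
 *		};
 *
 *		err = cxgb4_register_uld(CXGB4_ULD_RDMA, &my_uld_info);
 *		...
 *		cxgb4_unregister_uld(CXGB4_ULD_RDMA);
 */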
2586int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
2587{
2588 int ret = 0;
2589 struct adapter *adap;
2590
2591 if (type >= CXGB4_ULD_MAX)
2592 return -EINVAL;
2593 mutex_lock(&uld_mutex);
2594 if (ulds[type].add) {
2595 ret = -EBUSY;
2596 goto out;
2597 }
2598 ulds[type] = *p;
2599 list_for_each_entry(adap, &adapter_list, list_node)
2600 uld_attach(adap, type);
out:
	mutex_unlock(&uld_mutex);
2602 return ret;
2603}
2604EXPORT_SYMBOL(cxgb4_register_uld);
2611
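/**
 *	cxgb4_unregister_uld - unregister an upper-layer driver
 *	@type: the ULD type
 *
 *	Unregisters an existing upper-layer driver: its ->add() method and the
 *	per-adapter handles are cleared so it will no longer be attached.
 */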
2612int cxgb4_unregister_uld(enum cxgb4_uld type)
2613{
2614 struct adapter *adap;
2615
2616 if (type >= CXGB4_ULD_MAX)
2617 return -EINVAL;
2618 mutex_lock(&uld_mutex);
2619 list_for_each_entry(adap, &adapter_list, list_node)
2620 adap->uld_handle[type] = NULL;
2621 ulds[type].add = NULL;
2622 mutex_unlock(&uld_mutex);
2623 return 0;
2624}
2625EXPORT_SYMBOL(cxgb4_unregister_uld);
2626
2627#if IS_ENABLED(CONFIG_IPV6)
2628static int cxgb4_inet6addr_handler(struct notifier_block *this,
2629 unsigned long event, void *data)
2630{
2631 struct inet6_ifaddr *ifa = data;
2632 struct net_device *event_dev = ifa->idev->dev;
2633 const struct device *parent = NULL;
2634#if IS_ENABLED(CONFIG_BONDING)
2635 struct adapter *adap;
2636#endif
2637 if (event_dev->priv_flags & IFF_802_1Q_VLAN)
2638 event_dev = vlan_dev_real_dev(event_dev);
2639#if IS_ENABLED(CONFIG_BONDING)
2640 if (event_dev->flags & IFF_MASTER) {
2641 list_for_each_entry(adap, &adapter_list, list_node) {
2642 switch (event) {
2643 case NETDEV_UP:
2644 cxgb4_clip_get(adap->port[0],
2645 (const u32 *)ifa, 1);
2646 break;
2647 case NETDEV_DOWN:
2648 cxgb4_clip_release(adap->port[0],
2649 (const u32 *)ifa, 1);
2650 break;
2651 default:
2652 break;
2653 }
2654 }
2655 return NOTIFY_OK;
2656 }
2657#endif
2658
2659 if (event_dev)
2660 parent = event_dev->dev.parent;
2661
2662 if (parent && parent->driver == &cxgb4_driver.driver) {
2663 switch (event) {
2664 case NETDEV_UP:
2665 cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
2666 break;
2667 case NETDEV_DOWN:
2668 cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
2669 break;
2670 default:
2671 break;
2672 }
2673 }
2674 return NOTIFY_OK;
2675}
2676
2677static bool inet6addr_registered;
2678static struct notifier_block cxgb4_inet6addr_notifier = {
2679 .notifier_call = cxgb4_inet6addr_handler
2680};
2681
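/* Refresh the CLIP (Compressed Local IP) table entries for every port of
 * the adapter, e.g. once the adapter has been fully (re)initialized.
 */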
2682static void update_clip(const struct adapter *adap)
2683{
2684 int i;
2685 struct net_device *dev;
2686 int ret;
2687
2688 rcu_read_lock();
2689
2690 for (i = 0; i < MAX_NPORTS; i++) {
2691 dev = adap->port[i];
2692 ret = 0;
2693
2694 if (dev)
2695 ret = cxgb4_update_root_dev_clip(dev);
2696
2697 if (ret < 0)
2698 break;
2699 }
2700 rcu_read_unlock();
2701}
2702#endif
2713
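/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled (or during error recovery),
 *	this function performs the actions necessary to make the adapter
 *	operational: setting up the SGE queues and RSS, requesting and
 *	enabling interrupts, and notifying the ULDs that the adapter is up.
 */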
2714static int cxgb_up(struct adapter *adap)
2715{
2716 int err;
2717
2718 err = setup_sge_queues(adap);
2719 if (err)
2720 goto out;
2721 err = setup_rss(adap);
2722 if (err)
2723 goto freeq;
2724
2725 if (adap->flags & USING_MSIX) {
2726 name_msix_vecs(adap);
2727 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
2728 adap->msix_info[0].desc, adap);
2729 if (err)
2730 goto irq_err;
2731
2732 err = request_msix_queue_irqs(adap);
2733 if (err) {
2734 free_irq(adap->msix_info[0].vec, adap);
2735 goto irq_err;
2736 }
2737 } else {
2738 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
2739 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
2740 adap->port[0]->name, adap);
2741 if (err)
2742 goto irq_err;
2743 }
2744 enable_rx(adap);
2745 t4_sge_start(adap);
2746 t4_intr_enable(adap);
2747 adap->flags |= FULL_INIT_DONE;
2748 notify_ulds(adap, CXGB4_STATE_UP);
2749#if IS_ENABLED(CONFIG_IPV6)
2750 update_clip(adap);
2751#endif
2752
2753 INIT_LIST_HEAD(&adap->mac_hlist);
2754 out:
2755 return err;
2756 irq_err:
2757 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
2758 freeq:
2759 t4_free_sge_resources(adap);
2760 goto out;
2761}
2762
2763static void cxgb_down(struct adapter *adapter)
2764{
2765 cancel_work_sync(&adapter->tid_release_task);
2766 cancel_work_sync(&adapter->db_full_task);
2767 cancel_work_sync(&adapter->db_drop_task);
2768 adapter->tid_release_task_busy = false;
2769 adapter->tid_release_head = NULL;
2770
2771 t4_sge_stop(adapter);
2772 t4_free_sge_resources(adapter);
2773 adapter->flags &= ~FULL_INIT_DONE;
2774}
2775
2776
2777
2778
2779static int cxgb_open(struct net_device *dev)
2780{
2781 int err;
2782 struct port_info *pi = netdev_priv(dev);
2783 struct adapter *adapter = pi->adapter;
2784
2785 netif_carrier_off(dev);
2786
2787 if (!(adapter->flags & FULL_INIT_DONE)) {
2788 err = cxgb_up(adapter);
2789 if (err < 0)
2790 return err;
2791 }
2792
2793 err = link_start(dev);
2794 if (!err)
2795 netif_tx_start_all_queues(dev);
2796 return err;
2797}
2798
2799static int cxgb_close(struct net_device *dev)
2800{
2801 struct port_info *pi = netdev_priv(dev);
2802 struct adapter *adapter = pi->adapter;
2803
2804 netif_tx_stop_all_queues(dev);
2805 netif_carrier_off(dev);
2806 return t4_enable_vi(adapter, adapter->pf, pi->viid, false, false);
2807}
2810
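/* Return 0 if the indicated filter entry may be modified, -EPERM if the
 * entry is locked, or -EBUSY if a previous operation on it is still
 * pending.
 */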
2811static int writable_filter(struct filter_entry *f)
2812{
2813 if (f->locked)
2814 return -EPERM;
2815 if (f->pending)
2816 return -EBUSY;
2817
2818 return 0;
2819}
2824
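/* Delete the filter at the specified index (if valid), checking first for
 * the common reasons a filter can't be touched: a locked entry or a still
 * pending operation.
 */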
2825static int delete_filter(struct adapter *adapter, unsigned int fidx)
2826{
2827 struct filter_entry *f;
2828 int ret;
2829
2830 if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
2831 return -EINVAL;
2832
2833 f = &adapter->tids.ftid_tab[fidx];
2834 ret = writable_filter(f);
2835 if (ret)
2836 return ret;
2837 if (f->valid)
2838 return del_filter_wr(adapter, fidx);
2839
2840 return 0;
2841}
2842
2843int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
2844 __be32 sip, __be16 sport, __be16 vlan,
2845 unsigned int queue, unsigned char port, unsigned char mask)
2846{
2847 int ret;
2848 struct filter_entry *f;
2849 struct adapter *adap;
2850 int i;
2851 u8 *val;
2852
2853 adap = netdev2adap(dev);
2855
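	/* Adjust stid to correct filter index */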
2856 stid -= adap->tids.sftid_base;
2857 stid += adap->tids.nftids;
2858
2859
2860
2861 f = &adap->tids.ftid_tab[stid];
2862 ret = writable_filter(f);
2863 if (ret)
2864 return ret;
2865
2866
2867
2868
2869 if (f->valid)
2870 clear_filter(adap, f);
2871
2872
2873 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
2874 f->fs.val.lport = cpu_to_be16(sport);
2875 f->fs.mask.lport = ~0;
2876 val = (u8 *)&sip;
2877 if ((val[0] | val[1] | val[2] | val[3]) != 0) {
2878 for (i = 0; i < 4; i++) {
2879 f->fs.val.lip[i] = val[i];
2880 f->fs.mask.lip[i] = ~0;
2881 }
2882 if (adap->params.tp.vlan_pri_map & PORT_F) {
2883 f->fs.val.iport = port;
2884 f->fs.mask.iport = mask;
2885 }
2886 }
2887
2888 if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
2889 f->fs.val.proto = IPPROTO_TCP;
2890 f->fs.mask.proto = ~0;
2891 }
2892
2893 f->fs.dirsteer = 1;
2894 f->fs.iq = queue;
2895
2896 f->locked = 1;
2897 f->fs.rpttid = 1;
2898
2899 ret = set_filter_wr(adap, stid);
2900 if (ret) {
2901 clear_filter(adap, f);
2902 return ret;
2903 }
2904
2905 return 0;
2906}
2907EXPORT_SYMBOL(cxgb4_create_server_filter);
2908
2909int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
2910 unsigned int queue, bool ipv6)
2911{
2912 int ret;
2913 struct filter_entry *f;
2914 struct adapter *adap;
2915
2916 adap = netdev2adap(dev);
2918
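	/* Adjust stid to correct filter index */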
2919 stid -= adap->tids.sftid_base;
2920 stid += adap->tids.nftids;
2921
2922 f = &adap->tids.ftid_tab[stid];
2923
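	/* Unlock the filter so that delete_filter() may modify it */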
2924 f->locked = 0;
2925
2926 ret = delete_filter(adap, stid);
2927 if (ret)
2928 return ret;
2929
2930 return 0;
2931}
2932EXPORT_SYMBOL(cxgb4_remove_server_filter);
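
/* Usage sketch (hypothetical caller code; "my_dev", "my_stid", "my_port" and
 * "my_rxq" are illustrative only, not part of this driver): steer TCP SYNs
 * destined to a listening server to a chosen ingress queue, then remove the
 * filter again later:
 *
 *	err = cxgb4_create_server_filter(my_dev, my_stid, sip, htons(80),
 *					 0, my_rxq, my_port, 0);
 *	...
 *	err = cxgb4_remove_server_filter(my_dev, my_stid, my_rxq, false);
 */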
2933
2934static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
2935 struct rtnl_link_stats64 *ns)
2936{
2937 struct port_stats stats;
2938 struct port_info *p = netdev_priv(dev);
2939 struct adapter *adapter = p->adapter;
2944
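	/* Block retrieving statistics during EEH error recovery.  Otherwise,
	 * the recovery might fail and the PCI device will be removed
	 * permanently.
	 */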
2945 spin_lock(&adapter->stats_lock);
2946 if (!netif_device_present(dev)) {
2947 spin_unlock(&adapter->stats_lock);
2948 return ns;
2949 }
2950 t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
2951 &p->stats_base);
2952 spin_unlock(&adapter->stats_lock);
2953
2954 ns->tx_bytes = stats.tx_octets;
2955 ns->tx_packets = stats.tx_frames;
2956 ns->rx_bytes = stats.rx_octets;
2957 ns->rx_packets = stats.rx_frames;
2958 ns->multicast = stats.rx_mcast_frames;
2959
2960
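	/* detailed rx_errors */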
2961 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
2962 stats.rx_runt;
2963 ns->rx_over_errors = 0;
2964 ns->rx_crc_errors = stats.rx_fcs_err;
2965 ns->rx_frame_errors = stats.rx_symbol_err;
2966 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
2967 stats.rx_ovflow2 + stats.rx_ovflow3 +
2968 stats.rx_trunc0 + stats.rx_trunc1 +
2969 stats.rx_trunc2 + stats.rx_trunc3;
2970 ns->rx_missed_errors = 0;
2971
2972
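	/* detailed tx_errors */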
2973 ns->tx_aborted_errors = 0;
2974 ns->tx_carrier_errors = 0;
2975 ns->tx_fifo_errors = 0;
2976 ns->tx_heartbeat_errors = 0;
2977 ns->tx_window_errors = 0;
2978
2979 ns->tx_errors = stats.tx_error_frames;
2980 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
2981 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
2982 return ns;
2983}
2984
2985static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2986{
2987 unsigned int mbox;
2988 int ret = 0, prtad, devad;
2989 struct port_info *pi = netdev_priv(dev);
2990 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
2991
2992 switch (cmd) {
2993 case SIOCGMIIPHY:
2994 if (pi->mdio_addr < 0)
2995 return -EOPNOTSUPP;
2996 data->phy_id = pi->mdio_addr;
2997 break;
2998 case SIOCGMIIREG:
2999 case SIOCSMIIREG:
3000 if (mdio_phy_id_is_c45(data->phy_id)) {
3001 prtad = mdio_phy_id_prtad(data->phy_id);
3002 devad = mdio_phy_id_devad(data->phy_id);
3003 } else if (data->phy_id < 32) {
3004 prtad = data->phy_id;
3005 devad = 0;
3006 data->reg_num &= 0x1f;
		} else {
			return -EINVAL;
		}
3009
3010 mbox = pi->adapter->pf;
3011 if (cmd == SIOCGMIIREG)
3012 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
3013 data->reg_num, &data->val_out);
3014 else
3015 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
3016 data->reg_num, data->val_in);
3017 break;
3018 case SIOCGHWTSTAMP:
3019 return copy_to_user(req->ifr_data, &pi->tstamp_config,
3020 sizeof(pi->tstamp_config)) ?
3021 -EFAULT : 0;
3022 case SIOCSHWTSTAMP:
3023 if (copy_from_user(&pi->tstamp_config, req->ifr_data,
3024 sizeof(pi->tstamp_config)))
3025 return -EFAULT;
3026
3027 switch (pi->tstamp_config.rx_filter) {
3028 case HWTSTAMP_FILTER_NONE:
3029 pi->rxtstamp = false;
3030 break;
3031 case HWTSTAMP_FILTER_ALL:
3032 pi->rxtstamp = true;
3033 break;
3034 default:
3035 pi->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
3036 return -ERANGE;
3037 }
3038
3039 return copy_to_user(req->ifr_data, &pi->tstamp_config,
3040 sizeof(pi->tstamp_config)) ?
3041 -EFAULT : 0;
3042 default:
3043 return -EOPNOTSUPP;
3044 }
3045 return ret;
3046}
3047
3048static void cxgb_set_rxmode(struct net_device *dev)
3049{
3050
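	/* unfortunately we can't return errors to the stack here */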
3051 set_rxmode(dev, -1, false);
3052}
3053
3054static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
3055{
3056 int ret;
3057 struct port_info *pi = netdev_priv(dev);
3058
3059 if (new_mtu < 81 || new_mtu > MAX_MTU)
3060 return -EINVAL;
3061 ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
3062 -1, -1, -1, true);
3063 if (!ret)
3064 dev->mtu = new_mtu;
3065 return ret;
3066}
3067
3068static int cxgb_set_mac_addr(struct net_device *dev, void *p)
3069{
3070 int ret;
3071 struct sockaddr *addr = p;
3072 struct port_info *pi = netdev_priv(dev);
3073
3074 if (!is_valid_ether_addr(addr->sa_data))
3075 return -EADDRNOTAVAIL;
3076
3077 ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid,
3078 pi->xact_addr_filt, addr->sa_data, true, true);
3079 if (ret < 0)
3080 return ret;
3081
3082 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3083 pi->xact_addr_filt = ret;
3084 return 0;
3085}
3086
3087#ifdef CONFIG_NET_POLL_CONTROLLER
3088static void cxgb_netpoll(struct net_device *dev)
3089{
3090 struct port_info *pi = netdev_priv(dev);
3091 struct adapter *adap = pi->adapter;
3092
3093 if (adap->flags & USING_MSIX) {
3094 int i;
3095 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
3096
3097 for (i = pi->nqsets; i; i--, rx++)
3098 t4_sge_intr_msix(0, &rx->rspq);
	} else {
		t4_intr_handler(adap)(0, adap);
	}
3101}
3102#endif
3103
3104static const struct net_device_ops cxgb4_netdev_ops = {
3105 .ndo_open = cxgb_open,
3106 .ndo_stop = cxgb_close,
3107 .ndo_start_xmit = t4_eth_xmit,
3108 .ndo_select_queue = cxgb_select_queue,
3109 .ndo_get_stats64 = cxgb_get_stats,
3110 .ndo_set_rx_mode = cxgb_set_rxmode,
3111 .ndo_set_mac_address = cxgb_set_mac_addr,
3112 .ndo_set_features = cxgb_set_features,
3113 .ndo_validate_addr = eth_validate_addr,
3114 .ndo_do_ioctl = cxgb_ioctl,
3115 .ndo_change_mtu = cxgb_change_mtu,
3116#ifdef CONFIG_NET_POLL_CONTROLLER
3117 .ndo_poll_controller = cxgb_netpoll,
3118#endif
3119#ifdef CONFIG_CHELSIO_T4_FCOE
3120 .ndo_fcoe_enable = cxgb_fcoe_enable,
3121 .ndo_fcoe_disable = cxgb_fcoe_disable,
3122#endif
3123#ifdef CONFIG_NET_RX_BUSY_POLL
3124 .ndo_busy_poll = cxgb_busy_poll,
3125#endif
3127};
3128
3129void t4_fatal_err(struct adapter *adap)
3130{
3131 t4_set_reg_field(adap, SGE_CONTROL_A, GLOBALENABLE_F, 0);
3132 t4_intr_disable(adap);
3133 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
3134}
3135
3136static void setup_memwin(struct adapter *adap)
3137{
3138 u32 nic_win_base = t4_get_util_window(adap);
3139
3140 t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC);
3141}
3142
3143static void setup_memwin_rdma(struct adapter *adap)
3144{
3145 if (adap->vres.ocq.size) {
3146 u32 start;
3147 unsigned int sz_kb;
3148
3149 start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
3150 start &= PCI_BASE_ADDRESS_MEM_MASK;
3151 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
3152 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
3153 t4_write_reg(adap,
3154 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
3155 start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
3156 t4_write_reg(adap,
3157 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
3158 adap->vres.ocq.start);
3159 t4_read_reg(adap,
3160 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
3161 }
3162}
3163
3164static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
3165{
3166 u32 v;
3167 int ret;
3168
3169
3170 memset(c, 0, sizeof(*c));
3171 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3172 FW_CMD_REQUEST_F | FW_CMD_READ_F);
3173 c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
3174 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
3175 if (ret < 0)
3176 return ret;
3177
3178 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3179 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
3180 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
3181 if (ret < 0)
3182 return ret;
3183
3184 ret = t4_config_glbl_rss(adap, adap->pf,
3185 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
3186 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
3187 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
3188 if (ret < 0)
3189 return ret;
3190
3191 ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
3192 MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
3193 FW_CMD_CAP_PF);
3194 if (ret < 0)
3195 return ret;
3196
3197 t4_sge_init(adap);
3198
3199
3200 t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
3201 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
3202 t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
3203 v = t4_read_reg(adap, TP_PIO_DATA_A);
3204 t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);
3205
3206
3207 adap->params.tp.tx_modq_map = 0xE4;
3208 t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
3209 TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));
3210
3211
3212 v = 0x84218421;
3213 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3214 &v, 1, TP_TX_SCHED_HDR_A);
3215 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3216 &v, 1, TP_TX_SCHED_FIFO_A);
3217 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3218 &v, 1, TP_TX_SCHED_PCMD_A);
3219
3220#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16
3221 if (is_offload(adap)) {
3222 t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
3223 TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3224 TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3225 TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3226 TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
3227 t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
3228 TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3229 TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3230 TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3231 TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
3232 }
3233
3234
3235 return t4_early_init(adap, adap->pf);
3236}
3237
3238
3239
3240
3241#define MAX_ATIDS 8192U
3258
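/* Tweak configuration based on system architecture, module parameters, etc.
 * Most of these have sensible defaults assigned by the Firmware
 * Configuration File (when one is used), but we still want to apply a small
 * set of common host-side adjustments either way.
 */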
3259static int adap_init0_tweaks(struct adapter *adapter)
3260{
3265
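	/* Fix up various Host-Dependent Parameters like Page Size, Cache
	 * Line Size, etc. against what this host actually uses.
	 */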
3266 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
3267
3268
3269
3270
3271 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
3272 dev_err(&adapter->pdev->dev,
3273 "Ignoring illegal rx_dma_offset=%d, using 2\n",
3274 rx_dma_offset);
3275 rx_dma_offset = 2;
3276 }
3277 t4_set_reg_field(adapter, SGE_CONTROL_A,
3278 PKTSHIFT_V(PKTSHIFT_M),
3279 PKTSHIFT_V(rx_dma_offset));
3284
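	/* Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: the
	 * Linux networking stack adds the pseudo header itself.
	 */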
3285 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
3286 CSUM_HAS_PSEUDO_HDR_F, 0);
3287
3288 return 0;
3289}
3290
3291
3292
3293
3294
3295static int phy_aq1202_version(const u8 *phy_fw_data,
3296 size_t phy_fw_size)
3297{
3298 int offset;
3308
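	/* Walk the image headers to find the version word: the 24-bit
	 * little-endian value at offset 0x8 locates (in 4KB units) a header
	 * whose 24-bit value at offset 0xa in turn locates the region holding
	 * the 16-bit big-endian version at offset 0x27e.  These offsets
	 * follow the AQ1202 firmware image layout.
	 */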
3309 #define be16(__p) (((__p)[0] << 8) | (__p)[1])
3310 #define le16(__p) ((__p)[0] | ((__p)[1] << 8))
3311 #define le24(__p) (le16(__p) | ((__p)[2] << 16))
3312
3313 offset = le24(phy_fw_data + 0x8) << 12;
3314 offset = le24(phy_fw_data + offset + 0xa);
3315 return be16(phy_fw_data + offset + 0x27e);
3316
3317 #undef be16
3318 #undef le16
3319 #undef le24
3320}
3321
3322static struct info_10gbt_phy_fw {
3323 unsigned int phy_fw_id;
3324 char *phy_fw_file;
3325 int (*phy_fw_version)(const u8 *phy_fw_data, size_t phy_fw_size);
3326 int phy_flash;
3327} phy_info_array[] = {
3328 {
3329 PHY_AQ1202_DEVICEID,
3330 PHY_AQ1202_FIRMWARE,
3331 phy_aq1202_version,
3332 1,
3333 },
3334 {
3335 PHY_BCM84834_DEVICEID,
3336 PHY_BCM84834_FIRMWARE,
3337 NULL,
3338 0,
3339 },
3340 { 0, NULL, NULL },
3341};
3342
3343static struct info_10gbt_phy_fw *find_phy_info(int devid)
3344{
3345 int i;
3346
3347 for (i = 0; i < ARRAY_SIZE(phy_info_array); i++) {
3348 if (phy_info_array[i].phy_fw_id == devid)
3349 return &phy_info_array[i];
3350 }
3351 return NULL;
3352}
3358
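/* Handle updating of chip-external 10Gb/s-BT PHY firmware.  This needs to
 * happen after a firmware RESET but before the firmware is told to
 * initialize (see adap_init0_config()).
 */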
3359static int adap_init0_phy(struct adapter *adap)
3360{
3361 const struct firmware *phyf;
3362 int ret;
3363 struct info_10gbt_phy_fw *phy_info;
3364
3365
3366
3367 phy_info = find_phy_info(adap->pdev->device);
3368 if (!phy_info) {
3369 dev_warn(adap->pdev_dev,
3370 "No PHY Firmware file found for this PHY\n");
3371 return -EOPNOTSUPP;
3372 }
3373
3374
3375
3376
3377
3378
3379 ret = request_firmware_direct(&phyf, phy_info->phy_fw_file,
3380 adap->pdev_dev);
3381 if (ret < 0) {
3382
3383
3384
3385
3386
3387
3388 dev_err(adap->pdev_dev, "unable to find PHY Firmware image "
3389 "/lib/firmware/%s, error %d\n",
3390 phy_info->phy_fw_file, -ret);
3391 if (phy_info->phy_flash) {
3392 int cur_phy_fw_ver = 0;
3393
3394 t4_phy_fw_ver(adap, &cur_phy_fw_ver);
			dev_warn(adap->pdev_dev, "continuing with on-adapter "
				 "FLASH copy, version %#x\n", cur_phy_fw_ver);
3397 ret = 0;
3398 }
3399
3400 return ret;
3401 }
3402
3403
3404
3405 ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock,
3406 phy_info->phy_fw_version,
3407 (u8 *)phyf->data, phyf->size);
3408 if (ret < 0)
3409 dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
3410 -ret);
3411 else if (ret > 0) {
3412 int new_phy_fw_ver = 0;
3413
3414 if (phy_info->phy_fw_version)
3415 new_phy_fw_ver = phy_info->phy_fw_version(phyf->data,
3416 phyf->size);
3417 dev_info(adap->pdev_dev, "Successfully transferred PHY "
3418 "Firmware /lib/firmware/%s, version %#x\n",
3419 phy_info->phy_fw_file, new_phy_fw_ver);
3420 }
3421
3422 release_firmware(phyf);
3423
3424 return ret;
3425}
3429
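/* Attempt to initialize the adapter via a Firmware Configuration File.
 */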
3430static int adap_init0_config(struct adapter *adapter, int reset)
3431{
3432 struct fw_caps_config_cmd caps_cmd;
3433 const struct firmware *cf;
3434 unsigned long mtype = 0, maddr = 0;
3435 u32 finiver, finicsum, cfcsum;
3436 int ret;
3437 int config_issued = 0;
3438 char *fw_config_file, fw_config_file_path[256];
3439 char *config_name = NULL;
3440
3441
3442
3443
3444 if (reset) {
3445 ret = t4_fw_reset(adapter, adapter->mbox,
3446 PIORSTMODE_F | PIORST_F);
3447 if (ret < 0)
3448 goto bye;
3449 }
3450
3451
3452
3453
3454
3455
3456 if (is_10gbt_device(adapter->pdev->device)) {
3457 ret = adap_init0_phy(adapter);
3458 if (ret < 0)
3459 goto bye;
3460 }
3461
3462
3463
3464
3465
3466 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
3467 case CHELSIO_T4:
3468 fw_config_file = FW4_CFNAME;
3469 break;
3470 case CHELSIO_T5:
3471 fw_config_file = FW5_CFNAME;
3472 break;
3473 case CHELSIO_T6:
3474 fw_config_file = FW6_CFNAME;
3475 break;
3476 default:
3477 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
3478 adapter->pdev->device);
3479 ret = -EINVAL;
3480 goto bye;
3481 }
3482
3483 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
3484 if (ret < 0) {
3485 config_name = "On FLASH";
3486 mtype = FW_MEMTYPE_CF_FLASH;
3487 maddr = t4_flash_cfg_addr(adapter);
3488 } else {
3489 u32 params[7], val[7];
3490
3491 sprintf(fw_config_file_path,
3492 "/lib/firmware/%s", fw_config_file);
3493 config_name = fw_config_file_path;
3494
3495 if (cf->size >= FLASH_CFG_MAX_SIZE)
3496 ret = -ENOMEM;
3497 else {
3498 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3499 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
3500 ret = t4_query_params(adapter, adapter->mbox,
3501 adapter->pf, 0, 1, params, val);
3502 if (ret == 0) {
3512
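			/* We found a usable Firmware Configuration File on
			 * the host and the firmware told us where in adapter
			 * memory to stash it, so copy it there 32 bits at a
			 * time, zero-padding the final partial word if the
			 * file size isn't a multiple of 4 bytes.
			 */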
3513 size_t resid = cf->size & 0x3;
3514 size_t size = cf->size & ~0x3;
3515 __be32 *data = (__be32 *)cf->data;
3516
3517 mtype = FW_PARAMS_PARAM_Y_G(val[0]);
3518 maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;
3519
3520 spin_lock(&adapter->win0_lock);
3521 ret = t4_memory_rw(adapter, 0, mtype, maddr,
3522 size, data, T4_MEMORY_WRITE);
3523 if (ret == 0 && resid != 0) {
3524 union {
3525 __be32 word;
3526 char buf[4];
3527 } last;
3528 int i;
3529
3530 last.word = data[size >> 2];
3531 for (i = resid; i < 4; i++)
3532 last.buf[i] = 0;
3533 ret = t4_memory_rw(adapter, 0, mtype,
3534 maddr + size,
3535 4, &last.word,
3536 T4_MEMORY_WRITE);
3537 }
3538 spin_unlock(&adapter->win0_lock);
3539 }
3540 }
3541
3542 release_firmware(cf);
3543 if (ret)
3544 goto bye;
3545 }
3546
3547
3548
3549
3550
3551
3552
3553 memset(&caps_cmd, 0, sizeof(caps_cmd));
3554 caps_cmd.op_to_write =
3555 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3556 FW_CMD_REQUEST_F |
3557 FW_CMD_READ_F);
3558 caps_cmd.cfvalid_to_len16 =
3559 htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
3560 FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
3561 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
3562 FW_LEN16(caps_cmd));
3563 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3564 &caps_cmd);
3571
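	/* If the CAPS_CONFIG command failed with -ENOENT, the Configuration
	 * File we pointed the firmware at wasn't usable; retry the command
	 * without a Configuration File to fall back to the firmware's
	 * default capabilities.
	 */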
3572 if (ret == -ENOENT) {
3573 memset(&caps_cmd, 0, sizeof(caps_cmd));
3574 caps_cmd.op_to_write =
3575 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3576 FW_CMD_REQUEST_F |
3577 FW_CMD_READ_F);
3578 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
3579 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
3580 sizeof(caps_cmd), &caps_cmd);
3581 config_name = "Firmware Default";
3582 }
3583
3584 config_issued = 1;
3585 if (ret < 0)
3586 goto bye;
3587
3588 finiver = ntohl(caps_cmd.finiver);
3589 finicsum = ntohl(caps_cmd.finicsum);
3590 cfcsum = ntohl(caps_cmd.cfcsum);
3591 if (finicsum != cfcsum)
		dev_warn(adapter->pdev_dev, "Configuration File checksum "
3593 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
3594 finicsum, cfcsum);
3595
3596
3597
3598
3599 caps_cmd.op_to_write =
3600 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3601 FW_CMD_REQUEST_F |
3602 FW_CMD_WRITE_F);
3603 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
3604 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3605 NULL);
3606 if (ret < 0)
3607 goto bye;
3608
3609
3610
3611
3612
3613 ret = adap_init0_tweaks(adapter);
3614 if (ret < 0)
3615 goto bye;
3616
3617
3618
3619
3620
3621 ret = t4_fw_initialize(adapter, adapter->mbox);
3622 if (ret < 0)
3623 goto bye;
3624
3625
3626
3627
	dev_info(adapter->pdev_dev, "Successfully configured using Firmware "
3629 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
3630 config_name, finiver, cfcsum);
3631 return 0;
3632
3633
3634
3635
3636
3637
3638bye:
3639 if (config_issued && ret != -ENOENT)
3640 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
3641 config_name, -ret);
3642 return ret;
3643}
3644
3645static struct fw_info fw_info_array[] = {
3646 {
3647 .chip = CHELSIO_T4,
3648 .fs_name = FW4_CFNAME,
3649 .fw_mod_name = FW4_FNAME,
3650 .fw_hdr = {
3651 .chip = FW_HDR_CHIP_T4,
3652 .fw_ver = __cpu_to_be32(FW_VERSION(T4)),
3653 .intfver_nic = FW_INTFVER(T4, NIC),
3654 .intfver_vnic = FW_INTFVER(T4, VNIC),
3655 .intfver_ri = FW_INTFVER(T4, RI),
3656 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
3657 .intfver_fcoe = FW_INTFVER(T4, FCOE),
3658 },
3659 }, {
3660 .chip = CHELSIO_T5,
3661 .fs_name = FW5_CFNAME,
3662 .fw_mod_name = FW5_FNAME,
3663 .fw_hdr = {
3664 .chip = FW_HDR_CHIP_T5,
3665 .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
3666 .intfver_nic = FW_INTFVER(T5, NIC),
3667 .intfver_vnic = FW_INTFVER(T5, VNIC),
3668 .intfver_ri = FW_INTFVER(T5, RI),
3669 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
3670 .intfver_fcoe = FW_INTFVER(T5, FCOE),
3671 },
3672 }, {
3673 .chip = CHELSIO_T6,
3674 .fs_name = FW6_CFNAME,
3675 .fw_mod_name = FW6_FNAME,
3676 .fw_hdr = {
3677 .chip = FW_HDR_CHIP_T6,
3678 .fw_ver = __cpu_to_be32(FW_VERSION(T6)),
3679 .intfver_nic = FW_INTFVER(T6, NIC),
3680 .intfver_vnic = FW_INTFVER(T6, VNIC),
3681 .intfver_ofld = FW_INTFVER(T6, OFLD),
3682 .intfver_ri = FW_INTFVER(T6, RI),
3683 .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
3684 .intfver_iscsi = FW_INTFVER(T6, ISCSI),
3685 .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
3686 .intfver_fcoe = FW_INTFVER(T6, FCOE),
3687 },
3688 }
3690};
3691
3692static struct fw_info *find_fw_info(int chip)
3693{
3694 int i;
3695
3696 for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
3697 if (fw_info_array[i].chip == chip)
3698 return &fw_info_array[i];
3699 }
3700 return NULL;
3701}
3705
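/* Phase 0 of initialization: contact the firmware, negotiate Master PF
 * status, obtain our basic operating parameters and, if we're the Master,
 * initialize the adapter.
 */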
3706static int adap_init0(struct adapter *adap)
3707{
3708 int ret;
3709 u32 v, port_vec;
3710 enum dev_state state;
3711 u32 params[7], val[7];
3712 struct fw_caps_config_cmd caps_cmd;
3713 int reset = 1;
3714
3715
3716
3717
3718 ret = t4_init_devlog_params(adap);
3719 if (ret < 0)
3720 return ret;
3721
3722
3723 ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
3724 if (ret < 0) {
3725 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
3726 ret);
3727 return ret;
3728 }
3729 if (ret == adap->mbox)
3730 adap->flags |= MASTER_PF;
3738
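	/* If we're the Master PF driver and the device is uninitialized,
	 * we may want to upgrade the firmware.  The firmware and TP
	 * microcode versions are always fetched first so they can be
	 * reported and checked against what the driver expects.
	 */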
3739 t4_get_fw_version(adap, &adap->params.fw_vers);
3740 t4_get_tp_version(adap, &adap->params.tp_vers);
3741 ret = t4_check_fw_version(adap);
3742
3743 if (ret)
3744 state = DEV_STATE_UNINIT;
3745 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
3746 struct fw_info *fw_info;
3747 struct fw_hdr *card_fw;
3748 const struct firmware *fw;
3749 const u8 *fw_data = NULL;
3750 unsigned int fw_size = 0;
3751
3752
3753
3754
3755 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
3756 if (fw_info == NULL) {
3757 dev_err(adap->pdev_dev,
3758 "unable to get firmware info for chip %d.\n",
3759 CHELSIO_CHIP_VERSION(adap->params.chip));
3760 return -EINVAL;
3761 }
3765
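		/* Allocate memory to read the header of the firmware on the
		 * card.
		 */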
		card_fw = t4_alloc_mem(sizeof(*card_fw));
		if (!card_fw) {
			ret = -ENOMEM;
			goto bye;
		}
3767
3768
3769 ret = request_firmware(&fw, fw_info->fw_mod_name,
3770 adap->pdev_dev);
3771 if (ret < 0) {
3772 dev_err(adap->pdev_dev,
3773 "unable to load firmware image %s, error %d\n",
3774 fw_info->fw_mod_name, ret);
3775 } else {
3776 fw_data = fw->data;
3777 fw_size = fw->size;
3778 }
3779
3780
3781 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
3782 state, &reset);
3783
3784
3785 release_firmware(fw);
3786 t4_free_mem(card_fw);
3787
3788 if (ret < 0)
3789 goto bye;
3790 }
3798
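	/* Grab VPD parameters.  This has to happen after we establish a
	 * connection to the firmware, since some VPD parameters (notably
	 * the Core Clock frequency) are retrieved via firmware requests,
	 * but we want them as early as possible.
	 */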
3799 ret = t4_get_vpd_params(adap, &adap->params.vpd);
3800 if (ret < 0)
3801 goto bye;
3802
3803
3804
3805
3806
3807
3808 v =
3809 FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3810 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
3811 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
3812 if (ret < 0)
3813 goto bye;
3814
3815 adap->params.nports = hweight32(port_vec);
3816 adap->params.portvec = port_vec;
3817
3818
3819
3820
3821 if (state == DEV_STATE_INIT) {
		dev_info(adap->pdev_dev, "Coming up as %s: "
			 "Adapter already initialized\n",
			 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
	} else {
		dev_info(adap->pdev_dev, "Coming up as MASTER: "
			 "Initializing adapter\n");
3828
3829
3830
3831
3832 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3833 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
3834 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
3835 params, val);
3836
3837
3838
3839
3840 if (ret < 0) {
3841 dev_err(adap->pdev_dev, "firmware doesn't support "
3842 "Firmware Configuration Files\n");
3843 goto bye;
3844 }
3845
3846
3847
3848
3849
3850 ret = adap_init0_config(adap, reset);
3851 if (ret == -ENOENT) {
3852 dev_err(adap->pdev_dev, "no Configuration File "
3853 "present on adapter.\n");
3854 goto bye;
3855 }
3856 if (ret < 0) {
3857 dev_err(adap->pdev_dev, "could not initialize "
3858 "adapter, error %d\n", -ret);
3859 goto bye;
3860 }
3861 }
3862
3863
3864
3865
3866
3867 ret = t4_sge_init(adap);
3868 if (ret < 0)
3869 goto bye;
3870
3871 if (is_bypass_device(adap->pdev->device))
3872 adap->params.bypass = 1;
3873
3874
3875
3876
3877#define FW_PARAM_DEV(param) \
3878 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
3879 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))
3880
3881#define FW_PARAM_PFVF(param) \
3882 FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
3883 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \
3884 FW_PARAMS_PARAM_Y_V(0) | \
3885 FW_PARAMS_PARAM_Z_V(0)
3886
3887 params[0] = FW_PARAM_PFVF(EQ_START);
3888 params[1] = FW_PARAM_PFVF(L2T_START);
3889 params[2] = FW_PARAM_PFVF(L2T_END);
3890 params[3] = FW_PARAM_PFVF(FILTER_START);
3891 params[4] = FW_PARAM_PFVF(FILTER_END);
3892 params[5] = FW_PARAM_PFVF(IQFLINT_START);
3893 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val);
3894 if (ret < 0)
3895 goto bye;
3896 adap->sge.egr_start = val[0];
3897 adap->l2t_start = val[1];
3898 adap->l2t_end = val[2];
3899 adap->tids.ftid_base = val[3];
3900 adap->tids.nftids = val[4] - val[3] + 1;
3901 adap->sge.ingr_start = val[5];
3908
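	/* Queue ids (ingress and egress) returned by the firmware can lie
	 * anywhere in [EQ(IQFLINT)_START, EQ(IQFLINT)_END], so size the
	 * queue maps to cover that whole range, indexed by relative qid.
	 */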
3909 params[0] = FW_PARAM_PFVF(EQ_END);
3910 params[1] = FW_PARAM_PFVF(IQFLINT_END);
3911 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
3912 if (ret < 0)
3913 goto bye;
3914 adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
3915 adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;
3916
3917 adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
3918 sizeof(*adap->sge.egr_map), GFP_KERNEL);
3919 if (!adap->sge.egr_map) {
3920 ret = -ENOMEM;
3921 goto bye;
3922 }
3923
3924 adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
3925 sizeof(*adap->sge.ingr_map), GFP_KERNEL);
3926 if (!adap->sge.ingr_map) {
3927 ret = -ENOMEM;
3928 goto bye;
3929 }
3930
3931
3932
3933
3934 adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
3935 sizeof(long), GFP_KERNEL);
3936 if (!adap->sge.starving_fl) {
3937 ret = -ENOMEM;
3938 goto bye;
3939 }
3940
3941 adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
3942 sizeof(long), GFP_KERNEL);
3943 if (!adap->sge.txq_maperr) {
3944 ret = -ENOMEM;
3945 goto bye;
3946 }
3947
3948#ifdef CONFIG_DEBUG_FS
3949 adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
3950 sizeof(long), GFP_KERNEL);
3951 if (!adap->sge.blocked_fl) {
3952 ret = -ENOMEM;
3953 goto bye;
3954 }
3955#endif
3956
3957 params[0] = FW_PARAM_PFVF(CLIP_START);
3958 params[1] = FW_PARAM_PFVF(CLIP_END);
3959 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
3960 if (ret < 0)
3961 goto bye;
3962 adap->clipt_start = val[0];
3963 adap->clipt_end = val[1];
3964
3965
3966 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
3967 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
3968 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
3969
3970
3971
3972 if ((val[0] != val[1]) && (ret >= 0)) {
3973 adap->flags |= FW_OFLD_CONN;
3974 adap->tids.aftid_base = val[0];
3975 adap->tids.aftid_end = val[1];
3976 }
3977
3978
3979
3980
3981
3982
3983 params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
3984 val[0] = 1;
3985 (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
3986
3987
3988
3989
3990
3991
3992
3993 if (is_t4(adap->params.chip)) {
3994 adap->params.ulptx_memwrite_dsgl = false;
3995 } else {
3996 params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
3997 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
3998 1, params, val);
3999 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
4000 }
4001
4002
4003
4004
4005
4006 memset(&caps_cmd, 0, sizeof(caps_cmd));
4007 caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4008 FW_CMD_REQUEST_F | FW_CMD_READ_F);
4009 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4010 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
4011 &caps_cmd);
4012 if (ret < 0)
4013 goto bye;
4014
4015 if (caps_cmd.ofldcaps) {
4016
4017 params[0] = FW_PARAM_DEV(NTID);
4018 params[1] = FW_PARAM_PFVF(SERVER_START);
4019 params[2] = FW_PARAM_PFVF(SERVER_END);
4020 params[3] = FW_PARAM_PFVF(TDDP_START);
4021 params[4] = FW_PARAM_PFVF(TDDP_END);
4022 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
4023 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
4024 params, val);
4025 if (ret < 0)
4026 goto bye;
4027 adap->tids.ntids = val[0];
4028 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
4029 adap->tids.stid_base = val[1];
4030 adap->tids.nstids = val[2] - val[1] + 1;
4039
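		/* Divide the filter region when the firmware supports offload
		 * connections: the lower part keeps the regular filters and
		 * the upper part becomes server filters, which are used to
		 * redirect SYN packets to offload queues.
		 */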
4040 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
4041 adap->tids.sftid_base = adap->tids.ftid_base +
4042 DIV_ROUND_UP(adap->tids.nftids, 3);
4043 adap->tids.nsftids = adap->tids.nftids -
4044 DIV_ROUND_UP(adap->tids.nftids, 3);
4045 adap->tids.nftids = adap->tids.sftid_base -
4046 adap->tids.ftid_base;
4047 }
4048 adap->vres.ddp.start = val[3];
4049 adap->vres.ddp.size = val[4] - val[3] + 1;
4050 adap->params.ofldq_wr_cred = val[5];
4051
4052 adap->params.offload = 1;
4053 }
4054 if (caps_cmd.rdmacaps) {
4055 params[0] = FW_PARAM_PFVF(STAG_START);
4056 params[1] = FW_PARAM_PFVF(STAG_END);
4057 params[2] = FW_PARAM_PFVF(RQ_START);
4058 params[3] = FW_PARAM_PFVF(RQ_END);
4059 params[4] = FW_PARAM_PFVF(PBL_START);
4060 params[5] = FW_PARAM_PFVF(PBL_END);
4061 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
4062 params, val);
4063 if (ret < 0)
4064 goto bye;
4065 adap->vres.stag.start = val[0];
4066 adap->vres.stag.size = val[1] - val[0] + 1;
4067 adap->vres.rq.start = val[2];
4068 adap->vres.rq.size = val[3] - val[2] + 1;
4069 adap->vres.pbl.start = val[4];
4070 adap->vres.pbl.size = val[5] - val[4] + 1;
4071
4072 params[0] = FW_PARAM_PFVF(SQRQ_START);
4073 params[1] = FW_PARAM_PFVF(SQRQ_END);
4074 params[2] = FW_PARAM_PFVF(CQ_START);
4075 params[3] = FW_PARAM_PFVF(CQ_END);
4076 params[4] = FW_PARAM_PFVF(OCQ_START);
4077 params[5] = FW_PARAM_PFVF(OCQ_END);
4078 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params,
4079 val);
4080 if (ret < 0)
4081 goto bye;
4082 adap->vres.qp.start = val[0];
4083 adap->vres.qp.size = val[1] - val[0] + 1;
4084 adap->vres.cq.start = val[2];
4085 adap->vres.cq.size = val[3] - val[2] + 1;
4086 adap->vres.ocq.start = val[4];
4087 adap->vres.ocq.size = val[5] - val[4] + 1;
4088
4089 params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
4090 params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
4091 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params,
4092 val);
4093 if (ret < 0) {
4094 adap->params.max_ordird_qp = 8;
4095 adap->params.max_ird_adapter = 32 * adap->tids.ntids;
4096 ret = 0;
4097 } else {
4098 adap->params.max_ordird_qp = val[0];
4099 adap->params.max_ird_adapter = val[1];
4100 }
4101 dev_info(adap->pdev_dev,
4102 "max_ordird_qp %d max_ird_adapter %d\n",
4103 adap->params.max_ordird_qp,
4104 adap->params.max_ird_adapter);
4105 }
4106 if (caps_cmd.iscsicaps) {
4107 params[0] = FW_PARAM_PFVF(ISCSI_START);
4108 params[1] = FW_PARAM_PFVF(ISCSI_END);
4109 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
4110 params, val);
4111 if (ret < 0)
4112 goto bye;
4113 adap->vres.iscsi.start = val[0];
4114 adap->vres.iscsi.size = val[1] - val[0] + 1;
4115 }
4116#undef FW_PARAM_PFVF
4117#undef FW_PARAM_DEV
4123
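	/* The MTU/MSS Table is initialized by now, so load its values.  If
	 * we're initializing the adapter, we'll also adjust the table and
	 * program the congestion control parameters below.
	 */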
4124 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
4125 if (state != DEV_STATE_INIT) {
4126 int i;
4144
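		/* The default MTU Table contains both 1492 and 1500.
		 * Replace 1492 with 1488: with 40 bytes of TCP/IP headers and
		 * no options the resulting TCP payload (1448 bytes) stays a
		 * multiple of 8, which matters for performance.
		 */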
4145 for (i = 0; i < NMTUS; i++)
4146 if (adap->params.mtus[i] == 1492) {
4147 adap->params.mtus[i] = 1488;
4148 break;
4149 }
4150
4151 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
4152 adap->params.b_wnd);
4153 }
4154 t4_init_sge_params(adap);
4155 adap->flags |= FW_OK;
4156 t4_init_tp_params(adap);
4157 return 0;
4163
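	/* Something went wrong: free whatever we allocated and, unless the
	 * error indicates the firmware or bus is wedged (timeout or EIO),
	 * say goodbye to the firmware so it can release this PF.
	 */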
4164bye:
4165 kfree(adap->sge.egr_map);
4166 kfree(adap->sge.ingr_map);
4167 kfree(adap->sge.starving_fl);
4168 kfree(adap->sge.txq_maperr);
4169#ifdef CONFIG_DEBUG_FS
4170 kfree(adap->sge.blocked_fl);
4171#endif
4172 if (ret != -ETIMEDOUT && ret != -EIO)
4173 t4_fw_bye(adap, adap->mbox);
4174 return ret;
4175}
4176
4177
4178
4179static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
4180 pci_channel_state_t state)
4181{
4182 int i;
4183 struct adapter *adap = pci_get_drvdata(pdev);
4184
4185 if (!adap)
4186 goto out;
4187
4188 rtnl_lock();
4189 adap->flags &= ~FW_OK;
4190 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
4191 spin_lock(&adap->stats_lock);
4192 for_each_port(adap, i) {
4193 struct net_device *dev = adap->port[i];
4194
4195 netif_device_detach(dev);
4196 netif_carrier_off(dev);
4197 }
4198 spin_unlock(&adap->stats_lock);
4199 disable_interrupts(adap);
4200 if (adap->flags & FULL_INIT_DONE)
4201 cxgb_down(adap);
4202 rtnl_unlock();
4203 if ((adap->flags & DEV_ENABLED)) {
4204 pci_disable_device(pdev);
4205 adap->flags &= ~DEV_ENABLED;
4206 }
out:
	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
4209}
4210
4211static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
4212{
4213 int i, ret;
4214 struct fw_caps_config_cmd c;
4215 struct adapter *adap = pci_get_drvdata(pdev);
4216
4217 if (!adap) {
4218 pci_restore_state(pdev);
4219 pci_save_state(pdev);
4220 return PCI_ERS_RESULT_RECOVERED;
4221 }
4222
4223 if (!(adap->flags & DEV_ENABLED)) {
4224 if (pci_enable_device(pdev)) {
4225 dev_err(&pdev->dev, "Cannot reenable PCI "
4226 "device after reset\n");
4227 return PCI_ERS_RESULT_DISCONNECT;
4228 }
4229 adap->flags |= DEV_ENABLED;
4230 }
4231
4232 pci_set_master(pdev);
4233 pci_restore_state(pdev);
4234 pci_save_state(pdev);
4235 pci_cleanup_aer_uncorrect_error_status(pdev);
4236
4237 if (t4_wait_dev_ready(adap->regs) < 0)
4238 return PCI_ERS_RESULT_DISCONNECT;
4239 if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
4240 return PCI_ERS_RESULT_DISCONNECT;
4241 adap->flags |= FW_OK;
4242 if (adap_init1(adap, &c))
4243 return PCI_ERS_RESULT_DISCONNECT;
4244
4245 for_each_port(adap, i) {
4246 struct port_info *p = adap2pinfo(adap, i);
4247
4248 ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1,
4249 NULL, NULL);
4250 if (ret < 0)
4251 return PCI_ERS_RESULT_DISCONNECT;
4252 p->viid = ret;
4253 p->xact_addr_filt = -1;
4254 }
4255
4256 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
4257 adap->params.b_wnd);
4258 setup_memwin(adap);
4259 if (cxgb_up(adap))
4260 return PCI_ERS_RESULT_DISCONNECT;
4261 return PCI_ERS_RESULT_RECOVERED;
4262}
4263
4264static void eeh_resume(struct pci_dev *pdev)
4265{
4266 int i;
4267 struct adapter *adap = pci_get_drvdata(pdev);
4268
4269 if (!adap)
4270 return;
4271
4272 rtnl_lock();
4273 for_each_port(adap, i) {
4274 struct net_device *dev = adap->port[i];
4275
4276 if (netif_running(dev)) {
4277 link_start(dev);
4278 cxgb_set_rxmode(dev);
4279 }
4280 netif_device_attach(dev);
4281 }
4282 rtnl_unlock();
4283}
4284
4285static const struct pci_error_handlers cxgb4_eeh = {
4286 .error_detected = eeh_err_detected,
4287 .slot_reset = eeh_slot_reset,
4288 .resume = eeh_resume,
4289};
4290
4291static inline bool is_x_10g_port(const struct link_config *lc)
4292{
4293 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
4294 (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
4295}
4296
4297static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
4298 unsigned int us, unsigned int cnt,
4299 unsigned int size, unsigned int iqe_size)
4300{
4301 q->adap = adap;
4302 cxgb4_set_rspq_intr_params(q, us, cnt);
4303 q->iqe_len = iqe_size;
4304 q->size = size;
4305}
4311
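/* Perform default configuration of DMA queues depending on the number and
 * type of ports we found and the number of available CPUs.  Most settings
 * can be modified by the admin prior to actual use.
 */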
4312static void cfg_queues(struct adapter *adap)
4313{
4314 struct sge *s = &adap->sge;
4315 int i, n10g = 0, qidx = 0;
4316#ifndef CONFIG_CHELSIO_T4_DCB
4317 int q10g = 0;
4318#endif
4319 int ciq_size;
4320
4321 for_each_port(adap, i)
4322 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
4323#ifdef CONFIG_CHELSIO_T4_DCB
4324
4325
4326
4327
4328 if (adap->params.nports * 8 > MAX_ETH_QSETS) {
4329 dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
4330 MAX_ETH_QSETS, adap->params.nports * 8);
4331 BUG_ON(1);
4332 }
4333
4334 for_each_port(adap, i) {
4335 struct port_info *pi = adap2pinfo(adap, i);
4336
4337 pi->first_qset = qidx;
4338 pi->nqsets = 8;
4339 qidx += pi->nqsets;
4340 }
4341#else
4345
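	/* We default to one queue set per non-10G port, and share the rest
	 * evenly among the 10G ports, capped at the default number of RSS
	 * queues.
	 */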
4346 if (n10g)
4347 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
4348 if (q10g > netif_get_num_default_rss_queues())
4349 q10g = netif_get_num_default_rss_queues();
4350
4351 for_each_port(adap, i) {
4352 struct port_info *pi = adap2pinfo(adap, i);
4353
4354 pi->first_qset = qidx;
4355 pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
4356 qidx += pi->nqsets;
4357 }
4358#endif
4359
4360 s->ethqsets = qidx;
4361 s->max_ethqsets = qidx;
4362
4363 if (is_offload(adap)) {
4368
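		/* For offload we use one queue set per channel when all ports
		 * are up to 1G; otherwise we spread the queue sets over the
		 * online CPUs, rounded up to a multiple of the port count.
		 */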
4369 if (n10g) {
4370 i = min_t(int, ARRAY_SIZE(s->iscsirxq),
4371 num_online_cpus());
4372 s->iscsiqsets = roundup(i, adap->params.nports);
		} else {
			s->iscsiqsets = adap->params.nports;
		}
4375
4376 s->rdmaqs = adap->params.nports;
4382
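		/* Try to allow at least one RDMA CIQ per CPU, rounded down to
		 * a multiple of the number of ports, with a minimum of one
		 * per port.
		 */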
4383 s->rdmaciqs = min_t(int, MAX_RDMA_CIQS, num_online_cpus());
4384 s->rdmaciqs = (s->rdmaciqs / adap->params.nports) *
4385 adap->params.nports;
4386 s->rdmaciqs = max_t(int, s->rdmaciqs, adap->params.nports);
4387
4388 if (!is_t4(adap->params.chip))
4389 s->niscsitq = s->iscsiqsets;
4390 }
4391
4392 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
4393 struct sge_eth_rxq *r = &s->ethrxq[i];
4394
4395 init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
4396 r->fl.size = 72;
4397 }
4398
4399 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
4400 s->ethtxq[i].q.size = 1024;
4401
4402 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
4403 s->ctrlq[i].q.size = 512;
4404
4405 for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
4406 s->ofldtxq[i].q.size = 1024;
4407
4408 for (i = 0; i < ARRAY_SIZE(s->iscsirxq); i++) {
4409 struct sge_ofld_rxq *r = &s->iscsirxq[i];
4410
4411 init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
4412 r->rspq.uld = CXGB4_ULD_ISCSI;
4413 r->fl.size = 72;
4414 }
4415
4416 if (!is_t4(adap->params.chip)) {
4417 for (i = 0; i < ARRAY_SIZE(s->iscsitrxq); i++) {
4418 struct sge_ofld_rxq *r = &s->iscsitrxq[i];
4419
4420 init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
4421 r->rspq.uld = CXGB4_ULD_ISCSIT;
4422 r->fl.size = 72;
4423 }
4424 }
4425
4426 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
4427 struct sge_ofld_rxq *r = &s->rdmarxq[i];
4428
4429 init_rspq(adap, &r->rspq, 5, 1, 511, 64);
4430 r->rspq.uld = CXGB4_ULD_RDMA;
4431 r->fl.size = 72;
4432 }
4433
4434 ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
4435 if (ciq_size > SGE_MAX_IQ_SIZE) {
4436 CH_WARN(adap, "CIQ size too small for available IQs\n");
4437 ciq_size = SGE_MAX_IQ_SIZE;
4438 }
4439
4440 for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
4441 struct sge_ofld_rxq *r = &s->rdmaciq[i];
4442
4443 init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
4444 r->rspq.uld = CXGB4_ULD_RDMA;
4445 }
4446
4447 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
4448 init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
4449}
4454
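/* Reduce the number of Ethernet queues across all ports to at most n,
 * where n provides at least one queue per port.
 */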
4455static void reduce_ethqs(struct adapter *adap, int n)
4456{
4457 int i;
4458 struct port_info *pi;
4459
4460 while (n < adap->sge.ethqsets)
4461 for_each_port(adap, i) {
4462 pi = adap2pinfo(adap, i);
4463 if (pi->nqsets > 1) {
4464 pi->nqsets--;
4465 adap->sge.ethqsets--;
4466 if (adap->sge.ethqsets <= n)
4467 break;
4468 }
4469 }
4470
4471 n = 0;
4472 for_each_port(adap, i) {
4473 pi = adap2pinfo(adap, i);
4474 pi->first_qset = n;
4475 n += pi->nqsets;
4476 }
4477}
4478
4479
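/* Two extra MSI-X vectors are needed beyond the data queues: one for the
 * firmware event queue and one for non-data interrupts.
 */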
4480#define EXTRA_VECS 2
4481
4482static int enable_msix(struct adapter *adap)
4483{
4484 int ofld_need = 0;
4485 int i, want, need, allocated;
4486 struct sge *s = &adap->sge;
4487 unsigned int nchan = adap->params.nports;
4488 struct msix_entry *entries;
4489
4490 entries = kmalloc(sizeof(*entries) * (MAX_INGQ + 1),
4491 GFP_KERNEL);
4492 if (!entries)
4493 return -ENOMEM;
4494
4495 for (i = 0; i < MAX_INGQ + 1; ++i)
4496 entries[i].entry = i;
4497
4498 want = s->max_ethqsets + EXTRA_VECS;
4499 if (is_offload(adap)) {
4500 want += s->rdmaqs + s->rdmaciqs + s->iscsiqsets +
4501 s->niscsitq;
4502
4503 if (is_t4(adap->params.chip))
4504 ofld_need = 3 * nchan;
4505 else
4506 ofld_need = 4 * nchan;
4507 }
4508#ifdef CONFIG_CHELSIO_T4_DCB
4509
4510
4511
4512 need = 8 * adap->params.nports + EXTRA_VECS + ofld_need;
4513#else
4514 need = adap->params.nports + EXTRA_VECS + ofld_need;
4515#endif
4516 allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
4517 if (allocated < 0) {
		dev_info(adap->pdev_dev,
			 "not enough MSI-X vectors left, not using MSI-X\n");
4520 kfree(entries);
4521 return allocated;
4522 }
4527
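	/* Distribute the available vectors: every queue group gets its
	 * minimum requirement and the NIC queues get top priority for any
	 * leftovers.
	 */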
4528 i = allocated - EXTRA_VECS - ofld_need;
4529 if (i < s->max_ethqsets) {
4530 s->max_ethqsets = i;
4531 if (i < s->ethqsets)
4532 reduce_ethqs(adap, i);
4533 }
4534 if (is_offload(adap)) {
4535 if (allocated < want) {
4536 s->rdmaqs = nchan;
4537 s->rdmaciqs = nchan;
4538
4539 if (!is_t4(adap->params.chip))
4540 s->niscsitq = nchan;
4541 }
4542
4543
4544 i = allocated - EXTRA_VECS - s->max_ethqsets -
4545 s->rdmaqs - s->rdmaciqs - s->niscsitq;
4546 s->iscsiqsets = (i / nchan) * nchan;
4547
4548 }
4549 for (i = 0; i < allocated; ++i)
4550 adap->msix_info[i].vec = entries[i].vector;
4551 dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, "
4552 "nic %d iscsi %d rdma cpl %d rdma ciq %d\n",
4553 allocated, s->max_ethqsets, s->iscsiqsets, s->rdmaqs,
4554 s->rdmaciqs);
4555
4556 kfree(entries);
4557 return 0;
4558}
4559
4560#undef EXTRA_VECS
4561
4562static int init_rss(struct adapter *adap)
4563{
4564 unsigned int i;
4565 int err;
4566
4567 err = t4_init_rss_mode(adap, adap->mbox);
4568 if (err)
4569 return err;
4570
4571 for_each_port(adap, i) {
4572 struct port_info *pi = adap2pinfo(adap, i);
4573
4574 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
4575 if (!pi->rss)
4576 return -ENOMEM;
4577 }
4578 return 0;
4579}
4580
4581static int cxgb4_get_pcie_dev_link_caps(struct adapter *adap,
4582 enum pci_bus_speed *speed,
4583 enum pcie_link_width *width)
4584{
4585 u32 lnkcap1, lnkcap2;
4586 int err1, err2;
4587
4588#define PCIE_MLW_CAP_SHIFT 4
4589
4590 *speed = PCI_SPEED_UNKNOWN;
4591 *width = PCIE_LNK_WIDTH_UNKNOWN;
4592
4593 err1 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP,
4594 &lnkcap1);
4595 err2 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP2,
4596 &lnkcap2);
4597 if (!err2 && lnkcap2) {
4598 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
4599 *speed = PCIE_SPEED_8_0GT;
4600 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
4601 *speed = PCIE_SPEED_5_0GT;
4602 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
4603 *speed = PCIE_SPEED_2_5GT;
4604 }
4605 if (!err1) {
4606 *width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
4607 if (!lnkcap2) {
4608 if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
4609 *speed = PCIE_SPEED_5_0GT;
4610 else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
4611 *speed = PCIE_SPEED_2_5GT;
4612 }
4613 }
4614
4615 if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
4616 return err1 ? err1 : err2 ? err2 : -EINVAL;
4617 return 0;
4618}
4619
static void cxgb4_check_pcie_caps(struct adapter *adap)
{
	enum pcie_link_width width, width_cap;
	enum pci_bus_speed speed, speed_cap;

#define PCIE_SPEED_STR(speed) \
	(speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
	 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
	 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
	 "Unknown")

	if (cxgb4_get_pcie_dev_link_caps(adap, &speed_cap, &width_cap)) {
		dev_warn(adap->pdev_dev,
			 "Unable to determine PCIe device BW capabilities\n");
		return;
	}

	if (pcie_get_minimum_link(adap->pdev, &speed, &width) ||
	    speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
		dev_warn(adap->pdev_dev,
			 "Unable to determine PCI Express bandwidth.\n");
		return;
	}

	dev_info(adap->pdev_dev, "PCIe link speed is %s, device supports %s\n",
		 PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
	dev_info(adap->pdev_dev, "PCIe link width is x%d, device supports x%d\n",
		 width, width_cap);
	if (speed < speed_cap || width < width_cap)
		dev_info(adap->pdev_dev,
			 "A slot with more lanes and/or higher speed is "
			 "suggested for optimal performance.\n");
}

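/* Log a one-line summary of a port: supported link speeds, port/module
 * type, offload capability, and the interrupt mode in use.
 */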
static void print_port_info(const struct net_device *dev)
{
	char buf[80];
	char *bufp = buf;
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;

	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
		bufp += sprintf(bufp, "100/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
		bufp += sprintf(bufp, "1000/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
		bufp += sprintf(bufp, "10G/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
		bufp += sprintf(bufp, "40G/");
	if (bufp != buf)
		--bufp;
	sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));

	netdev_info(dev, "Chelsio %s rev %d %s %sNIC %s\n",
		    adap->params.vpd.id,
		    CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
		    is_offload(adap) ? "R" : "",
		    (adap->flags & USING_MSIX) ? " MSI-X" :
		    (adap->flags & USING_MSI) ? " MSI" : "");
	netdev_info(dev, "S/N: %s, P/N: %s\n",
		    adap->params.vpd.sn, adap->params.vpd.pn);
}

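/* Turn on PCIe relaxed ordering in the device's DEVCTL register */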
static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}

/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 */
static void free_some_resources(struct adapter *adapter)
{
	unsigned int i;

	t4_free_mem(adapter->l2t);
	t4_free_mem(adapter->tids.tid_tab);
	kfree(adapter->sge.egr_map);
	kfree(adapter->sge.ingr_map);
	kfree(adapter->sge.starving_fl);
	kfree(adapter->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
	kfree(adapter->sge.blocked_fl);
#endif
	disable_msi(adapter);

	for_each_port(adapter, i)
		if (adapter->port[i]) {
			struct port_info *pi = adap2pinfo(adapter, i);

			if (pi->viid != 0)
				t4_free_vi(adapter, adapter->mbox, adapter->pf,
					   0, pi->viid);
			kfree(pi->rss);
			free_netdev(adapter->port[i]);
		}
	if (adapter->flags & FW_OK)
		t4_fw_bye(adapter, adapter->pf);
}

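/* netdev feature sets and the SGE doorbell segment size used by
 * init_one() below.
 */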
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128

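/* The high nibble of the PCI device ID encodes the chip generation
 * (T4/T5/T6); combine it with the PL revision to get the chip type.
 */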
static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
{
	u16 device_id;

	/* Retrieve adapter's device ID */
	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);

	switch (device_id >> 12) {
	case CHELSIO_T4:
		return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
	case CHELSIO_T5:
		return CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
	case CHELSIO_T6:
		return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
	default:
		dev_err(&pdev->dev, "Device %d is not supported\n",
			device_id);
	}
	return -EINVAL;
}

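/* PCI probe: map BAR0, check that we are attached through the expected
 * Physical Function, set up DMA and interrupt resources, allocate the
 * adapter and per-port net devices, and register everything.
 */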
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int func, i, err, s_qpp, qpp, num_seg;
	struct port_info *pi;
	bool highdma = false;
	struct adapter *adapter = NULL;
	void __iomem *regs;
	u32 whoami, pl_rev;
	enum chip_type chip;

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_disable_device;
	}

	err = t4_wait_dev_ready(regs);
	if (err < 0)
		goto out_unmap_bar0;

	/* We control everything through one PF */
	whoami = readl(regs + PL_WHOAMI_A);
	pl_rev = REV_G(readl(regs + PL_REV_A));
	chip = get_chip_type(pdev, pl_rev);
	func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
		SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
	if (func != ent->driver_data) {
		iounmap(regs);
		pci_disable_device(pdev);
		pci_save_state(pdev);	/* to restore SR-IOV later */
		goto sriov;
	}

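	/* Set up DMA: prefer a 64-bit mask, fall back to 32-bit */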
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = true;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_unmap_bar0;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_unmap_bar0;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	enable_pcie_relaxed_ordering(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_unmap_bar0;
	}

	adapter->workq = create_singlethread_workqueue("cxgb4");
	if (!adapter->workq) {
		err = -ENOMEM;
		goto out_free_adapter;
	}

	/* PCI device has been enabled */
	adapter->flags |= DEV_ENABLED;

	adapter->regs = regs;
	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->mbox = func;
	adapter->pf = func;
	adapter->msg_enable = dflt_msg_enable;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);
	spin_lock_init(&adapter->win0_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
	INIT_WORK(&adapter->db_full_task, process_db_full);
	INIT_WORK(&adapter->db_drop_task, process_db_drop);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_free_adapter;

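	/* T5 and later map the SGE doorbell region in BAR2 with write
	 * combining, provided the egress queues-per-page setting fits the
	 * 128-byte doorbell segment size checked below.
	 */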
	if (!is_t4(adapter->params.chip)) {
		s_qpp = (QUEUESPERPAGEPF0_S +
			 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
			 adapter->pf);
		qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
		      SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
		num_seg = PAGE_SIZE / SEGMENT_SIZE;

		/* Each segment is 128B.  Write coalescing can only be used
		 * if the number of egress queues per page does not exceed
		 * the number of 128B segments that fit in a page.
		 */
		if (qpp > num_seg) {
			dev_err(&pdev->dev,
				"Incorrect number of egress queues per page\n");
			err = -EINVAL;
			goto out_free_adapter;
		}
		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
					   pci_resource_len(pdev, 2));
		if (!adapter->bar2) {
			dev_err(&pdev->dev, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_free_adapter;
		}
	}

	setup_memwin(adapter);
	err = adap_init0(adapter);
#ifdef CONFIG_DEBUG_FS
	bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
#endif
	setup_memwin_rdma(adapter);
	if (err)
		goto out_unmap_bar;

	/* configure SGE_STAT_CFG_A to read WC stats */
	if (!is_t4(adapter->params.chip))
		t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |
			     (is_t5(adapter->params.chip) ? STATMODE_V(0) :
			      T6_STATMODE_V(0)));

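	/* Allocate and minimally initialize a net_device for each port;
	 * the real queue counts are applied later, once the interrupt
	 * resources are known.
	 */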
	for_each_port(adapter, i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
		if (highdma)
			netdev->hw_features |= NETIF_F_HIGHDMA;
		netdev->features |= netdev->hw_features;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->priv_flags |= IFF_UNICAST_FLT;

		netdev->netdev_ops = &cxgb4_netdev_ops;
#ifdef CONFIG_CHELSIO_T4_DCB
		netdev->dcbnl_ops = &cxgb4_dcb_ops;
		cxgb4_dcb_state_init(netdev);
#endif
		cxgb4_set_ethtool_ops(netdev);
	}

	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	} else if (adapter->params.nports == 1) {
		/* If we don't have a connection to the firmware -- possibly
		 * because of an error -- grab the raw VPD parameters so we
		 * can set the proper MAC Address on the debug network
		 * interface that we've created.
		 */
		u8 hw_addr[ETH_ALEN];
		u8 *na = adapter->params.vpd.na;

		err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
		if (!err) {
			for (i = 0; i < ETH_ALEN; i++)
				hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
					      hex2val(na[2 * i + 1]));
			t4_set_hw_addr(adapter, 0, hw_addr);
		}
	}

	/* Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);

	adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

#if IS_ENABLED(CONFIG_IPV6)
	if ((CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) &&
	    (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) {
		/* CLIP functionality is not present in hardware,
		 * hence disable all offload features
		 */
		dev_warn(&pdev->dev,
			 "CLIP not enabled in hardware, continuing\n");
		adapter->params.offload = 0;
	} else {
		adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
						  adapter->clipt_end);
		if (!adapter->clipt) {
			/* We tolerate a lack of clip_table, giving up
			 * some functionality
			 */
			dev_warn(&pdev->dev,
				 "could not allocate Clip table, continuing\n");
			adapter->params.offload = 0;
		}
	}
#endif
	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
		adapter->params.offload = 0;
	}

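	/* If the hardware's TID hash region is enabled, record its base so
	 * hash-filter TIDs can be translated; note the base is reported in
	 * different units on T4/T5 versus T6, hence the divide by four on
	 * the older chips.
	 */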
	if (is_offload(adapter)) {
		if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
			u32 hash_base, hash_reg;

			if (chip <= CHELSIO_T5) {
				hash_reg = LE_DB_TID_HASHBASE_A;
				hash_base = t4_read_reg(adapter, hash_reg);
				adapter->tids.hash_base = hash_base / 4;
			} else {
				hash_reg = T6_LE_DB_HASH_TID_BASE_A;
				hash_base = t4_read_reg(adapter, hash_reg);
				adapter->tids.hash_base = hash_base;
			}
		}
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	/* check for PCI Express bandwidth capabilities */
	cxgb4_check_pcie_caps(adapter);

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_offload(adapter))
		attach_ulds(adapter);

sriov:
#ifdef CONFIG_PCI_IOV
	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
			dev_info(&pdev->dev,
				 "instantiated %u virtual functions\n",
				 num_vf[func]);
#endif
	return 0;

 out_free_dev:
	free_some_resources(adapter);
 out_unmap_bar:
	if (!is_t4(adapter->params.chip))
		iounmap(adapter->bar2);
 out_free_adapter:
	if (adapter->workq)
		destroy_workqueue(adapter->workq);

	kfree(adapter);
 out_unmap_bar0:
	iounmap(regs);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	return err;
}

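/* PCI remove: tear down in roughly the reverse order of init_one().  This
 * also serves as the driver's shutdown handler.
 */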
static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

#ifdef CONFIG_PCI_IOV
	pci_disable_sriov(pdev);

#endif

	if (adapter) {
		int i;

		/* Tear down per-adapter Work Queue first since it can contain
		 * references to our adapter data structure.
		 */
		destroy_workqueue(adapter->workq);

		if (is_offload(adapter))
			detach_ulds(adapter);

		disable_interrupts(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		debugfs_remove_recursive(adapter->debugfs_root);

		/* If we allocated filters, free up state associated with any
		 * valid filters ...
		 */
		if (adapter->tids.ftid_tab) {
			struct filter_entry *f = &adapter->tids.ftid_tab[0];

			for (i = 0; i < (adapter->tids.nftids +
					adapter->tids.nsftids); i++, f++)
				if (f->valid)
					clear_filter(adapter, f);
		}

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		free_some_resources(adapter);
#if IS_ENABLED(CONFIG_IPV6)
		t4_cleanup_clip_tbl(adapter);
#endif
		iounmap(adapter->regs);
		if (!is_t4(adapter->params.chip))
			iounmap(adapter->bar2);
		pci_disable_pcie_error_reporting(pdev);
		if ((adapter->flags & DEV_ENABLED)) {
			pci_disable_device(pdev);
			adapter->flags &= ~DEV_ENABLED;
		}
		pci_release_regions(pdev);
		synchronize_rcu();
		kfree(adapter);
	} else
		pci_release_regions(pdev);
}

static struct pci_driver cxgb4_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe    = init_one,
	.remove   = remove_one,
	.shutdown = remove_one,
	.err_handler = &cxgb4_eeh,
};

static int __init cxgb4_init_module(void)
{
	int ret;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warn("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		debugfs_remove(cxgb4_debugfs_root);

#if IS_ENABLED(CONFIG_IPV6)
	if (!inet6addr_registered) {
		register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		inet6addr_registered = true;
	}
#endif

	return ret;
}

static void __exit cxgb4_cleanup_module(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (inet6addr_registered) {
		unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		inet6addr_registered = false;
	}
#endif
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);