35#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36
37#include <linux/bitmap.h>
38#include <linux/crc32.h>
39#include <linux/ctype.h>
40#include <linux/debugfs.h>
41#include <linux/err.h>
42#include <linux/etherdevice.h>
43#include <linux/firmware.h>
44#include <linux/if.h>
45#include <linux/if_vlan.h>
46#include <linux/init.h>
47#include <linux/log2.h>
48#include <linux/mdio.h>
49#include <linux/module.h>
50#include <linux/moduleparam.h>
51#include <linux/mutex.h>
52#include <linux/netdevice.h>
53#include <linux/pci.h>
54#include <linux/aer.h>
55#include <linux/rtnetlink.h>
56#include <linux/sched.h>
57#include <linux/seq_file.h>
58#include <linux/sockios.h>
59#include <linux/vmalloc.h>
60#include <linux/workqueue.h>
61#include <net/neighbour.h>
62#include <net/netevent.h>
63#include <net/addrconf.h>
64#include <net/bonding.h>
#include <linux/uaccess.h>
67#include <linux/crash_dump.h>
68
69#include "cxgb4.h"
70#include "cxgb4_filter.h"
71#include "t4_regs.h"
72#include "t4_values.h"
73#include "t4_msg.h"
74#include "t4fw_api.h"
75#include "t4fw_version.h"
76#include "cxgb4_dcb.h"
77#include "cxgb4_debugfs.h"
78#include "clip_tbl.h"
79#include "l2t.h"
80#include "sched.h"
81#include "cxgb4_tc_u32.h"
82
83char cxgb4_driver_name[] = KBUILD_MODNAME;
84
85#ifdef DRV_VERSION
86#undef DRV_VERSION
87#endif
88#define DRV_VERSION "2.0.0-ko"
89const char cxgb4_driver_version[] = DRV_VERSION;
90#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"
91
92#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
93 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
94 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
95
96
97
98#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
99 static const struct pci_device_id cxgb4_pci_tbl[] = {
100#define CH_PCI_DEVICE_ID_FUNCTION 0x4
101
102
103
104
105#define CH_PCI_DEVICE_ID_FUNCTION2 0x0
106
107#define CH_PCI_ID_TABLE_ENTRY(devid) \
108 {PCI_VDEVICE(CHELSIO, (devid)), 4}
109
110#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
111 { 0, } \
112 }
113
114#include "t4_pci_id_tbl.h"
115
116#define FW4_FNAME "cxgb4/t4fw.bin"
117#define FW5_FNAME "cxgb4/t5fw.bin"
118#define FW6_FNAME "cxgb4/t6fw.bin"
119#define FW4_CFNAME "cxgb4/t4-config.txt"
120#define FW5_CFNAME "cxgb4/t5-config.txt"
121#define FW6_CFNAME "cxgb4/t6-config.txt"
122#define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
123#define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
124#define PHY_AQ1202_DEVICEID 0x4409
125#define PHY_BCM84834_DEVICEID 0x4486
126
127MODULE_DESCRIPTION(DRV_DESC);
128MODULE_AUTHOR("Chelsio Communications");
129MODULE_LICENSE("Dual BSD/GPL");
130MODULE_VERSION(DRV_VERSION);
131MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
132MODULE_FIRMWARE(FW4_FNAME);
133MODULE_FIRMWARE(FW5_FNAME);
134MODULE_FIRMWARE(FW6_FNAME);
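
/* Interrupt scheme the driver is allowed to use: 0 = INTx only, 1 = MSI,
 * 2 = MSI-X (the default).
 */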
145static int msi = 2;
146
147module_param(msi, int, 0644);
148MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
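
/* Offset, in bytes, at which ingress packet data is placed in Rx buffers.
 * The default of 2 keeps the IP header aligned on a 4-byte boundary.
 */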
162static int rx_dma_offset = 2;
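
/* TX queue selection: 0 uses the kernel's default queue selection,
 * 1 uses the driver's cxgb_select_queue() method.
 */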
170static int select_queue;
171module_param(select_queue, int, 0644);
172MODULE_PARM_DESC(select_queue,
173 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
174
175static struct dentry *cxgb4_debugfs_root;
176
177LIST_HEAD(adapter_list);
178DEFINE_MUTEX(uld_mutex);
179
180static void link_report(struct net_device *dev)
181{
182 if (!netif_carrier_ok(dev))
183 netdev_info(dev, "link down\n");
184 else {
185 static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
186
187 const char *s;
188 const struct port_info *p = netdev_priv(dev);
189
190 switch (p->link_cfg.speed) {
191 case 100:
192 s = "100Mbps";
193 break;
194 case 1000:
195 s = "1Gbps";
196 break;
197 case 10000:
198 s = "10Gbps";
199 break;
200 case 25000:
201 s = "25Gbps";
202 break;
203 case 40000:
204 s = "40Gbps";
205 break;
206 case 100000:
207 s = "100Gbps";
208 break;
209 default:
210 pr_info("%s: unsupported speed: %d\n",
211 dev->name, p->link_cfg.speed);
212 return;
213 }
214
215 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
216 fc[p->link_cfg.fc]);
217 }
218}
219
220#ifdef CONFIG_CHELSIO_T4_DCB
221
222static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
223{
224 struct port_info *pi = netdev_priv(dev);
225 struct adapter *adap = pi->adapter;
226 struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
227 int i;
228
229
230
231
232 for (i = 0; i < pi->nqsets; i++, txq++) {
233 u32 name, value;
234 int err;
235
236 name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
237 FW_PARAMS_PARAM_X_V(
238 FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
239 FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
240 value = enable ? i : 0xffffffff;
241
242
243
244
245
246 err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
247 &name, &value,
248 -FW_CMD_MAX_TIMEOUT);
249
250 if (err)
251 dev_err(adap->pdev_dev,
252 "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
253 enable ? "set" : "unset", pi->port_id, i, -err);
254 else
255 txq->dcb_prio = value;
256 }
257}
258
259static int cxgb4_dcb_enabled(const struct net_device *dev)
260{
261 struct port_info *pi = netdev_priv(dev);
262
263 if (!pi->dcb.enabled)
264 return 0;
265
266 return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
267 (pi->dcb.state == CXGB4_DCB_STATE_HOST));
268}
269#endif
270
271void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
272{
273 struct net_device *dev = adapter->port[port_id];
274
275
276 if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
277 if (link_stat)
278 netif_carrier_on(dev);
279 else {
280#ifdef CONFIG_CHELSIO_T4_DCB
281 if (cxgb4_dcb_enabled(dev)) {
282 cxgb4_dcb_state_init(dev);
283 dcb_tx_queue_prio_enable(dev, false);
284 }
285#endif
286 netif_carrier_off(dev);
287 }
288
289 link_report(dev);
290 }
291}
292
293void t4_os_portmod_changed(const struct adapter *adap, int port_id)
294{
295 static const char *mod_str[] = {
296 NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
297 };
298
299 const struct net_device *dev = adap->port[port_id];
300 const struct port_info *pi = netdev_priv(dev);
301
302 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
303 netdev_info(dev, "port module unplugged\n");
304 else if (pi->mod_type < ARRAY_SIZE(mod_str))
305 netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
306 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
307 netdev_info(dev, "%s: unsupported port module inserted\n",
308 dev->name);
309 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
310 netdev_info(dev, "%s: unknown port module inserted\n",
311 dev->name);
312 else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
313 netdev_info(dev, "%s: transceiver module error\n", dev->name);
314 else
315 netdev_info(dev, "%s: unknown module type %d inserted\n",
316 dev->name, pi->mod_type);
317}
318
319int dbfifo_int_thresh = 10;
320module_param(dbfifo_int_thresh, int, 0644);
321MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
322
323
324
325
326static int dbfifo_drain_delay = 1000;
327module_param(dbfifo_drain_delay, int, 0644);
328MODULE_PARM_DESC(dbfifo_drain_delay,
329 "usecs to sleep while draining the dbfifo");
330
331static inline int cxgb4_set_addr_hash(struct port_info *pi)
332{
333 struct adapter *adap = pi->adapter;
334 u64 vec = 0;
335 bool ucast = false;
336 struct hash_mac_addr *entry;
337
338
339 list_for_each_entry(entry, &adap->mac_hlist, list) {
340 ucast |= is_unicast_ether_addr(entry->addr);
341 vec |= (1ULL << hash_mac_addr(entry->addr));
342 }
343 return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast,
344 vec, false);
345}
346
347static int cxgb4_mac_sync(struct net_device *netdev, const u8 *mac_addr)
348{
349 struct port_info *pi = netdev_priv(netdev);
350 struct adapter *adap = pi->adapter;
351 int ret;
352 u64 mhash = 0;
353 u64 uhash = 0;
354 bool free = false;
355 bool ucast = is_unicast_ether_addr(mac_addr);
356 const u8 *maclist[1] = {mac_addr};
357 struct hash_mac_addr *new_entry;
358
359 ret = t4_alloc_mac_filt(adap, adap->mbox, pi->viid, free, 1, maclist,
360 NULL, ucast ? &uhash : &mhash, false);
361 if (ret < 0)
362 goto out;
363
364
365
366
367 if (uhash || mhash) {
368 new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
369 if (!new_entry)
370 return -ENOMEM;
371 ether_addr_copy(new_entry->addr, mac_addr);
372 list_add_tail(&new_entry->list, &adap->mac_hlist);
373 ret = cxgb4_set_addr_hash(pi);
374 }
375out:
376 return ret < 0 ? ret : 0;
377}
378
379static int cxgb4_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
380{
381 struct port_info *pi = netdev_priv(netdev);
382 struct adapter *adap = pi->adapter;
383 int ret;
384 const u8 *maclist[1] = {mac_addr};
385 struct hash_mac_addr *entry, *tmp;
386
387
388
389
390 list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) {
391 if (ether_addr_equal(entry->addr, mac_addr)) {
392 list_del(&entry->list);
393 kfree(entry);
394 return cxgb4_set_addr_hash(pi);
395 }
396 }
397
398 ret = t4_free_mac_filt(adap, adap->mbox, pi->viid, 1, maclist, false);
399 return ret < 0 ? -EINVAL : 0;
400}
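
/* Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */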
406static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
407{
408 struct port_info *pi = netdev_priv(dev);
409 struct adapter *adapter = pi->adapter;
410
411 __dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
412 __dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
413
414 return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu,
415 (dev->flags & IFF_PROMISC) ? 1 : 0,
416 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
417 sleep_ok);
418}
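
/*
 * link_start - enable a port
 *
 * Performs the MAC and PHY actions needed to enable a port: sets the Rx
 * mode and MAC address, brings up the link, and enables the virtual
 * interface.
 */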
426static int link_start(struct net_device *dev)
427{
428 int ret;
429 struct port_info *pi = netdev_priv(dev);
430 unsigned int mb = pi->adapter->pf;
431
432
433
434
435
436 ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
437 !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
438 if (ret == 0) {
439 ret = t4_change_mac(pi->adapter, mb, pi->viid,
440 pi->xact_addr_filt, dev->dev_addr, true,
441 true);
442 if (ret >= 0) {
443 pi->xact_addr_filt = ret;
444 ret = 0;
445 }
446 }
447 if (ret == 0)
448 ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
449 &pi->link_cfg);
450 if (ret == 0) {
451 local_bh_disable();
452 ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
453 true, CXGB4_DCB_ENABLED);
454 local_bh_enable();
455 }
456
457 return ret;
458}
459
460#ifdef CONFIG_CHELSIO_T4_DCB
461
462static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
463{
464 int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
465 struct net_device *dev = adap->port[adap->chan_map[port]];
466 int old_dcb_enabled = cxgb4_dcb_enabled(dev);
467 int new_dcb_enabled;
468
469 cxgb4_dcb_handle_fw_update(adap, pcmd);
470 new_dcb_enabled = cxgb4_dcb_enabled(dev);
471
472
473
474
475
476 if (new_dcb_enabled != old_dcb_enabled)
477 dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
478}
479#endif
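
/* Response queue handler for the FW event queue. */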
483static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
484 const struct pkt_gl *gl)
485{
486 u8 opcode = ((const struct rss_header *)rsp)->opcode;
487
488 rsp++;
489
490
491
492 if (unlikely(opcode == CPL_FW4_MSG &&
493 ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
494 rsp++;
495 opcode = ((const struct rss_header *)rsp)->opcode;
496 rsp++;
497 if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adap->pdev_dev,
				"unexpected FW4/CPL %#x on FW event queue\n",
				opcode);
500 goto out;
501 }
502 }
503
504 if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
505 const struct cpl_sge_egr_update *p = (void *)rsp;
506 unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
507 struct sge_txq *txq;
508
509 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
510 txq->restarts++;
511 if (txq->q_type == CXGB4_TXQ_ETH) {
512 struct sge_eth_txq *eq;
513
514 eq = container_of(txq, struct sge_eth_txq, q);
515 netif_tx_wake_queue(eq->txq);
516 } else {
517 struct sge_uld_txq *oq;
518
519 oq = container_of(txq, struct sge_uld_txq, q);
520 tasklet_schedule(&oq->qresume_tsk);
521 }
522 } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
523 const struct cpl_fw6_msg *p = (void *)rsp;
524
525#ifdef CONFIG_CHELSIO_T4_DCB
526 const struct fw_port_cmd *pcmd = (const void *)p->data;
527 unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
528 unsigned int action =
529 FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));
530
531 if (cmd == FW_PORT_CMD &&
532 action == FW_PORT_ACTION_GET_PORT_INFO) {
533 int port = FW_PORT_CMD_PORTID_G(
534 be32_to_cpu(pcmd->op_to_portid));
535 struct net_device *dev =
536 q->adap->port[q->adap->chan_map[port]];
537 int state_input = ((pcmd->u.info.dcbxdis_pkd &
538 FW_PORT_CMD_DCBXDIS_F)
539 ? CXGB4_DCB_INPUT_FW_DISABLED
540 : CXGB4_DCB_INPUT_FW_ENABLED);
541
542 cxgb4_dcb_state_fsm(dev, state_input);
543 }
544
545 if (cmd == FW_PORT_CMD &&
546 action == FW_PORT_ACTION_L2_DCB_CFG)
547 dcb_rpl(q->adap, pcmd);
548 else
549#endif
550 if (p->type == 0)
551 t4_handle_fw_rpl(q->adap, p->data);
552 } else if (opcode == CPL_L2T_WRITE_RPL) {
553 const struct cpl_l2t_write_rpl *p = (void *)rsp;
554
555 do_l2t_write_rpl(q->adap, p);
556 } else if (opcode == CPL_SET_TCB_RPL) {
557 const struct cpl_set_tcb_rpl *p = (void *)rsp;
558
559 filter_rpl(q->adap, p);
560 } else
561 dev_err(q->adap->pdev_dev,
562 "unexpected CPL %#x on FW event queue\n", opcode);
563out:
564 return 0;
565}
566
567static void disable_msi(struct adapter *adapter)
568{
569 if (adapter->flags & USING_MSIX) {
570 pci_disable_msix(adapter->pdev);
571 adapter->flags &= ~USING_MSIX;
572 } else if (adapter->flags & USING_MSI) {
573 pci_disable_msi(adapter->pdev);
574 adapter->flags &= ~USING_MSI;
575 }
576}
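
/*
 * Interrupt handler for non-data events used with MSI-X interrupts.
 */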
581static irqreturn_t t4_nondata_intr(int irq, void *cookie)
582{
583 struct adapter *adap = cookie;
584 u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));
585
586 if (v & PFSW_F) {
587 adap->swintr = 1;
588 t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
589 }
590 if (adap->flags & MASTER_PF)
591 t4_slow_intr_handler(adap);
592 return IRQ_HANDLED;
593}
594
595
596
597
598static void name_msix_vecs(struct adapter *adap)
599{
600 int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
601
602
603 snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
604
605
606 snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
607 adap->port[0]->name);
608
609
610 for_each_port(adap, j) {
611 struct net_device *d = adap->port[j];
612 const struct port_info *pi = netdev_priv(d);
613
614 for (i = 0; i < pi->nqsets; i++, msi_idx++)
615 snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
616 d->name, i);
617 }
618}
619
620static int request_msix_queue_irqs(struct adapter *adap)
621{
622 struct sge *s = &adap->sge;
623 int err, ethqidx;
624 int msi_index = 2;
625
626 err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
627 adap->msix_info[1].desc, &s->fw_evtq);
628 if (err)
629 return err;
630
631 for_each_ethrxq(s, ethqidx) {
632 err = request_irq(adap->msix_info[msi_index].vec,
633 t4_sge_intr_msix, 0,
634 adap->msix_info[msi_index].desc,
635 &s->ethrxq[ethqidx].rspq);
636 if (err)
637 goto unwind;
638 msi_index++;
639 }
640 return 0;
641
642unwind:
643 while (--ethqidx >= 0)
644 free_irq(adap->msix_info[--msi_index].vec,
645 &s->ethrxq[ethqidx].rspq);
646 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
647 return err;
648}
649
650static void free_msix_queue_irqs(struct adapter *adap)
651{
652 int i, msi_index = 2;
653 struct sge *s = &adap->sge;
654
655 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
656 for_each_ethrxq(s, i)
657 free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
658}
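
/**
 * cxgb4_write_rss - write the RSS table for a given port
 * @pi: the port
 * @queues: array of queue indices for RSS
 *
 * Sets up the portion of the HW RSS table for the port's VI to distribute
 * packets to the Rx queues in @queues.
 */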
669int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
670{
671 u16 *rss;
672 int i, err;
673 struct adapter *adapter = pi->adapter;
674 const struct sge_eth_rxq *rxq;
675
676 rxq = &adapter->sge.ethrxq[pi->first_qset];
	rss = kmalloc_array(pi->rss_size, sizeof(u16), GFP_KERNEL);
678 if (!rss)
679 return -ENOMEM;
680
681
682 for (i = 0; i < pi->rss_size; i++, queues++)
683 rss[i] = rxq[*queues].rspq.abs_id;
684
685 err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
686 pi->rss_size, rss, pi->rss_size);
687
688
689
690
691
692 if (!err)
693 err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
694 FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
695 FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
696 FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
697 FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
698 FW_RSS_VI_CONFIG_CMD_UDPEN_F,
699 rss[0]);
700 kfree(rss);
701 return err;
702}
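
/**
 * setup_rss - configure RSS
 * @adap: the adapter
 *
 * Sets up RSS for each port: the RSS map is filled with the port's Rx
 * queues in round-robin order and written to the hardware.
 */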
710static int setup_rss(struct adapter *adap)
711{
712 int i, j, err;
713
714 for_each_port(adap, i) {
715 const struct port_info *pi = adap2pinfo(adap, i);
716
717
718 for (j = 0; j < pi->rss_size; j++)
719 pi->rss[j] = j % pi->nqsets;
720
721 err = cxgb4_write_rss(pi, pi->rss);
722 if (err)
723 return err;
724 }
725 return 0;
726}
727
728
729
730
731static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
732{
733 qid -= p->ingr_start;
734 return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
735}
736
737
738
739
740static void quiesce_rx(struct adapter *adap)
741{
742 int i;
743
744 for (i = 0; i < adap->sge.ingr_sz; i++) {
745 struct sge_rspq *q = adap->sge.ingr_map[i];
746
747 if (q && q->handler) {
748 napi_disable(&q->napi);
749 local_bh_disable();
750 while (!cxgb_poll_lock_napi(q))
751 mdelay(1);
752 local_bh_enable();
753 }
754
755 }
756}
757
758
759static void disable_interrupts(struct adapter *adap)
760{
761 if (adap->flags & FULL_INIT_DONE) {
762 t4_intr_disable(adap);
763 if (adap->flags & USING_MSIX) {
764 free_msix_queue_irqs(adap);
765 free_irq(adap->msix_info[0].vec, adap);
766 } else {
767 free_irq(adap->pdev->irq, adap);
768 }
769 quiesce_rx(adap);
770 }
771}
772
773
774
775
776static void enable_rx(struct adapter *adap)
777{
778 int i;
779
780 for (i = 0; i < adap->sge.ingr_sz; i++) {
781 struct sge_rspq *q = adap->sge.ingr_map[i];
782
783 if (!q)
784 continue;
785 if (q->handler) {
786 cxgb_busy_poll_init_lock(q);
787 napi_enable(&q->napi);
788 }
789
790 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
791 SEINTARM_V(q->intr_params) |
792 INGRESSQID_V(q->cntxt_id));
793 }
794}
795
796
797static int setup_fw_sge_queues(struct adapter *adap)
798{
799 struct sge *s = &adap->sge;
800 int err = 0;
801
802 bitmap_zero(s->starving_fl, s->egr_sz);
803 bitmap_zero(s->txq_maperr, s->egr_sz);
804
805 if (adap->flags & USING_MSIX)
806 adap->msi_idx = 1;
807 else {
808 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
809 NULL, NULL, NULL, -1);
810 if (err)
811 return err;
812 adap->msi_idx = -((int)s->intrq.abs_id + 1);
813 }
814
815 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
816 adap->msi_idx, NULL, fwevtq_handler, NULL, -1);
817 if (err)
818 t4_free_sge_resources(adap);
819 return err;
820}
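
/**
 * setup_sge_queues - configure SGE Tx/Rx/response queues
 * @adap: the adapter
 *
 * Allocates the Ethernet Rx/Tx queue sets for each port and the per-port
 * control queues, then programs the RSS/trace control register with the
 * first Rx queue.
 */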
830static int setup_sge_queues(struct adapter *adap)
831{
832 int err, i, j;
833 struct sge *s = &adap->sge;
834 struct sge_uld_rxq_info *rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
835 unsigned int cmplqid = 0;
836
837 for_each_port(adap, i) {
838 struct net_device *dev = adap->port[i];
839 struct port_info *pi = netdev_priv(dev);
840 struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
841 struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
842
843 for (j = 0; j < pi->nqsets; j++, q++) {
844 if (adap->msi_idx > 0)
845 adap->msi_idx++;
846 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
847 adap->msi_idx, &q->fl,
848 t4_ethrx_handler,
849 NULL,
850 t4_get_mps_bg_map(adap,
851 pi->tx_chan));
852 if (err)
853 goto freeout;
854 q->rspq.idx = j;
855 memset(&q->stats, 0, sizeof(q->stats));
856 }
857 for (j = 0; j < pi->nqsets; j++, t++) {
858 err = t4_sge_alloc_eth_txq(adap, t, dev,
859 netdev_get_tx_queue(dev, j),
860 s->fw_evtq.cntxt_id);
861 if (err)
862 goto freeout;
863 }
864 }
865
866 for_each_port(adap, i) {
867
868
869
870 if (rxq_info)
871 cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
872
873 err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
874 s->fw_evtq.cntxt_id, cmplqid);
875 if (err)
876 goto freeout;
877 }
878
879 t4_write_reg(adap, is_t4(adap->params.chip) ?
880 MPS_TRC_RSS_CONTROL_A :
881 MPS_T5_TRC_RSS_CONTROL_A,
882 RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
883 QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
884 return 0;
885freeout:
886 t4_free_sge_resources(adap);
887 return err;
888}
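
/*
 * Allocate a chunk of memory using kzalloc or, if that fails, vzalloc.
 * The allocated memory is cleared.
 */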
894void *t4_alloc_mem(size_t size)
895{
896 void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
897
898 if (!p)
899 p = vzalloc(size);
900 return p;
901}
902
903
904
905
906void t4_free_mem(void *addr)
907{
908 kvfree(addr);
909}
910
911static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
912 void *accel_priv, select_queue_fallback_t fallback)
913{
914 int txq;
915
916#ifdef CONFIG_CHELSIO_T4_DCB
917
918
919
920
921
922 if (cxgb4_dcb_enabled(dev)) {
923 u16 vlan_tci;
924 int err;
925
926 err = vlan_get_tag(skb, &vlan_tci);
927 if (unlikely(err)) {
928 if (net_ratelimit())
929 netdev_warn(dev,
930 "TX Packet without VLAN Tag on DCB Link\n");
931 txq = 0;
932 } else {
933 txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
934 }
935 return txq;
936 }
937#endif
938
939 if (select_queue) {
940 txq = (skb_rx_queue_recorded(skb)
941 ? skb_get_rx_queue(skb)
942 : smp_processor_id());
943
944 while (unlikely(txq >= dev->real_num_tx_queues))
945 txq -= dev->real_num_tx_queues;
946
947 return txq;
948 }
949
950 return fallback(dev, skb) % dev->real_num_tx_queues;
951}
952
953static int closest_timer(const struct sge *s, int time)
954{
955 int i, delta, match = 0, min_delta = INT_MAX;
956
957 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
958 delta = time - s->timer_val[i];
959 if (delta < 0)
960 delta = -delta;
961 if (delta < min_delta) {
962 min_delta = delta;
963 match = i;
964 }
965 }
966 return match;
967}
968
969static int closest_thres(const struct sge *s, int thres)
970{
971 int i, delta, match = 0, min_delta = INT_MAX;
972
973 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
974 delta = thres - s->counter_val[i];
975 if (delta < 0)
976 delta = -delta;
977 if (delta < min_delta) {
978 min_delta = delta;
979 match = i;
980 }
981 }
982 return match;
983}
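
/**
 * cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
 * @q: the Rx queue
 * @us: the hold-off time in us, or 0 to disable timer
 * @cnt: the hold-off packet count, or 0 to disable counter
 *
 * Sets an Rx queue's interrupt hold-off time and packet count.  At least
 * one of the two needs to be enabled for the queue to generate interrupts.
 */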
994int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
995 unsigned int us, unsigned int cnt)
996{
997 struct adapter *adap = q->adap;
998
999 if ((us | cnt) == 0)
1000 cnt = 1;
1001
1002 if (cnt) {
1003 int err;
1004 u32 v, new_idx;
1005
1006 new_idx = closest_thres(&adap->sge, cnt);
1007 if (q->desc && q->pktcnt_idx != new_idx) {
1008
1009 v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
1010 FW_PARAMS_PARAM_X_V(
1011 FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
1012 FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
1013 err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
1014 &v, &new_idx);
1015 if (err)
1016 return err;
1017 }
1018 q->pktcnt_idx = new_idx;
1019 }
1020
1021 us = us == 0 ? 6 : closest_timer(&adap->sge, us);
1022 q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
1023 return 0;
1024}
1025
1026static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
1027{
1028 const struct port_info *pi = netdev_priv(dev);
1029 netdev_features_t changed = dev->features ^ features;
1030 int err;
1031
1032 if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
1033 return 0;
1034
1035 err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1,
1036 -1, -1, -1,
1037 !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
1038 if (unlikely(err))
1039 dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
1040 return err;
1041}
1042
1043static int setup_debugfs(struct adapter *adap)
1044{
1045 if (IS_ERR_OR_NULL(adap->debugfs_root))
1046 return -1;
1047
1048#ifdef CONFIG_DEBUG_FS
1049 t4_setup_debugfs(adap);
1050#endif
1051 return 0;
1052}
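
/*
 * Allocate an active-open TID and set it to the supplied value.
 */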
1061int cxgb4_alloc_atid(struct tid_info *t, void *data)
1062{
1063 int atid = -1;
1064
1065 spin_lock_bh(&t->atid_lock);
1066 if (t->afree) {
1067 union aopen_entry *p = t->afree;
1068
1069 atid = (p - t->atid_tab) + t->atid_base;
1070 t->afree = p->next;
1071 p->data = data;
1072 t->atids_in_use++;
1073 }
1074 spin_unlock_bh(&t->atid_lock);
1075 return atid;
1076}
1077EXPORT_SYMBOL(cxgb4_alloc_atid);
1078
1079
1080
1081
1082void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
1083{
1084 union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
1085
1086 spin_lock_bh(&t->atid_lock);
1087 p->next = t->afree;
1088 t->afree = p;
1089 t->atids_in_use--;
1090 spin_unlock_bh(&t->atid_lock);
1091}
1092EXPORT_SYMBOL(cxgb4_free_atid);
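
/*
 * Allocate a server TID and set it to the supplied value.
 */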
1097int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
1098{
1099 int stid;
1100
1101 spin_lock_bh(&t->stid_lock);
1102 if (family == PF_INET) {
1103 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
1104 if (stid < t->nstids)
1105 __set_bit(stid, t->stid_bmap);
1106 else
1107 stid = -1;
1108 } else {
1109 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1);
1110 if (stid < 0)
1111 stid = -1;
1112 }
1113 if (stid >= 0) {
1114 t->stid_tab[stid].data = data;
1115 stid += t->stid_base;
1116
1117
1118
1119
1120 if (family == PF_INET)
1121 t->stids_in_use++;
1122 else
1123 t->stids_in_use += 2;
1124 }
1125 spin_unlock_bh(&t->stid_lock);
1126 return stid;
1127}
1128EXPORT_SYMBOL(cxgb4_alloc_stid);
1129
1130
1131
1132int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
1133{
1134 int stid;
1135
1136 spin_lock_bh(&t->stid_lock);
1137 if (family == PF_INET) {
1138 stid = find_next_zero_bit(t->stid_bmap,
1139 t->nstids + t->nsftids, t->nstids);
1140 if (stid < (t->nstids + t->nsftids))
1141 __set_bit(stid, t->stid_bmap);
1142 else
1143 stid = -1;
1144 } else {
1145 stid = -1;
1146 }
1147 if (stid >= 0) {
1148 t->stid_tab[stid].data = data;
1149 stid -= t->nstids;
1150 stid += t->sftid_base;
1151 t->sftids_in_use++;
1152 }
1153 spin_unlock_bh(&t->stid_lock);
1154 return stid;
1155}
1156EXPORT_SYMBOL(cxgb4_alloc_sftid);
1157
1158
1159
1160void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
1161{
1162
1163 if (t->nsftids && (stid >= t->sftid_base)) {
1164 stid -= t->sftid_base;
1165 stid += t->nstids;
1166 } else {
1167 stid -= t->stid_base;
1168 }
1169
1170 spin_lock_bh(&t->stid_lock);
1171 if (family == PF_INET)
1172 __clear_bit(stid, t->stid_bmap);
1173 else
1174 bitmap_release_region(t->stid_bmap, stid, 1);
1175 t->stid_tab[stid].data = NULL;
1176 if (stid < t->nstids) {
1177 if (family == PF_INET)
1178 t->stids_in_use--;
1179 else
1180 t->stids_in_use -= 2;
1181 } else {
1182 t->sftids_in_use--;
1183 }
1184 spin_unlock_bh(&t->stid_lock);
1185}
1186EXPORT_SYMBOL(cxgb4_free_stid);
1187
1188
1189
1190
1191static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
1192 unsigned int tid)
1193{
1194 struct cpl_tid_release *req;
1195
1196 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
1197 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
1198 INIT_TP_WR(req, tid);
1199 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
1200}
1201
1202
1203
1204
1205
1206static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
1207 unsigned int tid)
1208{
1209 void **p = &t->tid_tab[tid];
1210 struct adapter *adap = container_of(t, struct adapter, tids);
1211
1212 spin_lock_bh(&adap->tid_release_lock);
1213 *p = adap->tid_release_head;
1214
1215 adap->tid_release_head = (void **)((uintptr_t)p | chan);
1216 if (!adap->tid_release_task_busy) {
1217 adap->tid_release_task_busy = true;
1218 queue_work(adap->workq, &adap->tid_release_task);
1219 }
1220 spin_unlock_bh(&adap->tid_release_lock);
1221}
1222
1223
1224
1225
1226static void process_tid_release_list(struct work_struct *work)
1227{
1228 struct sk_buff *skb;
1229 struct adapter *adap;
1230
1231 adap = container_of(work, struct adapter, tid_release_task);
1232
1233 spin_lock_bh(&adap->tid_release_lock);
1234 while (adap->tid_release_head) {
1235 void **p = adap->tid_release_head;
1236 unsigned int chan = (uintptr_t)p & 3;
1237 p = (void *)p - chan;
1238
1239 adap->tid_release_head = *p;
1240 *p = NULL;
1241 spin_unlock_bh(&adap->tid_release_lock);
1242
1243 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
1244 GFP_KERNEL)))
1245 schedule_timeout_uninterruptible(1);
1246
1247 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
1248 t4_ofld_send(adap, skb);
1249 spin_lock_bh(&adap->tid_release_lock);
1250 }
1251 adap->tid_release_task_busy = false;
1252 spin_unlock_bh(&adap->tid_release_lock);
1253}
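
/*
 * Release a TID and inform HW.  If we are unable to allocate the release
 * message we defer to a work queue.
 */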
1259void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
1260{
1261 struct sk_buff *skb;
1262 struct adapter *adap = container_of(t, struct adapter, tids);
1263
1264 WARN_ON(tid >= t->ntids);
1265
1266 if (t->tid_tab[tid]) {
1267 t->tid_tab[tid] = NULL;
1268 if (t->hash_base && (tid >= t->hash_base))
1269 atomic_dec(&t->hash_tids_in_use);
1270 else
1271 atomic_dec(&t->tids_in_use);
1272 }
1273
1274 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
1275 if (likely(skb)) {
1276 mk_tid_release(skb, chan, tid);
1277 t4_ofld_send(adap, skb);
1278 } else
1279 cxgb4_queue_tid_release(t, chan, tid);
1280}
1281EXPORT_SYMBOL(cxgb4_remove_tid);
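
/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */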
1286static int tid_init(struct tid_info *t)
1287{
1288 struct adapter *adap = container_of(t, struct adapter, tids);
1289 unsigned int max_ftids = t->nftids + t->nsftids;
1290 unsigned int natids = t->natids;
1291 unsigned int stid_bmap_size;
1292 unsigned int ftid_bmap_size;
1293 size_t size;
1294
1295 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
1296 ftid_bmap_size = BITS_TO_LONGS(t->nftids);
1297 size = t->ntids * sizeof(*t->tid_tab) +
1298 natids * sizeof(*t->atid_tab) +
1299 t->nstids * sizeof(*t->stid_tab) +
1300 t->nsftids * sizeof(*t->stid_tab) +
1301 stid_bmap_size * sizeof(long) +
1302 max_ftids * sizeof(*t->ftid_tab) +
1303 ftid_bmap_size * sizeof(long);
1304
1305 t->tid_tab = t4_alloc_mem(size);
1306 if (!t->tid_tab)
1307 return -ENOMEM;
1308
1309 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
1310 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
1311 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
1312 t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
1313 t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
1314 spin_lock_init(&t->stid_lock);
1315 spin_lock_init(&t->atid_lock);
1316 spin_lock_init(&t->ftid_lock);
1317
1318 t->stids_in_use = 0;
1319 t->sftids_in_use = 0;
1320 t->afree = NULL;
1321 t->atids_in_use = 0;
1322 atomic_set(&t->tids_in_use, 0);
1323 atomic_set(&t->hash_tids_in_use, 0);
1324
1325
1326 if (natids) {
1327 while (--natids)
1328 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
1329 t->afree = t->atid_tab;
1330 }
1331
1332 if (is_offload(adap)) {
1333 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
1334
1335 if (!t->stid_base &&
1336 CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
1337 __set_bit(0, t->stid_bmap);
1338 }
1339
1340 bitmap_zero(t->ftid_bmap, t->nftids);
1341 return 0;
1342}
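
/**
 * cxgb4_create_server - create an IP server
 * @dev: the device
 * @stid: the server TID
 * @sip: local IP address to bind server to
 * @sport: the server's TCP port
 * @queue: queue to direct messages from this server to
 *
 * Create an IP server for the given port and address.
 * Returns <0 on error and one of the %NET_XMIT_* values on success.
 */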
1355int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
1356 __be32 sip, __be16 sport, __be16 vlan,
1357 unsigned int queue)
1358{
1359 unsigned int chan;
1360 struct sk_buff *skb;
1361 struct adapter *adap;
1362 struct cpl_pass_open_req *req;
1363 int ret;
1364
1365 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
1366 if (!skb)
1367 return -ENOMEM;
1368
1369 adap = netdev2adap(dev);
1370 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
1371 INIT_TP_WR(req, 0);
1372 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
1373 req->local_port = sport;
1374 req->peer_port = htons(0);
1375 req->local_ip = sip;
1376 req->peer_ip = htonl(0);
1377 chan = rxq_to_chan(&adap->sge, queue);
1378 req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
1379 req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
1380 SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
1381 ret = t4_mgmt_tx(adap, skb);
1382 return net_xmit_eval(ret);
1383}
1384EXPORT_SYMBOL(cxgb4_create_server);
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
1397 const struct in6_addr *sip, __be16 sport,
1398 unsigned int queue)
1399{
1400 unsigned int chan;
1401 struct sk_buff *skb;
1402 struct adapter *adap;
1403 struct cpl_pass_open_req6 *req;
1404 int ret;
1405
1406 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
1407 if (!skb)
1408 return -ENOMEM;
1409
1410 adap = netdev2adap(dev);
1411 req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
1412 INIT_TP_WR(req, 0);
1413 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
1414 req->local_port = sport;
1415 req->peer_port = htons(0);
1416 req->local_ip_hi = *(__be64 *)(sip->s6_addr);
1417 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
1418 req->peer_ip_hi = cpu_to_be64(0);
1419 req->peer_ip_lo = cpu_to_be64(0);
1420 chan = rxq_to_chan(&adap->sge, queue);
1421 req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
1422 req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
1423 SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
1424 ret = t4_mgmt_tx(adap, skb);
1425 return net_xmit_eval(ret);
1426}
1427EXPORT_SYMBOL(cxgb4_create_server6);
1428
1429int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
1430 unsigned int queue, bool ipv6)
1431{
1432 struct sk_buff *skb;
1433 struct adapter *adap;
1434 struct cpl_close_listsvr_req *req;
1435 int ret;
1436
1437 adap = netdev2adap(dev);
1438
1439 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
1440 if (!skb)
1441 return -ENOMEM;
1442
1443 req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
1444 INIT_TP_WR(req, 0);
1445 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
1446 req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
1447 LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
1448 ret = t4_mgmt_tx(adap, skb);
1449 return net_xmit_eval(ret);
1450}
1451EXPORT_SYMBOL(cxgb4_remove_server);
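
/**
 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
 * @mtus: the HW MTU table
 * @mtu: the target MTU
 * @idx: index of selected entry in the MTU table
 *
 * Returns the index and the value in the HW MTU table that is closest to
 * but does not exceed @mtu, unless @mtu is smaller than any value in the
 * table, in which case that smallest available value is selected.
 */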
1463unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
1464 unsigned int *idx)
1465{
1466 unsigned int i = 0;
1467
1468 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
1469 ++i;
1470 if (idx)
1471 *idx = i;
1472 return mtus[i];
1473}
1474EXPORT_SYMBOL(cxgb4_best_mtu);
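
/**
 * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
 * @mtus: the HW MTU table
 * @header_size: Header Size
 * @data_size_max: maximum Data Segment Size
 * @data_size_align: desired Data Segment Size Alignment (2^N)
 * @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
 *
 * Similar to cxgb4_best_mtu() but instead of searching the Hardware MTU
 * Table based on a Maximum MTU parameter, we break that up into a Header
 * Size and Maximum Data Segment Size, and provide a desired Data Segment
 * Size Alignment.  If we find an MTU in the Hardware MTU Table which will
 * result in a Data Segment Size with the requested alignment _and_ that
 * MTU isn't "too far" from the closest possible MTU, then we'll return
 * that rather than the closest MTU which doesn't have the alignment.
 */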
1492unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
1493 unsigned short header_size,
1494 unsigned short data_size_max,
1495 unsigned short data_size_align,
1496 unsigned int *mtu_idxp)
1497{
1498 unsigned short max_mtu = header_size + data_size_max;
1499 unsigned short data_size_align_mask = data_size_align - 1;
1500 int mtu_idx, aligned_mtu_idx;
1501
1502
1503
1504
1505
1506
1507 for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
1508 unsigned short data_size = mtus[mtu_idx] - header_size;
1509
1510
1511
1512
1513 if ((data_size & data_size_align_mask) == 0)
1514 aligned_mtu_idx = mtu_idx;
1515
1516
1517
1518
1519
1520 if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
1521 break;
1522 }
1523
1524
1525
1526
1527 if (mtu_idx == NMTUS)
1528 mtu_idx--;
1529
1530
1531
1532
1533
1534 if (aligned_mtu_idx >= 0 &&
1535 mtu_idx - aligned_mtu_idx <= 1)
1536 mtu_idx = aligned_mtu_idx;
1537
1538
1539
1540
1541 if (mtu_idxp)
1542 *mtu_idxp = mtu_idx;
1543 return mtus[mtu_idx];
1544}
1545EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
1546
1547
1548
1549
1550
1551
1552
1553
1554unsigned int cxgb4_tp_smt_idx(enum chip_type chip, unsigned int viid)
1555{
1556
1557
1558
1559
1560
1561
1562 if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
1563 return ((viid & 0x7f) << 1);
1564 else
1565 return (viid & 0x7f);
1566}
1567EXPORT_SYMBOL(cxgb4_tp_smt_idx);
1568
1569
1570
1571
1572
1573
1574
1575unsigned int cxgb4_port_chan(const struct net_device *dev)
1576{
1577 return netdev2pinfo(dev)->tx_chan;
1578}
1579EXPORT_SYMBOL(cxgb4_port_chan);
1580
1581unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
1582{
1583 struct adapter *adap = netdev2adap(dev);
1584 u32 v1, v2, lp_count, hp_count;
1585
1586 v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
1587 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
1588 if (is_t4(adap->params.chip)) {
1589 lp_count = LP_COUNT_G(v1);
1590 hp_count = HP_COUNT_G(v1);
1591 } else {
1592 lp_count = LP_COUNT_T5_G(v1);
1593 hp_count = HP_COUNT_T5_G(v2);
1594 }
1595 return lpfifo ? lp_count : hp_count;
1596}
1597EXPORT_SYMBOL(cxgb4_dbfifo_count);
1598
1599
1600
1601
1602
1603
1604
1605unsigned int cxgb4_port_viid(const struct net_device *dev)
1606{
1607 return netdev2pinfo(dev)->viid;
1608}
1609EXPORT_SYMBOL(cxgb4_port_viid);
1610
1611
1612
1613
1614
1615
1616
1617unsigned int cxgb4_port_idx(const struct net_device *dev)
1618{
1619 return netdev2pinfo(dev)->port_id;
1620}
1621EXPORT_SYMBOL(cxgb4_port_idx);
1622
1623void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
1624 struct tp_tcp_stats *v6)
1625{
1626 struct adapter *adap = pci_get_drvdata(pdev);
1627
1628 spin_lock(&adap->stats_lock);
1629 t4_tp_get_tcp_stats(adap, v4, v6);
1630 spin_unlock(&adap->stats_lock);
1631}
1632EXPORT_SYMBOL(cxgb4_get_tcp_stats);
1633
1634void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
1635 const unsigned int *pgsz_order)
1636{
1637 struct adapter *adap = netdev2adap(dev);
1638
1639 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
1640 t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
1641 HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
1642 HPZ3_V(pgsz_order[3]));
1643}
1644EXPORT_SYMBOL(cxgb4_iscsi_init);
1645
1646int cxgb4_flush_eq_cache(struct net_device *dev)
1647{
1648 struct adapter *adap = netdev2adap(dev);
1649
1650 return t4_sge_ctxt_flush(adap, adap->mbox);
1651}
1652EXPORT_SYMBOL(cxgb4_flush_eq_cache);
1653
1654static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
1655{
1656 u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
1657 __be64 indices;
1658 int ret;
1659
1660 spin_lock(&adap->win0_lock);
1661 ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
1662 sizeof(indices), (__be32 *)&indices,
1663 T4_MEMORY_READ);
1664 spin_unlock(&adap->win0_lock);
1665 if (!ret) {
1666 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
1667 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
1668 }
1669 return ret;
1670}
1671
1672int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
1673 u16 size)
1674{
1675 struct adapter *adap = netdev2adap(dev);
1676 u16 hw_pidx, hw_cidx;
1677 int ret;
1678
1679 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
1680 if (ret)
1681 goto out;
1682
1683 if (pidx != hw_pidx) {
1684 u16 delta;
1685 u32 val;
1686
1687 if (pidx >= hw_pidx)
1688 delta = pidx - hw_pidx;
1689 else
1690 delta = size - hw_pidx + pidx;
1691
1692 if (is_t4(adap->params.chip))
1693 val = PIDX_V(delta);
1694 else
1695 val = PIDX_T5_V(delta);
1696 wmb();
1697 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
1698 QID_V(qid) | val);
1699 }
1700out:
1701 return ret;
1702}
1703EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
1704
1705int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
1706{
1707 struct adapter *adap;
1708 u32 offset, memtype, memaddr;
1709 u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
1710 u32 edc0_end, edc1_end, mc0_end, mc1_end;
1711 int ret;
1712
1713 adap = netdev2adap(dev);
1714
1715 offset = ((stag >> 8) * 32) + adap->vres.stag.start;
1716
1717
1718
1719
1720
1721
1722
1723 size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
1724 edc0_size = EDRAM0_SIZE_G(size) << 20;
1725 size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
1726 edc1_size = EDRAM1_SIZE_G(size) << 20;
1727 size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
1728 mc0_size = EXT_MEM0_SIZE_G(size) << 20;
1729
1730 edc0_end = edc0_size;
1731 edc1_end = edc0_end + edc1_size;
1732 mc0_end = edc1_end + mc0_size;
1733
1734 if (offset < edc0_end) {
1735 memtype = MEM_EDC0;
1736 memaddr = offset;
1737 } else if (offset < edc1_end) {
1738 memtype = MEM_EDC1;
1739 memaddr = offset - edc0_end;
1740 } else {
1741 if (offset < mc0_end) {
1742 memtype = MEM_MC0;
1743 memaddr = offset - edc1_end;
1744 } else if (is_t5(adap->params.chip)) {
1745 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
1746 mc1_size = EXT_MEM1_SIZE_G(size) << 20;
1747 mc1_end = mc0_end + mc1_size;
1748 if (offset < mc1_end) {
1749 memtype = MEM_MC1;
1750 memaddr = offset - mc0_end;
1751 } else {
1752
1753 goto err;
1754 }
1755 } else {
1756
1757 goto err;
1758 }
1759 }
1760
1761 spin_lock(&adap->win0_lock);
1762 ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
1763 spin_unlock(&adap->win0_lock);
1764 return ret;
1765
1766err:
1767 dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
1768 stag, offset);
1769 return -EINVAL;
1770}
1771EXPORT_SYMBOL(cxgb4_read_tpte);
1772
1773u64 cxgb4_read_sge_timestamp(struct net_device *dev)
1774{
1775 u32 hi, lo;
1776 struct adapter *adap;
1777
1778 adap = netdev2adap(dev);
1779 lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
1780 hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));
1781
1782 return ((u64)hi << 32) | (u64)lo;
1783}
1784EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
1785
1786int cxgb4_bar2_sge_qregs(struct net_device *dev,
1787 unsigned int qid,
1788 enum cxgb4_bar2_qtype qtype,
1789 int user,
1790 u64 *pbar2_qoffset,
1791 unsigned int *pbar2_qid)
1792{
1793 return t4_bar2_sge_qregs(netdev2adap(dev),
1794 qid,
1795 (qtype == CXGB4_BAR2_QTYPE_EGRESS
1796 ? T4_BAR2_QTYPE_EGRESS
1797 : T4_BAR2_QTYPE_INGRESS),
1798 user,
1799 pbar2_qoffset,
1800 pbar2_qid);
1801}
1802EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);
1803
1804static struct pci_driver cxgb4_driver;
1805
1806static void check_neigh_update(struct neighbour *neigh)
1807{
1808 const struct device *parent;
1809 const struct net_device *netdev = neigh->dev;
1810
1811 if (netdev->priv_flags & IFF_802_1Q_VLAN)
1812 netdev = vlan_dev_real_dev(netdev);
1813 parent = netdev->dev.parent;
1814 if (parent && parent->driver == &cxgb4_driver.driver)
1815 t4_l2t_update(dev_get_drvdata(parent), neigh);
1816}
1817
1818static int netevent_cb(struct notifier_block *nb, unsigned long event,
1819 void *data)
1820{
1821 switch (event) {
1822 case NETEVENT_NEIGH_UPDATE:
1823 check_neigh_update(data);
1824 break;
1825 case NETEVENT_REDIRECT:
1826 default:
1827 break;
1828 }
1829 return 0;
1830}
1831
1832static bool netevent_registered;
1833static struct notifier_block cxgb4_netevent_nb = {
1834 .notifier_call = netevent_cb
1835};
1836
1837static void drain_db_fifo(struct adapter *adap, int usecs)
1838{
1839 u32 v1, v2, lp_count, hp_count;
1840
1841 do {
1842 v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
1843 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
1844 if (is_t4(adap->params.chip)) {
1845 lp_count = LP_COUNT_G(v1);
1846 hp_count = HP_COUNT_G(v1);
1847 } else {
1848 lp_count = LP_COUNT_T5_G(v1);
1849 hp_count = HP_COUNT_T5_G(v2);
1850 }
1851
1852 if (lp_count == 0 && hp_count == 0)
1853 break;
1854 set_current_state(TASK_UNINTERRUPTIBLE);
1855 schedule_timeout(usecs_to_jiffies(usecs));
1856 } while (1);
1857}
1858
1859static void disable_txq_db(struct sge_txq *q)
1860{
1861 unsigned long flags;
1862
1863 spin_lock_irqsave(&q->db_lock, flags);
1864 q->db_disabled = 1;
1865 spin_unlock_irqrestore(&q->db_lock, flags);
1866}
1867
1868static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
1869{
1870 spin_lock_irq(&q->db_lock);
1871 if (q->db_pidx_inc) {
1872
1873
1874
1875 wmb();
1876 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
1877 QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
1878 q->db_pidx_inc = 0;
1879 }
1880 q->db_disabled = 0;
1881 spin_unlock_irq(&q->db_lock);
1882}
1883
1884static void disable_dbs(struct adapter *adap)
1885{
1886 int i;
1887
1888 for_each_ethrxq(&adap->sge, i)
1889 disable_txq_db(&adap->sge.ethtxq[i].q);
1890 if (is_offload(adap)) {
1891 struct sge_uld_txq_info *txq_info =
1892 adap->sge.uld_txq_info[CXGB4_TX_OFLD];
1893
1894 if (txq_info) {
1895 for_each_ofldtxq(&adap->sge, i) {
1896 struct sge_uld_txq *txq = &txq_info->uldtxq[i];
1897
1898 disable_txq_db(&txq->q);
1899 }
1900 }
1901 }
1902 for_each_port(adap, i)
1903 disable_txq_db(&adap->sge.ctrlq[i].q);
1904}
1905
1906static void enable_dbs(struct adapter *adap)
1907{
1908 int i;
1909
1910 for_each_ethrxq(&adap->sge, i)
1911 enable_txq_db(adap, &adap->sge.ethtxq[i].q);
1912 if (is_offload(adap)) {
1913 struct sge_uld_txq_info *txq_info =
1914 adap->sge.uld_txq_info[CXGB4_TX_OFLD];
1915
1916 if (txq_info) {
1917 for_each_ofldtxq(&adap->sge, i) {
1918 struct sge_uld_txq *txq = &txq_info->uldtxq[i];
1919
1920 enable_txq_db(adap, &txq->q);
1921 }
1922 }
1923 }
1924 for_each_port(adap, i)
1925 enable_txq_db(adap, &adap->sge.ctrlq[i].q);
1926}
1927
1928static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
1929{
1930 enum cxgb4_uld type = CXGB4_ULD_RDMA;
1931
1932 if (adap->uld && adap->uld[type].handle)
1933 adap->uld[type].control(adap->uld[type].handle, cmd);
1934}
1935
1936static void process_db_full(struct work_struct *work)
1937{
1938 struct adapter *adap;
1939
1940 adap = container_of(work, struct adapter, db_full_task);
1941
1942 drain_db_fifo(adap, dbfifo_drain_delay);
1943 enable_dbs(adap);
1944 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
1945 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
1946 t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
1947 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
1948 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
1949 else
1950 t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
1951 DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
1952}
1953
1954static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
1955{
1956 u16 hw_pidx, hw_cidx;
1957 int ret;
1958
1959 spin_lock_irq(&q->db_lock);
1960 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
1961 if (ret)
1962 goto out;
1963 if (q->db_pidx != hw_pidx) {
1964 u16 delta;
1965 u32 val;
1966
1967 if (q->db_pidx >= hw_pidx)
1968 delta = q->db_pidx - hw_pidx;
1969 else
1970 delta = q->size - hw_pidx + q->db_pidx;
1971
1972 if (is_t4(adap->params.chip))
1973 val = PIDX_V(delta);
1974 else
1975 val = PIDX_T5_V(delta);
1976 wmb();
1977 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
1978 QID_V(q->cntxt_id) | val);
1979 }
1980out:
1981 q->db_disabled = 0;
1982 q->db_pidx_inc = 0;
1983 spin_unlock_irq(&q->db_lock);
1984 if (ret)
1985 CH_WARN(adap, "DB drop recovery failed.\n");
1986}
1987
1988static void recover_all_queues(struct adapter *adap)
1989{
1990 int i;
1991
1992 for_each_ethrxq(&adap->sge, i)
1993 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
1994 if (is_offload(adap)) {
1995 struct sge_uld_txq_info *txq_info =
1996 adap->sge.uld_txq_info[CXGB4_TX_OFLD];
1997 if (txq_info) {
1998 for_each_ofldtxq(&adap->sge, i) {
1999 struct sge_uld_txq *txq = &txq_info->uldtxq[i];
2000
2001 sync_txq_pidx(adap, &txq->q);
2002 }
2003 }
2004 }
2005 for_each_port(adap, i)
2006 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
2007}
2008
2009static void process_db_drop(struct work_struct *work)
2010{
2011 struct adapter *adap;
2012
2013 adap = container_of(work, struct adapter, db_drop_task);
2014
2015 if (is_t4(adap->params.chip)) {
2016 drain_db_fifo(adap, dbfifo_drain_delay);
2017 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
2018 drain_db_fifo(adap, dbfifo_drain_delay);
2019 recover_all_queues(adap);
2020 drain_db_fifo(adap, dbfifo_drain_delay);
2021 enable_dbs(adap);
2022 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
2023 } else if (is_t5(adap->params.chip)) {
2024 u32 dropped_db = t4_read_reg(adap, 0x010ac);
2025 u16 qid = (dropped_db >> 15) & 0x1ffff;
2026 u16 pidx_inc = dropped_db & 0x1fff;
2027 u64 bar2_qoffset;
2028 unsigned int bar2_qid;
2029 int ret;
2030
2031 ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
2032 0, &bar2_qoffset, &bar2_qid);
2033 if (ret)
2034 dev_err(adap->pdev_dev, "doorbell drop recovery: "
2035 "qid=%d, pidx_inc=%d\n", qid, pidx_inc);
2036 else
2037 writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
2038 adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);
2039
2040
2041 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
2042 }
2043
2044 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
2045 t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
2046}
2047
2048void t4_db_full(struct adapter *adap)
2049{
2050 if (is_t4(adap->params.chip)) {
2051 disable_dbs(adap);
2052 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
2053 t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
2054 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
2055 queue_work(adap->workq, &adap->db_full_task);
2056 }
2057}
2058
2059void t4_db_dropped(struct adapter *adap)
2060{
2061 if (is_t4(adap->params.chip)) {
2062 disable_dbs(adap);
2063 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
2064 }
2065 queue_work(adap->workq, &adap->db_drop_task);
2066}
2067
2068void t4_register_netevent_notifier(void)
2069{
2070 if (!netevent_registered) {
2071 register_netevent_notifier(&cxgb4_netevent_nb);
2072 netevent_registered = true;
2073 }
2074}
2075
2076static void detach_ulds(struct adapter *adap)
2077{
2078 unsigned int i;
2079
2080 mutex_lock(&uld_mutex);
2081 list_del(&adap->list_node);
2082 for (i = 0; i < CXGB4_ULD_MAX; i++)
2083 if (adap->uld && adap->uld[i].handle) {
2084 adap->uld[i].state_change(adap->uld[i].handle,
2085 CXGB4_STATE_DETACH);
2086 adap->uld[i].handle = NULL;
2087 }
2088 if (netevent_registered && list_empty(&adapter_list)) {
2089 unregister_netevent_notifier(&cxgb4_netevent_nb);
2090 netevent_registered = false;
2091 }
2092 mutex_unlock(&uld_mutex);
2093}
2094
2095static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
2096{
2097 unsigned int i;
2098
2099 mutex_lock(&uld_mutex);
2100 for (i = 0; i < CXGB4_ULD_MAX; i++)
2101 if (adap->uld && adap->uld[i].handle)
2102 adap->uld[i].state_change(adap->uld[i].handle,
2103 new_state);
2104 mutex_unlock(&uld_mutex);
2105}
2106
2107#if IS_ENABLED(CONFIG_IPV6)
2108static int cxgb4_inet6addr_handler(struct notifier_block *this,
2109 unsigned long event, void *data)
2110{
2111 struct inet6_ifaddr *ifa = data;
2112 struct net_device *event_dev = ifa->idev->dev;
2113 const struct device *parent = NULL;
2114#if IS_ENABLED(CONFIG_BONDING)
2115 struct adapter *adap;
2116#endif
2117 if (event_dev->priv_flags & IFF_802_1Q_VLAN)
2118 event_dev = vlan_dev_real_dev(event_dev);
2119#if IS_ENABLED(CONFIG_BONDING)
2120 if (event_dev->flags & IFF_MASTER) {
2121 list_for_each_entry(adap, &adapter_list, list_node) {
2122 switch (event) {
2123 case NETDEV_UP:
2124 cxgb4_clip_get(adap->port[0],
2125 (const u32 *)ifa, 1);
2126 break;
2127 case NETDEV_DOWN:
2128 cxgb4_clip_release(adap->port[0],
2129 (const u32 *)ifa, 1);
2130 break;
2131 default:
2132 break;
2133 }
2134 }
2135 return NOTIFY_OK;
2136 }
2137#endif
2138
2139 if (event_dev)
2140 parent = event_dev->dev.parent;
2141
2142 if (parent && parent->driver == &cxgb4_driver.driver) {
2143 switch (event) {
2144 case NETDEV_UP:
2145 cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
2146 break;
2147 case NETDEV_DOWN:
2148 cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
2149 break;
2150 default:
2151 break;
2152 }
2153 }
2154 return NOTIFY_OK;
2155}
2156
2157static bool inet6addr_registered;
2158static struct notifier_block cxgb4_inet6addr_notifier = {
2159 .notifier_call = cxgb4_inet6addr_handler
2160};
2161
2162static void update_clip(const struct adapter *adap)
2163{
2164 int i;
2165 struct net_device *dev;
2166 int ret;
2167
2168 rcu_read_lock();
2169
2170 for (i = 0; i < MAX_NPORTS; i++) {
2171 dev = adap->port[i];
2172 ret = 0;
2173
2174 if (dev)
2175 ret = cxgb4_update_root_dev_clip(dev);
2176
2177 if (ret < 0)
2178 break;
2179 }
2180 rcu_read_unlock();
2181}
2182#endif
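
/**
 * cxgb_up - enable the adapter
 * @adap: adapter being enabled
 *
 * Called when the first port is enabled, this function performs the
 * actions necessary to make an adapter operational, such as completing
 * the initialization of HW modules, and enabling interrupts.
 *
 * Must be called with the rtnl lock held.
 */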
2194static int cxgb_up(struct adapter *adap)
2195{
2196 int err;
2197
2198 mutex_lock(&uld_mutex);
2199 err = setup_sge_queues(adap);
2200 if (err)
2201 goto rel_lock;
2202 err = setup_rss(adap);
2203 if (err)
2204 goto freeq;
2205
2206 if (adap->flags & USING_MSIX) {
2207 name_msix_vecs(adap);
2208 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
2209 adap->msix_info[0].desc, adap);
2210 if (err)
2211 goto irq_err;
2212 err = request_msix_queue_irqs(adap);
2213 if (err) {
2214 free_irq(adap->msix_info[0].vec, adap);
2215 goto irq_err;
2216 }
2217 } else {
2218 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
2219 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
2220 adap->port[0]->name, adap);
2221 if (err)
2222 goto irq_err;
2223 }
2224
2225 enable_rx(adap);
2226 t4_sge_start(adap);
2227 t4_intr_enable(adap);
2228 adap->flags |= FULL_INIT_DONE;
2229 mutex_unlock(&uld_mutex);
2230
2231 notify_ulds(adap, CXGB4_STATE_UP);
2232#if IS_ENABLED(CONFIG_IPV6)
2233 update_clip(adap);
2234#endif
2235
2236 INIT_LIST_HEAD(&adap->mac_hlist);
2237 return err;
2238
2239 irq_err:
2240 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
2241 freeq:
2242 t4_free_sge_resources(adap);
2243 rel_lock:
2244 mutex_unlock(&uld_mutex);
2245 return err;
2246}
2247
2248static void cxgb_down(struct adapter *adapter)
2249{
2250 cancel_work_sync(&adapter->tid_release_task);
2251 cancel_work_sync(&adapter->db_full_task);
2252 cancel_work_sync(&adapter->db_drop_task);
2253 adapter->tid_release_task_busy = false;
2254 adapter->tid_release_head = NULL;
2255
2256 t4_sge_stop(adapter);
2257 t4_free_sge_resources(adapter);
2258 adapter->flags &= ~FULL_INIT_DONE;
2259}
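
/*
 * net_device operations
 */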
2264static int cxgb_open(struct net_device *dev)
2265{
2266 int err;
2267 struct port_info *pi = netdev_priv(dev);
2268 struct adapter *adapter = pi->adapter;
2269
2270 netif_carrier_off(dev);
2271
2272 if (!(adapter->flags & FULL_INIT_DONE)) {
2273 err = cxgb_up(adapter);
2274 if (err < 0)
2275 return err;
2276 }
2277
2278
2279
2280
2281 err = t4_update_port_info(pi);
2282 if (err < 0)
2283 return err;
2284
2285 err = link_start(dev);
2286 if (!err)
2287 netif_tx_start_all_queues(dev);
2288 return err;
2289}
2290
2291static int cxgb_close(struct net_device *dev)
2292{
2293 struct port_info *pi = netdev_priv(dev);
2294 struct adapter *adapter = pi->adapter;
2295
2296 netif_tx_stop_all_queues(dev);
2297 netif_carrier_off(dev);
2298 return t4_enable_vi(adapter, adapter->pf, pi->viid, false, false);
2299}
2300
2301int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
2302 __be32 sip, __be16 sport, __be16 vlan,
2303 unsigned int queue, unsigned char port, unsigned char mask)
2304{
2305 int ret;
2306 struct filter_entry *f;
2307 struct adapter *adap;
2308 int i;
2309 u8 *val;
2310
2311 adap = netdev2adap(dev);
2312
2313
2314 stid -= adap->tids.sftid_base;
2315 stid += adap->tids.nftids;
2316
2317
2318
2319 f = &adap->tids.ftid_tab[stid];
2320 ret = writable_filter(f);
2321 if (ret)
2322 return ret;
2323
2324
2325
2326
2327 if (f->valid)
2328 clear_filter(adap, f);
2329
2330
2331 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
2332 f->fs.val.lport = cpu_to_be16(sport);
2333 f->fs.mask.lport = ~0;
2334 val = (u8 *)&sip;
2335 if ((val[0] | val[1] | val[2] | val[3]) != 0) {
2336 for (i = 0; i < 4; i++) {
2337 f->fs.val.lip[i] = val[i];
2338 f->fs.mask.lip[i] = ~0;
2339 }
2340 if (adap->params.tp.vlan_pri_map & PORT_F) {
2341 f->fs.val.iport = port;
2342 f->fs.mask.iport = mask;
2343 }
2344 }
2345
2346 if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
2347 f->fs.val.proto = IPPROTO_TCP;
2348 f->fs.mask.proto = ~0;
2349 }
2350
2351 f->fs.dirsteer = 1;
2352 f->fs.iq = queue;
2353
2354 f->locked = 1;
2355 f->fs.rpttid = 1;
2356
2357
2358
2359
2360 f->tid = stid + adap->tids.ftid_base;
2361 ret = set_filter_wr(adap, stid);
2362 if (ret) {
2363 clear_filter(adap, f);
2364 return ret;
2365 }
2366
2367 return 0;
2368}
2369EXPORT_SYMBOL(cxgb4_create_server_filter);
2370
2371int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
2372 unsigned int queue, bool ipv6)
2373{
2374 struct filter_entry *f;
2375 struct adapter *adap;
2376
2377 adap = netdev2adap(dev);
2378
2379
2380 stid -= adap->tids.sftid_base;
2381 stid += adap->tids.nftids;
2382
2383 f = &adap->tids.ftid_tab[stid];
2384
2385 f->locked = 0;
2386
2387 return delete_filter(adap, stid);
2388}
2389EXPORT_SYMBOL(cxgb4_remove_server_filter);
2390
2391static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
2392 struct rtnl_link_stats64 *ns)
2393{
2394 struct port_stats stats;
2395 struct port_info *p = netdev_priv(dev);
2396 struct adapter *adapter = p->adapter;
2397
2398
2399
2400
2401
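	/* Don't touch the hardware if the device has been detached, e.g.
	 * during PCI error (EEH) recovery.
	 */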
2402 spin_lock(&adapter->stats_lock);
2403 if (!netif_device_present(dev)) {
2404 spin_unlock(&adapter->stats_lock);
2405 return ns;
2406 }
2407 t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
2408 &p->stats_base);
2409 spin_unlock(&adapter->stats_lock);
2410
2411 ns->tx_bytes = stats.tx_octets;
2412 ns->tx_packets = stats.tx_frames;
2413 ns->rx_bytes = stats.rx_octets;
2414 ns->rx_packets = stats.rx_frames;
2415 ns->multicast = stats.rx_mcast_frames;
2416
2417
2418 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
2419 stats.rx_runt;
2420 ns->rx_over_errors = 0;
2421 ns->rx_crc_errors = stats.rx_fcs_err;
2422 ns->rx_frame_errors = stats.rx_symbol_err;
2423 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
2424 stats.rx_ovflow2 + stats.rx_ovflow3 +
2425 stats.rx_trunc0 + stats.rx_trunc1 +
2426 stats.rx_trunc2 + stats.rx_trunc3;
2427 ns->rx_missed_errors = 0;
2428
2429
2430 ns->tx_aborted_errors = 0;
2431 ns->tx_carrier_errors = 0;
2432 ns->tx_fifo_errors = 0;
2433 ns->tx_heartbeat_errors = 0;
2434 ns->tx_window_errors = 0;
2435
2436 ns->tx_errors = stats.tx_error_frames;
2437 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
2438 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
2439 return ns;
2440}
2441
2442static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2443{
2444 unsigned int mbox;
2445 int ret = 0, prtad, devad;
2446 struct port_info *pi = netdev_priv(dev);
2447 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
2448
2449 switch (cmd) {
2450 case SIOCGMIIPHY:
2451 if (pi->mdio_addr < 0)
2452 return -EOPNOTSUPP;
2453 data->phy_id = pi->mdio_addr;
2454 break;
2455 case SIOCGMIIREG:
2456 case SIOCSMIIREG:
2457 if (mdio_phy_id_is_c45(data->phy_id)) {
2458 prtad = mdio_phy_id_prtad(data->phy_id);
2459 devad = mdio_phy_id_devad(data->phy_id);
2460 } else if (data->phy_id < 32) {
2461 prtad = data->phy_id;
2462 devad = 0;
2463 data->reg_num &= 0x1f;
2464 } else
2465 return -EINVAL;
2466
2467 mbox = pi->adapter->pf;
2468 if (cmd == SIOCGMIIREG)
2469 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
2470 data->reg_num, &data->val_out);
2471 else
2472 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
2473 data->reg_num, data->val_in);
2474 break;
2475 case SIOCGHWTSTAMP:
2476 return copy_to_user(req->ifr_data, &pi->tstamp_config,
2477 sizeof(pi->tstamp_config)) ?
2478 -EFAULT : 0;
2479 case SIOCSHWTSTAMP:
2480 if (copy_from_user(&pi->tstamp_config, req->ifr_data,
2481 sizeof(pi->tstamp_config)))
2482 return -EFAULT;
2483
2484 switch (pi->tstamp_config.rx_filter) {
2485 case HWTSTAMP_FILTER_NONE:
2486 pi->rxtstamp = false;
2487 break;
2488 case HWTSTAMP_FILTER_ALL:
2489 pi->rxtstamp = true;
2490 break;
2491 default:
2492 pi->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
2493 return -ERANGE;
2494 }
2495
2496 return copy_to_user(req->ifr_data, &pi->tstamp_config,
2497 sizeof(pi->tstamp_config)) ?
2498 -EFAULT : 0;
2499 default:
2500 return -EOPNOTSUPP;
2501 }
2502 return ret;
2503}
2504
2505static void cxgb_set_rxmode(struct net_device *dev)
2506{
2507
2508 set_rxmode(dev, -1, false);
2509}
2510
2511static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2512{
2513 int ret;
2514 struct port_info *pi = netdev_priv(dev);
2515
2516 if (new_mtu < 81 || new_mtu > MAX_MTU)
2517 return -EINVAL;
2518 ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
2519 -1, -1, -1, true);
2520 if (!ret)
2521 dev->mtu = new_mtu;
2522 return ret;
2523}
2524
2525#ifdef CONFIG_PCI_IOV
2526static int dummy_open(struct net_device *dev)
2527{
2528
2529
2530
2531 netif_carrier_off(dev);
2532 return 0;
2533}
2534
2535
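/* Derive a locally administered unicast MAC address for each VF from the
 * adapter's VPD hardware address and record it in the per-VF state.
 */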
2536static void fill_vf_station_mac_addr(struct adapter *adap)
2537{
2538 unsigned int i;
2539 u8 hw_addr[ETH_ALEN], macaddr[ETH_ALEN];
2540 int err;
2541 u8 *na;
2542 u16 a, b;
2543
2544 err = t4_get_raw_vpd_params(adap, &adap->params.vpd);
2545 if (!err) {
2546 na = adap->params.vpd.na;
2547 for (i = 0; i < ETH_ALEN; i++)
2548 hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
2549 hex2val(na[2 * i + 1]));
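		/* Mix the leading bytes of the hardware address and force the
		 * result to have a locally administered (0x02), non-multicast
		 * (~0x01) first octet.
		 */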
2550 a = (hw_addr[0] << 8) | hw_addr[1];
2551 b = (hw_addr[1] << 8) | hw_addr[2];
2552 a ^= b;
2553 a |= 0x0200;
2554 a &= ~0x0100;
2555 macaddr[0] = a >> 8;
2556 macaddr[1] = a & 0xff;
2557
2558 for (i = 2; i < 5; i++)
2559 macaddr[i] = hw_addr[i + 1];
2560
2561 for (i = 0; i < adap->num_vfs; i++) {
2562 macaddr[5] = adap->pf * 16 + i;
2563 ether_addr_copy(adap->vfinfo[i].vf_mac_addr, macaddr);
2564 }
2565 }
2566}
2567
2568static int cxgb_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
2569{
2570 struct port_info *pi = netdev_priv(dev);
2571 struct adapter *adap = pi->adapter;
2572 int ret;
2573
2574
2575 if (!is_valid_ether_addr(mac)) {
2576 dev_err(pi->adapter->pdev_dev,
2577 "Invalid Ethernet address %pM for VF %d\n",
2578 mac, vf);
2579 return -EINVAL;
2580 }
2581
2582 dev_info(pi->adapter->pdev_dev,
2583 "Setting MAC %pM on VF %d\n", mac, vf);
2584 ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac);
2585 if (!ret)
2586 ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac);
2587 return ret;
2588}
2589
2590static int cxgb_get_phys_port_id(struct net_device *dev,
2591 struct netdev_phys_item_id *ppid)
2592{
2593 struct port_info *pi = netdev_priv(dev);
2594 unsigned int phy_port_id;
2595
2596 phy_port_id = pi->adapter->adap_idx * 10 + pi->port_id;
2597 ppid->id_len = sizeof(phy_port_id);
2598 memcpy(ppid->id, &phy_port_id, ppid->id_len);
2599 return 0;
2600}
2601
2602static int cxgb_get_vf_config(struct net_device *dev,
2603 int vf, struct ifla_vf_info *ivi)
2604{
2605 struct port_info *pi = netdev_priv(dev);
2606 struct adapter *adap = pi->adapter;
2607
2608 if (vf >= adap->num_vfs)
2609 return -EINVAL;
2610 ivi->vf = vf;
2611 ether_addr_copy(ivi->mac, adap->vfinfo[vf].vf_mac_addr);
2612 return 0;
2613}
2614#endif
2615
2616static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2617{
2618 int ret;
2619 struct sockaddr *addr = p;
2620 struct port_info *pi = netdev_priv(dev);
2621
2622 if (!is_valid_ether_addr(addr->sa_data))
2623 return -EADDRNOTAVAIL;
2624
2625 ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid,
2626 pi->xact_addr_filt, addr->sa_data, true, true);
2627 if (ret < 0)
2628 return ret;
2629
2630 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2631 pi->xact_addr_filt = ret;
2632 return 0;
2633}
2634
2635#ifdef CONFIG_NET_POLL_CONTROLLER
2636static void cxgb_netpoll(struct net_device *dev)
2637{
2638 struct port_info *pi = netdev_priv(dev);
2639 struct adapter *adap = pi->adapter;
2640
2641 if (adap->flags & USING_MSIX) {
2642 int i;
2643 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
2644
2645 for (i = pi->nqsets; i; i--, rx++)
2646 t4_sge_intr_msix(0, &rx->rspq);
2647 } else
2648 t4_intr_handler(adap)(0, adap);
2649}
2650#endif
2651
2652static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
2653{
2654 struct port_info *pi = netdev_priv(dev);
2655 struct adapter *adap = pi->adapter;
2656 struct sched_class *e;
2657 struct ch_sched_params p;
2658 struct ch_sched_queue qe;
2659 u32 req_rate;
2660 int err = 0;
2661
2662 if (!can_sched(dev))
2663 return -ENOTSUPP;
2664
2665 if (index < 0 || index > pi->nqsets - 1)
2666 return -EINVAL;
2667
2668 if (!(adap->flags & FULL_INIT_DONE)) {
2669 dev_err(adap->pdev_dev,
2670 "Failed to rate limit on queue %d. Link Down?\n",
2671 index);
2672 return -EINVAL;
2673 }
2674
2675
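	/* Convert the requested rate from Mbps to Kbps (<< 10) for the
	 * scheduler.
	 */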
2676 req_rate = rate << 10;
2677
2678
2679 if (req_rate >= SCHED_MAX_RATE_KBPS) {
2680 dev_err(adap->pdev_dev,
2681 "Invalid rate %u Mbps, Max rate is %u Gbps\n",
2682 rate, SCHED_MAX_RATE_KBPS);
2683 return -ERANGE;
2684 }
2685
2686
2687 memset(&qe, 0, sizeof(qe));
2688 qe.queue = index;
2689 qe.class = SCHED_CLS_NONE;
2690
2691 err = cxgb4_sched_class_unbind(dev, (void *)(&qe), SCHED_QUEUE);
2692 if (err) {
2693 dev_err(adap->pdev_dev,
2694 "Unbinding Queue %d on port %d fail. Err: %d\n",
2695 index, pi->port_id, err);
2696 return err;
2697 }
2698
2699
2700 if (!req_rate)
2701 return 0;
2702
2703
2704 memset(&p, 0, sizeof(p));
2705 p.type = SCHED_CLASS_TYPE_PACKET;
2706 p.u.params.level = SCHED_CLASS_LEVEL_CL_RL;
2707 p.u.params.mode = SCHED_CLASS_MODE_CLASS;
2708 p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS;
2709 p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS;
2710 p.u.params.channel = pi->tx_chan;
2711 p.u.params.class = SCHED_CLS_NONE;
2712 p.u.params.minrate = 0;
2713 p.u.params.maxrate = req_rate;
2714 p.u.params.weight = 0;
2715 p.u.params.pktsize = dev->mtu;
2716
2717 e = cxgb4_sched_class_alloc(dev, &p);
2718 if (!e)
2719 return -ENOMEM;
2720
2721
2722 memset(&qe, 0, sizeof(qe));
2723 qe.queue = index;
2724 qe.class = e->idx;
2725
2726 err = cxgb4_sched_class_bind(dev, (void *)(&qe), SCHED_QUEUE);
2727 if (err)
2728 dev_err(adap->pdev_dev,
2729 "Queue rate limiting failed. Err: %d\n", err);
2730 return err;
2731}
2732
2733int cxgb_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
2734 struct tc_to_netdev *tc)
2735{
2736 struct port_info *pi = netdev2pinfo(dev);
2737 struct adapter *adap = netdev2adap(dev);
2738
2739 if (!(adap->flags & FULL_INIT_DONE)) {
2740 dev_err(adap->pdev_dev,
2741 "Failed to setup tc on port %d. Link Down?\n",
2742 pi->port_id);
2743 return -EINVAL;
2744 }
2745
2746 if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) &&
2747 tc->type == TC_SETUP_CLSU32) {
2748 switch (tc->cls_u32->command) {
2749 case TC_CLSU32_NEW_KNODE:
2750 case TC_CLSU32_REPLACE_KNODE:
2751 return cxgb4_config_knode(dev, proto, tc->cls_u32);
2752 case TC_CLSU32_DELETE_KNODE:
2753 return cxgb4_delete_knode(dev, proto, tc->cls_u32);
2754 default:
2755 return -EOPNOTSUPP;
2756 }
2757 }
2758
2759 return -EOPNOTSUPP;
2760}
2761
2762static const struct net_device_ops cxgb4_netdev_ops = {
2763 .ndo_size = sizeof(struct net_device_ops),
2764 .ndo_open = cxgb_open,
2765 .ndo_stop = cxgb_close,
2766 .ndo_start_xmit = t4_eth_xmit,
2767 .ndo_select_queue = cxgb_select_queue,
2768 .ndo_get_stats64 = cxgb_get_stats,
2769 .ndo_set_rx_mode = cxgb_set_rxmode,
2770 .ndo_set_mac_address = cxgb_set_mac_addr,
2771 .ndo_set_features = cxgb_set_features,
2772 .ndo_validate_addr = eth_validate_addr,
2773 .ndo_do_ioctl = cxgb_ioctl,
2774 .ndo_change_mtu = cxgb_change_mtu,
2775#ifdef CONFIG_NET_POLL_CONTROLLER
2776 .ndo_poll_controller = cxgb_netpoll,
2777#endif
2778#ifdef CONFIG_NET_RX_BUSY_POLL
2779 .ndo_busy_poll = cxgb_busy_poll,
2780#endif
2781 .extended.ndo_set_tx_maxrate = cxgb_set_tx_maxrate,
2782 .ndo_setup_tc = cxgb_setup_tc,
2783};
2784
2785#ifdef CONFIG_PCI_IOV
2786static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
2787 .ndo_open = dummy_open,
2788 .ndo_set_vf_mac = cxgb_set_vf_mac,
2789 .ndo_get_vf_config = cxgb_get_vf_config,
2790 .ndo_get_phys_port_id = cxgb_get_phys_port_id,
2791};
2792#endif
2793
2794static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2795{
2796 struct adapter *adapter = netdev2adap(dev);
2797
2798 strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
2799 strlcpy(info->version, cxgb4_driver_version,
2800 sizeof(info->version));
2801 strlcpy(info->bus_info, pci_name(adapter->pdev),
2802 sizeof(info->bus_info));
2803}
2804
2805static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
2806 .get_drvinfo = get_drvinfo,
2807};
2808
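/*
 * t4_fatal_err - handle a fatal hardware error: quiesce the adapter, stop
 * all Tx queues and mark every port's carrier off.
 */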
2809void t4_fatal_err(struct adapter *adap)
2810{
2811 int port;
2812
2813
2814
2815
2816 t4_shutdown_adapter(adap);
2817 for_each_port(adap, port) {
2818 struct net_device *dev = adap->port[port];
2819
2820
2821
2822
2823 if (!dev)
2824 continue;
2825
2826 netif_tx_stop_all_queues(dev);
2827 netif_carrier_off(dev);
2828 }
2829 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
2830}
2831
2832static void setup_memwin(struct adapter *adap)
2833{
2834 u32 nic_win_base = t4_get_util_window(adap);
2835
2836 t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC);
2837}
2838
2839static void setup_memwin_rdma(struct adapter *adap)
2840{
2841 if (adap->vres.ocq.size) {
2842 u32 start;
2843 unsigned int sz_kb;
2844
2845 start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
2846 start &= PCI_BASE_ADDRESS_MEM_MASK;
2847 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
2848 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
2849 t4_write_reg(adap,
2850 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
2851 start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
2852 t4_write_reg(adap,
2853 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
2854 adap->vres.ocq.start);
2855 t4_read_reg(adap,
2856 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
2857 }
2858}
2859
2860static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
2861{
2862 u32 v;
2863 int ret;
2864
2865
2866 memset(c, 0, sizeof(*c));
2867 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
2868 FW_CMD_REQUEST_F | FW_CMD_READ_F);
2869 c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
2870 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
2871 if (ret < 0)
2872 return ret;
2873
2874 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
2875 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
2876 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
2877 if (ret < 0)
2878 return ret;
2879
2880 ret = t4_config_glbl_rss(adap, adap->pf,
2881 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
2882 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
2883 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
2884 if (ret < 0)
2885 return ret;
2886
2887 ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
2888 MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
2889 FW_CMD_CAP_PF);
2890 if (ret < 0)
2891 return ret;
2892
2893 t4_sge_init(adap);
2894
2895
2896 t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
2897 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
2898 t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
2899 v = t4_read_reg(adap, TP_PIO_DATA_A);
2900 t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);
2901
2902
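	/* Map the Tx modulation queues onto Tx channels; 0xE4 (0b11100100)
	 * encodes the identity mapping queue N -> channel N.
	 */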
2903 adap->params.tp.tx_modq_map = 0xE4;
2904 t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
2905 TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));
2906
2907
2908 v = 0x84218421;
2909 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
2910 &v, 1, TP_TX_SCHED_HDR_A);
2911 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
2912 &v, 1, TP_TX_SCHED_FIFO_A);
2913 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
2914 &v, 1, TP_TX_SCHED_PCMD_A);
2915
2916#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16
2917 if (is_offload(adap)) {
2918 t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
2919 TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
2920 TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
2921 TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
2922 TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
2923 t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
2924 TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
2925 TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
2926 TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
2927 TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
2928 }
2929
2930
2931 return t4_early_init(adap, adap->pf);
2932}
2933
2934
2935
2936
2937#define MAX_ATIDS 8192U
2938
2939
2940
2941
2942
2943
2944
2945
2946
2947
2948
2949
2950
2951
2952
2953
2954
2955static int adap_init0_tweaks(struct adapter *adapter)
2956{
2957
2958
2959
2960
2961
2962 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
2963
2964
2965
2966
2967 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
2968 dev_err(&adapter->pdev->dev,
2969 "Ignoring illegal rx_dma_offset=%d, using 2\n",
2970 rx_dma_offset);
2971 rx_dma_offset = 2;
2972 }
2973 t4_set_reg_field(adapter, SGE_CONTROL_A,
2974 PKTSHIFT_V(PKTSHIFT_M),
2975 PKTSHIFT_V(rx_dma_offset));
2976
2977
2978
2979
2980
2981 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
2982 CSUM_HAS_PSEUDO_HDR_F, 0);
2983
2984 return 0;
2985}
2986
2987
2988
2989
2990
2991static int phy_aq1202_version(const u8 *phy_fw_data,
2992 size_t phy_fw_size)
2993{
2994 int offset;
2995
2996
2997
2998
2999
3000
3001
3002
3003
3004
3005 #define be16(__p) (((__p)[0] << 8) | (__p)[1])
3006 #define le16(__p) ((__p)[0] | ((__p)[1] << 8))
3007 #define le24(__p) (le16(__p) | ((__p)[2] << 16))
3008
3009 offset = le24(phy_fw_data + 0x8) << 12;
3010 offset = le24(phy_fw_data + offset + 0xa);
3011 return be16(phy_fw_data + offset + 0x27e);
3012
3013 #undef be16
3014 #undef le16
3015 #undef le24
3016}
3017
3018static struct info_10gbt_phy_fw {
3019 unsigned int phy_fw_id;
3020 char *phy_fw_file;
3021 int (*phy_fw_version)(const u8 *phy_fw_data, size_t phy_fw_size);
3022 int phy_flash;
3023} phy_info_array[] = {
3024 {
3025 PHY_AQ1202_DEVICEID,
3026 PHY_AQ1202_FIRMWARE,
3027 phy_aq1202_version,
3028 1,
3029 },
3030 {
3031 PHY_BCM84834_DEVICEID,
3032 PHY_BCM84834_FIRMWARE,
3033 NULL,
3034 0,
3035 },
3036 { 0, NULL, NULL },
3037};
3038
3039static struct info_10gbt_phy_fw *find_phy_info(int devid)
3040{
3041 int i;
3042
3043 for (i = 0; i < ARRAY_SIZE(phy_info_array); i++) {
3044 if (phy_info_array[i].phy_fw_id == devid)
3045 return &phy_info_array[i];
3046 }
3047 return NULL;
3048}
3049
3050
3051
3052
3053
3054
3055static int adap_init0_phy(struct adapter *adap)
3056{
3057 const struct firmware *phyf;
3058 int ret;
3059 struct info_10gbt_phy_fw *phy_info;
3060
3061
3062
3063 phy_info = find_phy_info(adap->pdev->device);
3064 if (!phy_info) {
3065 dev_warn(adap->pdev_dev,
3066 "No PHY Firmware file found for this PHY\n");
3067 return -EOPNOTSUPP;
3068 }
3069
3070
3071
3072
3073
3074
3075 ret = request_firmware(&phyf, phy_info->phy_fw_file,
3076 adap->pdev_dev);
3077 if (ret < 0) {
3078
3079
3080
3081
3082
3083
3084 dev_err(adap->pdev_dev, "unable to find PHY Firmware image "
3085 "/lib/firmware/%s, error %d\n",
3086 phy_info->phy_fw_file, -ret);
3087 if (phy_info->phy_flash) {
3088 int cur_phy_fw_ver = 0;
3089
3090 t4_phy_fw_ver(adap, &cur_phy_fw_ver);
			dev_warn(adap->pdev_dev, "continuing with on-adapter "
3092 "FLASH copy, version %#x\n", cur_phy_fw_ver);
3093 ret = 0;
3094 }
3095
3096 return ret;
3097 }
3098
3099
3100
3101 ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock,
3102 phy_info->phy_fw_version,
3103 (u8 *)phyf->data, phyf->size);
3104 if (ret < 0)
3105 dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
3106 -ret);
3107 else if (ret > 0) {
3108 int new_phy_fw_ver = 0;
3109
3110 if (phy_info->phy_fw_version)
3111 new_phy_fw_ver = phy_info->phy_fw_version(phyf->data,
3112 phyf->size);
3113 dev_info(adap->pdev_dev, "Successfully transferred PHY "
3114 "Firmware /lib/firmware/%s, version %#x\n",
3115 phy_info->phy_fw_file, new_phy_fw_ver);
3116 }
3117
3118 release_firmware(phyf);
3119
3120 return ret;
3121}
3122
3123
3124
3125
3126static int adap_init0_config(struct adapter *adapter, int reset)
3127{
3128 struct fw_caps_config_cmd caps_cmd;
3129 const struct firmware *cf;
3130 unsigned long mtype = 0, maddr = 0;
3131 u32 finiver, finicsum, cfcsum;
3132 int ret;
3133 int config_issued = 0;
3134 char *fw_config_file, fw_config_file_path[256];
3135 char *config_name = NULL;
3136
3137
3138
3139
3140 if (reset) {
3141 ret = t4_fw_reset(adapter, adapter->mbox,
3142 PIORSTMODE_F | PIORST_F);
3143 if (ret < 0)
3144 goto bye;
3145 }
3146
3147
3148
3149
3150
3151
3152 if (is_10gbt_device(adapter->pdev->device)) {
3153 ret = adap_init0_phy(adapter);
3154 if (ret < 0)
3155 goto bye;
3156 }
3157
3158
3159
3160
3161
3162 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
3163 case CHELSIO_T4:
3164 fw_config_file = FW4_CFNAME;
3165 break;
3166 case CHELSIO_T5:
3167 fw_config_file = FW5_CFNAME;
3168 break;
3169 case CHELSIO_T6:
3170 fw_config_file = FW6_CFNAME;
3171 break;
3172 default:
3173 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
3174 adapter->pdev->device);
3175 ret = -EINVAL;
3176 goto bye;
3177 }
3178
3179 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
3180 if (ret < 0) {
3181 config_name = "On FLASH";
3182 mtype = FW_MEMTYPE_CF_FLASH;
3183 maddr = t4_flash_cfg_addr(adapter);
3184 } else {
3185 u32 params[7], val[7];
3186
3187 sprintf(fw_config_file_path,
3188 "/lib/firmware/%s", fw_config_file);
3189 config_name = fw_config_file_path;
3190
3191 if (cf->size >= FLASH_CFG_MAX_SIZE)
3192 ret = -ENOMEM;
3193 else {
3194 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3195 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
3196 ret = t4_query_params(adapter, adapter->mbox,
3197 adapter->pf, 0, 1, params, val);
3198 if (ret == 0) {
3199
3200
3201
3202
3203
3204
3205
3206
3207
3208
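				/* t4_memory_rw() transfers 32-bit words, so
				 * copy the 4-byte-aligned bulk of the file
				 * first and then zero-pad and write any
				 * trailing partial word.
				 */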
3209 size_t resid = cf->size & 0x3;
3210 size_t size = cf->size & ~0x3;
3211 __be32 *data = (__be32 *)cf->data;
3212
3213 mtype = FW_PARAMS_PARAM_Y_G(val[0]);
3214 maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;
3215
3216 spin_lock(&adapter->win0_lock);
3217 ret = t4_memory_rw(adapter, 0, mtype, maddr,
3218 size, data, T4_MEMORY_WRITE);
3219 if (ret == 0 && resid != 0) {
3220 union {
3221 __be32 word;
3222 char buf[4];
3223 } last;
3224 int i;
3225
3226 last.word = data[size >> 2];
3227 for (i = resid; i < 4; i++)
3228 last.buf[i] = 0;
3229 ret = t4_memory_rw(adapter, 0, mtype,
3230 maddr + size,
3231 4, &last.word,
3232 T4_MEMORY_WRITE);
3233 }
3234 spin_unlock(&adapter->win0_lock);
3235 }
3236 }
3237
3238 release_firmware(cf);
3239 if (ret)
3240 goto bye;
3241 }
3242
3243
3244
3245
3246
3247
3248
3249 memset(&caps_cmd, 0, sizeof(caps_cmd));
3250 caps_cmd.op_to_write =
3251 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3252 FW_CMD_REQUEST_F |
3253 FW_CMD_READ_F);
3254 caps_cmd.cfvalid_to_len16 =
3255 htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
3256 FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
3257 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
3258 FW_LEN16(caps_cmd));
3259 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3260 &caps_cmd);
3261
3262
3263
3264
3265
3266
3267
3268 if (ret == -ENOENT) {
3269 memset(&caps_cmd, 0, sizeof(caps_cmd));
3270 caps_cmd.op_to_write =
3271 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3272 FW_CMD_REQUEST_F |
3273 FW_CMD_READ_F);
3274 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
3275 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
3276 sizeof(caps_cmd), &caps_cmd);
3277 config_name = "Firmware Default";
3278 }
3279
3280 config_issued = 1;
3281 if (ret < 0)
3282 goto bye;
3283
3284 finiver = ntohl(caps_cmd.finiver);
3285 finicsum = ntohl(caps_cmd.finicsum);
3286 cfcsum = ntohl(caps_cmd.cfcsum);
3287 if (finicsum != cfcsum)
3288 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
3289 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
3290 finicsum, cfcsum);
3291
3292
3293
3294
3295 caps_cmd.op_to_write =
3296 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3297 FW_CMD_REQUEST_F |
3298 FW_CMD_WRITE_F);
3299 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
3300 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3301 NULL);
3302 if (ret < 0)
3303 goto bye;
3304
3305
3306
3307
3308
3309 ret = adap_init0_tweaks(adapter);
3310 if (ret < 0)
3311 goto bye;
3312
3313
3314
3315
3316
3317 ret = t4_fw_initialize(adapter, adapter->mbox);
3318 if (ret < 0)
3319 goto bye;
3320
3321
3322
3323
3324 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
3325 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
3326 config_name, finiver, cfcsum);
3327 return 0;
3328
3329
3330
3331
3332
3333
3334bye:
3335 if (config_issued && ret != -ENOENT)
3336 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
3337 config_name, -ret);
3338 return ret;
3339}
3340
3341static struct fw_info fw_info_array[] = {
3342 {
3343 .chip = CHELSIO_T4,
3344 .fs_name = FW4_CFNAME,
3345 .fw_mod_name = FW4_FNAME,
3346 .fw_hdr = {
3347 .chip = FW_HDR_CHIP_T4,
3348 .fw_ver = __cpu_to_be32(FW_VERSION(T4)),
3349 .intfver_nic = FW_INTFVER(T4, NIC),
3350 .intfver_vnic = FW_INTFVER(T4, VNIC),
3351 .intfver_ri = FW_INTFVER(T4, RI),
3352 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
3353 .intfver_fcoe = FW_INTFVER(T4, FCOE),
3354 },
3355 }, {
3356 .chip = CHELSIO_T5,
3357 .fs_name = FW5_CFNAME,
3358 .fw_mod_name = FW5_FNAME,
3359 .fw_hdr = {
3360 .chip = FW_HDR_CHIP_T5,
3361 .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
3362 .intfver_nic = FW_INTFVER(T5, NIC),
3363 .intfver_vnic = FW_INTFVER(T5, VNIC),
3364 .intfver_ri = FW_INTFVER(T5, RI),
3365 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
3366 .intfver_fcoe = FW_INTFVER(T5, FCOE),
3367 },
3368 }, {
3369 .chip = CHELSIO_T6,
3370 .fs_name = FW6_CFNAME,
3371 .fw_mod_name = FW6_FNAME,
3372 .fw_hdr = {
3373 .chip = FW_HDR_CHIP_T6,
3374 .fw_ver = __cpu_to_be32(FW_VERSION(T6)),
3375 .intfver_nic = FW_INTFVER(T6, NIC),
3376 .intfver_vnic = FW_INTFVER(T6, VNIC),
3377 .intfver_ofld = FW_INTFVER(T6, OFLD),
3378 .intfver_ri = FW_INTFVER(T6, RI),
3379 .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
3380 .intfver_iscsi = FW_INTFVER(T6, ISCSI),
3381 .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
3382 .intfver_fcoe = FW_INTFVER(T6, FCOE),
3383 },
3384 }
3386};
3387
3388static struct fw_info *find_fw_info(int chip)
3389{
3390 int i;
3391
3392 for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
3393 if (fw_info_array[i].chip == chip)
3394 return &fw_info_array[i];
3395 }
3396 return NULL;
3397}
3398
3399
3400
3401
3402static int adap_init0(struct adapter *adap)
3403{
3404 int ret;
3405 u32 v, port_vec;
3406 enum dev_state state;
3407 u32 params[7], val[7];
3408 struct fw_caps_config_cmd caps_cmd;
3409 int reset = 1;
3410
3411
3412
3413
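	/* Grab the Firmware Device Log parameters as early as possible so
	 * the log is available for debugging the rest of initialization.
	 */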
3414 ret = t4_init_devlog_params(adap);
3415 if (ret < 0)
3416 return ret;
3417
3418
3419 ret = t4_fw_hello(adap, adap->mbox, adap->mbox,
3420 is_kdump_kernel() ? MASTER_MUST : MASTER_MAY, &state);
3421 if (ret < 0) {
3422 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
3423 ret);
3424 return ret;
3425 }
3426 if (ret == adap->mbox)
3427 adap->flags |= MASTER_PF;
3428
3429
3430
3431
3432
3433
3434
3435
3436 t4_get_fw_version(adap, &adap->params.fw_vers);
3437 t4_get_bs_version(adap, &adap->params.bs_vers);
3438 t4_get_tp_version(adap, &adap->params.tp_vers);
3439 t4_get_exprom_version(adap, &adap->params.er_vers);
3440
3441 ret = t4_check_fw_version(adap);
3442
3443 if (ret)
3444 state = DEV_STATE_UNINIT;
3445 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
3446 struct fw_info *fw_info;
3447 struct fw_hdr *card_fw;
3448 const struct firmware *fw;
3449 const u8 *fw_data = NULL;
3450 unsigned int fw_size = 0;
3451
3452
3453
3454
3455 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
3456 if (fw_info == NULL) {
3457 dev_err(adap->pdev_dev,
3458 "unable to get firmware info for chip %d.\n",
3459 CHELSIO_CHIP_VERSION(adap->params.chip));
3460 return -EINVAL;
3461 }
3462
3463
3464
3465
		card_fw = t4_alloc_mem(sizeof(*card_fw));
		if (!card_fw) {
			ret = -ENOMEM;
			goto bye;
		}
3467
3468
3469 ret = request_firmware(&fw, fw_info->fw_mod_name,
3470 adap->pdev_dev);
3471 if (ret < 0) {
3472 dev_err(adap->pdev_dev,
3473 "unable to load firmware image %s, error %d\n",
3474 fw_info->fw_mod_name, ret);
3475 } else {
3476 fw_data = fw->data;
3477 fw_size = fw->size;
3478 }
3479
3480
3481 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
3482 state, &reset);
3483
3484
3485 release_firmware(fw);
3486 t4_free_mem(card_fw);
3487
3488 if (ret < 0)
3489 goto bye;
3490 }
3491
3492
3493
3494
3495
3496
3497
3498
3499 ret = t4_get_vpd_params(adap, &adap->params.vpd);
3500 if (ret < 0)
3501 goto bye;
3502
3503
3504
3505
3506
3507
3508 v =
3509 FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3510 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
3511 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
3512 if (ret < 0)
3513 goto bye;
3514
3515 adap->params.nports = hweight32(port_vec);
3516 adap->params.portvec = port_vec;
3517
3518
3519
3520
3521 if (state == DEV_STATE_INIT) {
3522 dev_info(adap->pdev_dev, "Coming up as %s: "\
3523 "Adapter already initialized\n",
3524 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
3525 } else {
3526 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
3527 "Initializing adapter\n");
3528
3529
3530
3531
3532 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3533 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
3534 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
3535 params, val);
3536
3537
3538
3539
3540 if (ret < 0) {
3541 dev_err(adap->pdev_dev, "firmware doesn't support "
3542 "Firmware Configuration Files\n");
3543 goto bye;
3544 }
3545
3546
3547
3548
3549
3550 ret = adap_init0_config(adap, reset);
3551 if (ret == -ENOENT) {
3552 dev_err(adap->pdev_dev, "no Configuration File "
3553 "present on adapter.\n");
3554 goto bye;
3555 }
3556 if (ret < 0) {
3557 dev_err(adap->pdev_dev, "could not initialize "
3558 "adapter, error %d\n", -ret);
3559 goto bye;
3560 }
3561 }
3562
3563
3564
3565
3566
3567 ret = t4_sge_init(adap);
3568 if (ret < 0)
3569 goto bye;
3570
3571 if (is_bypass_device(adap->pdev->device))
3572 adap->params.bypass = 1;
3573
3574
3575
3576
3577#define FW_PARAM_DEV(param) \
3578 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
3579 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))
3580
3581#define FW_PARAM_PFVF(param) \
3582 FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
3583 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \
3584 FW_PARAMS_PARAM_Y_V(0) | \
3585 FW_PARAMS_PARAM_Z_V(0)
3586
3587 params[0] = FW_PARAM_PFVF(EQ_START);
3588 params[1] = FW_PARAM_PFVF(L2T_START);
3589 params[2] = FW_PARAM_PFVF(L2T_END);
3590 params[3] = FW_PARAM_PFVF(FILTER_START);
3591 params[4] = FW_PARAM_PFVF(FILTER_END);
3592 params[5] = FW_PARAM_PFVF(IQFLINT_START);
3593 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val);
3594 if (ret < 0)
3595 goto bye;
3596 adap->sge.egr_start = val[0];
3597 adap->l2t_start = val[1];
3598 adap->l2t_end = val[2];
3599 adap->tids.ftid_base = val[3];
3600 adap->tids.nftids = val[4] - val[3] + 1;
3601 adap->sge.ingr_start = val[5];
3602
3603
3604
3605
3606
3607
3608
3609 params[0] = FW_PARAM_PFVF(EQ_END);
3610 params[1] = FW_PARAM_PFVF(IQFLINT_END);
3611 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
3612 if (ret < 0)
3613 goto bye;
3614 adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
3615 adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;
3616
3617 adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
3618 sizeof(*adap->sge.egr_map), GFP_KERNEL);
3619 if (!adap->sge.egr_map) {
3620 ret = -ENOMEM;
3621 goto bye;
3622 }
3623
3624 adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
3625 sizeof(*adap->sge.ingr_map), GFP_KERNEL);
3626 if (!adap->sge.ingr_map) {
3627 ret = -ENOMEM;
3628 goto bye;
3629 }
3630
3631
3632
3633
3634 adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
3635 sizeof(long), GFP_KERNEL);
3636 if (!adap->sge.starving_fl) {
3637 ret = -ENOMEM;
3638 goto bye;
3639 }
3640
3641 adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
3642 sizeof(long), GFP_KERNEL);
3643 if (!adap->sge.txq_maperr) {
3644 ret = -ENOMEM;
3645 goto bye;
3646 }
3647
3648#ifdef CONFIG_DEBUG_FS
3649 adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
3650 sizeof(long), GFP_KERNEL);
3651 if (!adap->sge.blocked_fl) {
3652 ret = -ENOMEM;
3653 goto bye;
3654 }
3655#endif
3656
3657 params[0] = FW_PARAM_PFVF(CLIP_START);
3658 params[1] = FW_PARAM_PFVF(CLIP_END);
3659 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
3660 if (ret < 0)
3661 goto bye;
3662 adap->clipt_start = val[0];
3663 adap->clipt_end = val[1];
3664
3665
3666
3667
3668
3669 adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16;
3670
3671
3672 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
3673 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
3674 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
3675
3676
3677
3678 if ((val[0] != val[1]) && (ret >= 0)) {
3679 adap->flags |= FW_OFLD_CONN;
3680 adap->tids.aftid_base = val[0];
3681 adap->tids.aftid_end = val[1];
3682 }
3683
3684
3685
3686
3687
3688
3689 params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
3690 val[0] = 1;
3691 (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
3692
3693
3694
3695
3696
3697
3698
3699 if (is_t4(adap->params.chip)) {
3700 adap->params.ulptx_memwrite_dsgl = false;
3701 } else {
3702 params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
3703 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
3704 1, params, val);
3705 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
3706 }
3707
3708
3709 params[0] = FW_PARAM_DEV(RI_FR_NSMR_TPTE_WR);
3710 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
3711 1, params, val);
3712 adap->params.fr_nsmr_tpte_wr_support = (ret == 0 && val[0] != 0);
3713
3714
3715
3716
3717
3718 memset(&caps_cmd, 0, sizeof(caps_cmd));
3719 caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3720 FW_CMD_REQUEST_F | FW_CMD_READ_F);
3721 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
3722 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
3723 &caps_cmd);
3724 if (ret < 0)
3725 goto bye;
3726
3727 if (caps_cmd.ofldcaps) {
3728
3729 params[0] = FW_PARAM_DEV(NTID);
3730 params[1] = FW_PARAM_PFVF(SERVER_START);
3731 params[2] = FW_PARAM_PFVF(SERVER_END);
3732 params[3] = FW_PARAM_PFVF(TDDP_START);
3733 params[4] = FW_PARAM_PFVF(TDDP_END);
3734 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
3735 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
3736 params, val);
3737 if (ret < 0)
3738 goto bye;
3739 adap->tids.ntids = val[0];
3740 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
3741 adap->tids.stid_base = val[1];
3742 adap->tids.nstids = val[2] - val[1] + 1;
3743
3744
3745
3746
3747
3748
3749
3750
3751
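		/* When the firmware supports offloaded connections through
		 * filters (and this isn't a bypass adapter), reserve roughly
		 * the top two thirds of the filter region for server filter
		 * TIDs and keep the bottom third as ordinary filter TIDs.
		 */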
3752 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
3753 adap->tids.sftid_base = adap->tids.ftid_base +
3754 DIV_ROUND_UP(adap->tids.nftids, 3);
3755 adap->tids.nsftids = adap->tids.nftids -
3756 DIV_ROUND_UP(adap->tids.nftids, 3);
3757 adap->tids.nftids = adap->tids.sftid_base -
3758 adap->tids.ftid_base;
3759 }
3760 adap->vres.ddp.start = val[3];
3761 adap->vres.ddp.size = val[4] - val[3] + 1;
3762 adap->params.ofldq_wr_cred = val[5];
3763
3764 adap->params.offload = 1;
3765 adap->num_ofld_uld += 1;
3766 }
3767 if (caps_cmd.rdmacaps) {
3768 params[0] = FW_PARAM_PFVF(STAG_START);
3769 params[1] = FW_PARAM_PFVF(STAG_END);
3770 params[2] = FW_PARAM_PFVF(RQ_START);
3771 params[3] = FW_PARAM_PFVF(RQ_END);
3772 params[4] = FW_PARAM_PFVF(PBL_START);
3773 params[5] = FW_PARAM_PFVF(PBL_END);
3774 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
3775 params, val);
3776 if (ret < 0)
3777 goto bye;
3778 adap->vres.stag.start = val[0];
3779 adap->vres.stag.size = val[1] - val[0] + 1;
3780 adap->vres.rq.start = val[2];
3781 adap->vres.rq.size = val[3] - val[2] + 1;
3782 adap->vres.pbl.start = val[4];
3783 adap->vres.pbl.size = val[5] - val[4] + 1;
3784
3785 params[0] = FW_PARAM_PFVF(SQRQ_START);
3786 params[1] = FW_PARAM_PFVF(SQRQ_END);
3787 params[2] = FW_PARAM_PFVF(CQ_START);
3788 params[3] = FW_PARAM_PFVF(CQ_END);
3789 params[4] = FW_PARAM_PFVF(OCQ_START);
3790 params[5] = FW_PARAM_PFVF(OCQ_END);
3791 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params,
3792 val);
3793 if (ret < 0)
3794 goto bye;
3795 adap->vres.qp.start = val[0];
3796 adap->vres.qp.size = val[1] - val[0] + 1;
3797 adap->vres.cq.start = val[2];
3798 adap->vres.cq.size = val[3] - val[2] + 1;
3799 adap->vres.ocq.start = val[4];
3800 adap->vres.ocq.size = val[5] - val[4] + 1;
3801
3802 params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
3803 params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
3804 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params,
3805 val);
3806 if (ret < 0) {
3807 adap->params.max_ordird_qp = 8;
3808 adap->params.max_ird_adapter = 32 * adap->tids.ntids;
3809 ret = 0;
3810 } else {
3811 adap->params.max_ordird_qp = val[0];
3812 adap->params.max_ird_adapter = val[1];
3813 }
3814 dev_info(adap->pdev_dev,
3815 "max_ordird_qp %d max_ird_adapter %d\n",
3816 adap->params.max_ordird_qp,
3817 adap->params.max_ird_adapter);
3818 adap->num_ofld_uld += 2;
3819 }
3820 if (caps_cmd.iscsicaps) {
3821 params[0] = FW_PARAM_PFVF(ISCSI_START);
3822 params[1] = FW_PARAM_PFVF(ISCSI_END);
3823 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
3824 params, val);
3825 if (ret < 0)
3826 goto bye;
3827 adap->vres.iscsi.start = val[0];
3828 adap->vres.iscsi.size = val[1] - val[0] + 1;
3829
3830 adap->num_ofld_uld += 2;
3831 }
3832 if (caps_cmd.cryptocaps) {
3833
3834 params[0] = FW_PARAM_PFVF(NCRYPTO_LOOKASIDE);
3835 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
3836 params, val);
3837 if (ret < 0) {
3838 if (ret != -EINVAL)
3839 goto bye;
3840 } else {
3841 adap->vres.ncrypto_fc = val[0];
3842 }
3843 adap->params.crypto |= ULP_CRYPTO_LOOKASIDE;
3844 adap->num_uld += 1;
3845 }
3846#undef FW_PARAM_PFVF
3847#undef FW_PARAM_DEV
3848
3849
3850
3851
3852
3853
3854 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
3855 if (state != DEV_STATE_INIT) {
3856 int i;
3857
3858
3859
3860
3861
3862
3863
3864
3865
3866
3867
3868
3869
3870
3871
3872
3873
3874
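		/* Tweak the default MTU table: replace the 1492-byte entry
		 * with 1488, the nearest multiple of 8 below it.
		 */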
3875 for (i = 0; i < NMTUS; i++)
3876 if (adap->params.mtus[i] == 1492) {
3877 adap->params.mtus[i] = 1488;
3878 break;
3879 }
3880
3881 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
3882 adap->params.b_wnd);
3883 }
3884 t4_init_sge_params(adap);
3885 adap->flags |= FW_OK;
3886 t4_init_tp_params(adap);
3887 return 0;
3888
3889
3890
3891
3892
3893
3894bye:
3895 kfree(adap->sge.egr_map);
3896 kfree(adap->sge.ingr_map);
3897 kfree(adap->sge.starving_fl);
3898 kfree(adap->sge.txq_maperr);
3899#ifdef CONFIG_DEBUG_FS
3900 kfree(adap->sge.blocked_fl);
3901#endif
3902 if (ret != -ETIMEDOUT && ret != -EIO)
3903 t4_fw_bye(adap, adap->mbox);
3904 return ret;
3905}
3906
3907
3908
3909static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
3910 pci_channel_state_t state)
3911{
3912 int i;
3913 struct adapter *adap = pci_get_drvdata(pdev);
3914
3915 if (!adap)
3916 goto out;
3917
3918 rtnl_lock();
3919 adap->flags &= ~FW_OK;
3920 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
3921 spin_lock(&adap->stats_lock);
3922 for_each_port(adap, i) {
3923 struct net_device *dev = adap->port[i];
3924
3925 netif_device_detach(dev);
3926 netif_carrier_off(dev);
3927 }
3928 spin_unlock(&adap->stats_lock);
3929 disable_interrupts(adap);
3930 if (adap->flags & FULL_INIT_DONE)
3931 cxgb_down(adap);
3932 rtnl_unlock();
3933 if ((adap->flags & DEV_ENABLED)) {
3934 pci_disable_device(pdev);
3935 adap->flags &= ~DEV_ENABLED;
3936 }
3937out: return state == pci_channel_io_perm_failure ?
3938 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
3939}
3940
3941static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
3942{
3943 int i, ret;
3944 struct fw_caps_config_cmd c;
3945 struct adapter *adap = pci_get_drvdata(pdev);
3946
3947 if (!adap) {
3948 pci_restore_state(pdev);
3949 pci_save_state(pdev);
3950 return PCI_ERS_RESULT_RECOVERED;
3951 }
3952
3953 if (!(adap->flags & DEV_ENABLED)) {
3954 if (pci_enable_device(pdev)) {
3955 dev_err(&pdev->dev, "Cannot reenable PCI "
3956 "device after reset\n");
3957 return PCI_ERS_RESULT_DISCONNECT;
3958 }
3959 adap->flags |= DEV_ENABLED;
3960 }
3961
3962 pci_set_master(pdev);
3963 pci_restore_state(pdev);
3964 pci_save_state(pdev);
3965 pci_cleanup_aer_uncorrect_error_status(pdev);
3966
3967 if (t4_wait_dev_ready(adap->regs) < 0)
3968 return PCI_ERS_RESULT_DISCONNECT;
3969 if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
3970 return PCI_ERS_RESULT_DISCONNECT;
3971 adap->flags |= FW_OK;
3972 if (adap_init1(adap, &c))
3973 return PCI_ERS_RESULT_DISCONNECT;
3974
3975 for_each_port(adap, i) {
3976 struct port_info *p = adap2pinfo(adap, i);
3977
3978 ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1,
3979 NULL, NULL);
3980 if (ret < 0)
3981 return PCI_ERS_RESULT_DISCONNECT;
3982 p->viid = ret;
3983 p->xact_addr_filt = -1;
3984 }
3985
3986 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
3987 adap->params.b_wnd);
3988 setup_memwin(adap);
3989 if (cxgb_up(adap))
3990 return PCI_ERS_RESULT_DISCONNECT;
3991 return PCI_ERS_RESULT_RECOVERED;
3992}
3993
3994static void eeh_resume(struct pci_dev *pdev)
3995{
3996 int i;
3997 struct adapter *adap = pci_get_drvdata(pdev);
3998
3999 if (!adap)
4000 return;
4001
4002 rtnl_lock();
4003 for_each_port(adap, i) {
4004 struct net_device *dev = adap->port[i];
4005
4006 if (netif_running(dev)) {
4007 link_start(dev);
4008 cxgb_set_rxmode(dev);
4009 }
4010 netif_device_attach(dev);
4011 }
4012 rtnl_unlock();
4013}
4014
4015static const struct pci_error_handlers cxgb4_eeh = {
4016 .error_detected = eeh_err_detected,
4017 .slot_reset = eeh_slot_reset,
4018 .resume = eeh_resume,
4019};
4020
4021
4022
4023
4024static inline bool is_x_10g_port(const struct link_config *lc)
4025{
4026 unsigned int speeds, high_speeds;
4027
4028 speeds = FW_PORT_CAP_SPEED_V(FW_PORT_CAP_SPEED_G(lc->supported));
4029 high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G);
4030
4031 return high_speeds != 0;
4032}
4033
4034
4035
4036
4037
4038
4039static void cfg_queues(struct adapter *adap)
4040{
4041 struct sge *s = &adap->sge;
4042 int i = 0, n10g = 0, qidx = 0;
4043#ifndef CONFIG_CHELSIO_T4_DCB
4044 int q10g = 0;
4045#endif
4046
4047
4048
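	/* In a kdump kernel, or if ULD memory allocation fails, run without
	 * offload and crypto support to reduce memory usage.
	 */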
4049 if (is_kdump_kernel()) {
4050 adap->params.offload = 0;
4051 adap->params.crypto = 0;
4052 } else if (is_uld(adap) && t4_uld_mem_alloc(adap)) {
4053 adap->params.offload = 0;
4054 adap->params.crypto = 0;
4055 }

	/* Count the number of 10Gb/s or faster ports */
	for_each_port(adap, i)
		n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
4058#ifdef CONFIG_CHELSIO_T4_DCB
4059
4060
4061
4062
4063 if (adap->params.nports * 8 > MAX_ETH_QSETS) {
4064 dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
4065 MAX_ETH_QSETS, adap->params.nports * 8);
4066 BUG_ON(1);
4067 }
4068
4069 for_each_port(adap, i) {
4070 struct port_info *pi = adap2pinfo(adap, i);
4071
4072 pi->first_qset = qidx;
4073 pi->nqsets = 8;
4074 qidx += pi->nqsets;
4075 }
4076#else
4077
4078
4079
4080
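	/* Default to one queue set per port, but give 10Gb/s-or-faster ports
	 * a larger share of the available Ethernet queue sets, capped at the
	 * default number of RSS queues.
	 */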
4081 if (n10g)
4082 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
4083 if (q10g > netif_get_num_default_rss_queues())
4084 q10g = netif_get_num_default_rss_queues();
4085
4086 for_each_port(adap, i) {
4087 struct port_info *pi = adap2pinfo(adap, i);
4088
4089 pi->first_qset = qidx;
4090 pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
4091 qidx += pi->nqsets;
4092 }
4093#endif
4094
4095 s->ethqsets = qidx;
4096 s->max_ethqsets = qidx;
4097
4098 if (is_uld(adap)) {
4099
4100
4101
4102
4103
4104 if (n10g) {
4105 i = min_t(int, MAX_OFLD_QSETS, num_online_cpus());
4106 s->ofldqsets = roundup(i, adap->params.nports);
4107 } else {
4108 s->ofldqsets = adap->params.nports;
4109 }
4110 }
4111
4112 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
4113 struct sge_eth_rxq *r = &s->ethrxq[i];
4114
4115 init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
4116 r->fl.size = 72;
4117 }
4118
4119 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
4120 s->ethtxq[i].q.size = 1024;
4121
4122 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
4123 s->ctrlq[i].q.size = 512;
4124
4125 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
4126 init_rspq(adap, &s->intrq, 0, 1, 512, 64);
4127}
4128
4129
4130
4131
4132
4133static void reduce_ethqs(struct adapter *adap, int n)
4134{
4135 int i;
4136 struct port_info *pi;
4137
4138 while (n < adap->sge.ethqsets)
4139 for_each_port(adap, i) {
4140 pi = adap2pinfo(adap, i);
4141 if (pi->nqsets > 1) {
4142 pi->nqsets--;
4143 adap->sge.ethqsets--;
4144 if (adap->sge.ethqsets <= n)
4145 break;
4146 }
4147 }
4148
4149 n = 0;
4150 for_each_port(adap, i) {
4151 pi = adap2pinfo(adap, i);
4152 pi->first_qset = n;
4153 n += pi->nqsets;
4154 }
4155}
4156
4157static int get_msix_info(struct adapter *adap)
4158{
4159 struct uld_msix_info *msix_info;
4160 unsigned int max_ingq = 0;
4161
4162 if (is_offload(adap))
4163 max_ingq += MAX_OFLD_QSETS * adap->num_ofld_uld;
4164 if (is_pci_uld(adap))
4165 max_ingq += MAX_OFLD_QSETS * adap->num_uld;
4166
4167 if (!max_ingq)
4168 goto out;
4169
4170 msix_info = kcalloc(max_ingq, sizeof(*msix_info), GFP_KERNEL);
4171 if (!msix_info)
4172 return -ENOMEM;
4173
4174 adap->msix_bmap_ulds.msix_bmap = kcalloc(BITS_TO_LONGS(max_ingq),
4175 sizeof(long), GFP_KERNEL);
4176 if (!adap->msix_bmap_ulds.msix_bmap) {
4177 kfree(msix_info);
4178 return -ENOMEM;
4179 }
4180 spin_lock_init(&adap->msix_bmap_ulds.lock);
4181 adap->msix_info_ulds = msix_info;
4182out:
4183 return 0;
4184}
4185
4186static void free_msix_info(struct adapter *adap)
4187{
4188 if (!(adap->num_uld && adap->num_ofld_uld))
4189 return;
4190
4191 kfree(adap->msix_info_ulds);
4192 kfree(adap->msix_bmap_ulds.msix_bmap);
4193}
4194
4195
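/* Extra MSI-X vectors needed beyond the per-queue ones: the firmware event
 * queue and non-data interrupts.
 */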
4196#define EXTRA_VECS 2
4197
4198static int enable_msix(struct adapter *adap)
4199{
4200 int ofld_need = 0, uld_need = 0;
4201 int i, j, want, need, allocated;
4202 struct sge *s = &adap->sge;
4203 unsigned int nchan = adap->params.nports;
4204 struct msix_entry *entries;
4205 int max_ingq = MAX_INGQ;
4206
4207 if (is_pci_uld(adap))
4208 max_ingq += (MAX_OFLD_QSETS * adap->num_uld);
4209 if (is_offload(adap))
4210 max_ingq += (MAX_OFLD_QSETS * adap->num_ofld_uld);
4211 entries = kmalloc(sizeof(*entries) * (max_ingq + 1),
4212 GFP_KERNEL);
4213 if (!entries)
4214 return -ENOMEM;
4215
4216
4217 if (get_msix_info(adap)) {
4218 adap->params.offload = 0;
4219 adap->params.crypto = 0;
4220 }
4221
4222 for (i = 0; i < max_ingq + 1; ++i)
4223 entries[i].entry = i;
4224
4225 want = s->max_ethqsets + EXTRA_VECS;
4226 if (is_offload(adap)) {
4227 want += adap->num_ofld_uld * s->ofldqsets;
4228 ofld_need = adap->num_ofld_uld * nchan;
4229 }
4230 if (is_pci_uld(adap)) {
4231 want += adap->num_uld * s->ofldqsets;
4232 uld_need = adap->num_uld * nchan;
4233 }
4234#ifdef CONFIG_CHELSIO_T4_DCB
4235
4236
4237
4238 need = 8 * adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
4239#else
4240 need = adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
4241#endif
4242 allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
4243 if (allocated < 0) {
4244 dev_info(adap->pdev_dev, "not enough MSI-X vectors left,"
4245 " not using MSI-X\n");
4246 kfree(entries);
4247 return allocated;
4248 }
4249
4250
4251
4252
4253
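	/* If fewer vectors were granted than requested, trim the number of
	 * Ethernet queue sets to what remains after the mandatory and ULD
	 * vectors are set aside.
	 */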
4254 i = allocated - EXTRA_VECS - ofld_need - uld_need;
4255 if (i < s->max_ethqsets) {
4256 s->max_ethqsets = i;
4257 if (i < s->ethqsets)
4258 reduce_ethqs(adap, i);
4259 }
4260 if (is_uld(adap)) {
4261 if (allocated < want)
4262 s->nqs_per_uld = nchan;
4263 else
4264 s->nqs_per_uld = s->ofldqsets;
4265 }
4266
4267 for (i = 0; i < (s->max_ethqsets + EXTRA_VECS); ++i)
4268 adap->msix_info[i].vec = entries[i].vector;
4269 if (is_uld(adap)) {
4270 for (j = 0 ; i < allocated; ++i, j++) {
4271 adap->msix_info_ulds[j].vec = entries[i].vector;
4272 adap->msix_info_ulds[j].idx = i;
4273 }
4274 adap->msix_bmap_ulds.mapsize = j;
4275 }
4276 dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, "
4277 "nic %d per uld %d\n",
4278 allocated, s->max_ethqsets, s->nqs_per_uld);
4279
4280 kfree(entries);
4281 return 0;
4282
4283}
4284
4285#undef EXTRA_VECS
4286
4287static int init_rss(struct adapter *adap)
4288{
4289 unsigned int i;
4290 int err;
4291
4292 err = t4_init_rss_mode(adap, adap->mbox);
4293 if (err)
4294 return err;
4295
4296 for_each_port(adap, i) {
4297 struct port_info *pi = adap2pinfo(adap, i);
4298
4299 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
4300 if (!pi->rss)
4301 return -ENOMEM;
4302 }
4303 return 0;
4304}
4305
4306static int cxgb4_get_pcie_dev_link_caps(struct adapter *adap,
4307 enum pci_bus_speed *speed,
4308 enum pcie_link_width *width)
4309{
4310 u32 lnkcap1, lnkcap2;
4311 int err1, err2;
4312
4313#define PCIE_MLW_CAP_SHIFT 4
4314
4315 *speed = PCI_SPEED_UNKNOWN;
4316 *width = PCIE_LNK_WIDTH_UNKNOWN;
4317
4318 err1 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP,
4319 &lnkcap1);
4320 err2 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP2,
4321 &lnkcap2);
4322 if (!err2 && lnkcap2) {
4323 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
4324 *speed = PCIE_SPEED_8_0GT;
4325 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
4326 *speed = PCIE_SPEED_5_0GT;
4327 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
4328 *speed = PCIE_SPEED_2_5GT;
4329 }
4330 if (!err1) {
4331 *width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
4332 if (!lnkcap2) {
4333 if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
4334 *speed = PCIE_SPEED_5_0GT;
4335 else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
4336 *speed = PCIE_SPEED_2_5GT;
4337 }
4338 }
4339
4340 if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
4341 return err1 ? err1 : err2 ? err2 : -EINVAL;
4342 return 0;
4343}
4344
4345static void cxgb4_check_pcie_caps(struct adapter *adap)
4346{
4347 enum pcie_link_width width, width_cap;
4348 enum pci_bus_speed speed, speed_cap;
4349
4350#define PCIE_SPEED_STR(speed) \
4351 (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
4352 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
4353 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
4354 "Unknown")
4355
4356 if (cxgb4_get_pcie_dev_link_caps(adap, &speed_cap, &width_cap)) {
4357 dev_warn(adap->pdev_dev,
4358 "Unable to determine PCIe device BW capabilities\n");
4359 return;
4360 }
4361
4362 if (pcie_get_minimum_link(adap->pdev, &speed, &width) ||
4363 speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
4364 dev_warn(adap->pdev_dev,
4365 "Unable to determine PCI Express bandwidth.\n");
4366 return;
4367 }
4368
4369 dev_info(adap->pdev_dev, "PCIe link speed is %s, device supports %s\n",
4370 PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
4371 dev_info(adap->pdev_dev, "PCIe link width is x%d, device supports x%d\n",
4372 width, width_cap);
4373 if (speed < speed_cap || width < width_cap)
4374 dev_info(adap->pdev_dev,
4375 "A slot with more lanes and/or higher speed is "
4376 "suggested for optimal performance.\n");
4377}
4378
4379
4380static void print_adapter_info(struct adapter *adapter)
4381{
4382
4383 dev_info(adapter->pdev_dev, "Chelsio %s rev %d\n",
4384 adapter->params.vpd.id,
4385 CHELSIO_CHIP_RELEASE(adapter->params.chip));
4386 dev_info(adapter->pdev_dev, "S/N: %s, P/N: %s\n",
4387 adapter->params.vpd.sn, adapter->params.vpd.pn);
4388
4389
4390 if (!adapter->params.fw_vers)
4391 dev_warn(adapter->pdev_dev, "No firmware loaded\n");
4392 else
4393 dev_info(adapter->pdev_dev, "Firmware version: %u.%u.%u.%u\n",
4394 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
4395 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
4396 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
4397 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers));
4398
4399
4400
4401
4402 if (!adapter->params.bs_vers)
4403 dev_info(adapter->pdev_dev, "No bootstrap loaded\n");
4404 else
4405 dev_info(adapter->pdev_dev, "Bootstrap version: %u.%u.%u.%u\n",
4406 FW_HDR_FW_VER_MAJOR_G(adapter->params.bs_vers),
4407 FW_HDR_FW_VER_MINOR_G(adapter->params.bs_vers),
4408 FW_HDR_FW_VER_MICRO_G(adapter->params.bs_vers),
4409 FW_HDR_FW_VER_BUILD_G(adapter->params.bs_vers));
4410
4411
4412 if (!adapter->params.tp_vers)
4413 dev_warn(adapter->pdev_dev, "No TP Microcode loaded\n");
4414 else
4415 dev_info(adapter->pdev_dev,
4416 "TP Microcode version: %u.%u.%u.%u\n",
4417 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
4418 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
4419 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
4420 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
4421
4422
4423 if (!adapter->params.er_vers)
4424 dev_info(adapter->pdev_dev, "No Expansion ROM loaded\n");
4425 else
4426 dev_info(adapter->pdev_dev,
4427 "Expansion ROM version: %u.%u.%u.%u\n",
4428 FW_HDR_FW_VER_MAJOR_G(adapter->params.er_vers),
4429 FW_HDR_FW_VER_MINOR_G(adapter->params.er_vers),
4430 FW_HDR_FW_VER_MICRO_G(adapter->params.er_vers),
4431 FW_HDR_FW_VER_BUILD_G(adapter->params.er_vers));
4432
4433
4434 dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n",
4435 is_offload(adapter) ? "R" : "",
4436 ((adapter->flags & USING_MSIX) ? "MSI-X" :
4437 (adapter->flags & USING_MSI) ? "MSI" : ""),
4438 is_offload(adapter) ? "Offload" : "non-Offload");
4439}
4440
4441static void print_port_info(const struct net_device *dev)
4442{
4443 char buf[80];
4444 char *bufp = buf;
4445 const char *spd = "";
4446 const struct port_info *pi = netdev_priv(dev);
4447 const struct adapter *adap = pi->adapter;
4448
4449 if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
4450 spd = " 2.5 GT/s";
4451 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
4452 spd = " 5 GT/s";
4453 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
4454 spd = " 8 GT/s";
4455
4456 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
4457 bufp += sprintf(bufp, "100M/");
4458 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
4459 bufp += sprintf(bufp, "1G/");
4460 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
4461 bufp += sprintf(bufp, "10G/");
4462 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G)
4463 bufp += sprintf(bufp, "25G/");
4464 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
4465 bufp += sprintf(bufp, "40G/");
4466 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100G)
4467 bufp += sprintf(bufp, "100G/");
4468 if (bufp != buf)
4469 --bufp;
4470 sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
4471
4472 netdev_info(dev, "%s: Chelsio %s (%s) %s\n",
4473 dev->name, adap->params.vpd.id, adap->name, buf);
4474}
4475
4476static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
4477{
4478 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
4479}
4480
4481
4482
4483
4484
4485
4486
4487
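/*
 * Free adapter resources allocated at probe/init time: L2T and TID tables,
 * scheduler and TC u32 state, SGE maps, MSI/MSI-X state, the per-port
 * virtual interfaces and net devices, and finally our firmware connection.
 */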
4488static void free_some_resources(struct adapter *adapter)
4489{
4490 unsigned int i;
4491
4492 t4_free_mem(adapter->l2t);
4493 t4_cleanup_sched(adapter);
4494 t4_free_mem(adapter->tids.tid_tab);
4495 cxgb4_cleanup_tc_u32(adapter);
4496 kfree(adapter->sge.egr_map);
4497 kfree(adapter->sge.ingr_map);
4498 kfree(adapter->sge.starving_fl);
4499 kfree(adapter->sge.txq_maperr);
4500#ifdef CONFIG_DEBUG_FS
4501 kfree(adapter->sge.blocked_fl);
4502#endif
4503 disable_msi(adapter);
4504
4505 for_each_port(adapter, i)
4506 if (adapter->port[i]) {
4507 struct port_info *pi = adap2pinfo(adapter, i);
4508
4509 if (pi->viid != 0)
4510 t4_free_vi(adapter, adapter->mbox, adapter->pf,
4511 0, pi->viid);
4512 kfree(adap2pinfo(adapter, i)->rss);
4513 free_netdev(adapter->port[i]);
4514 }
4515 if (adapter->flags & FW_OK)
4516 t4_fw_bye(adapter, adapter->pf);
4517}
4518
4519#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
4520#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
4521 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
4522#define SEGMENT_SIZE 128
4523
4524static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
4525{
4526 u16 device_id;
4527
4528
4529 pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
4530
4531 switch (device_id >> 12) {
4532 case CHELSIO_T4:
4533 return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
4534 case CHELSIO_T5:
4535 return CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
4536 case CHELSIO_T6:
4537 return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
4538 default:
4539 dev_err(&pdev->dev, "Device %d is not supported\n",
4540 device_id);
4541 }
4542 return -EINVAL;
4543}
4544
4545#ifdef CONFIG_PCI_IOV
4546static void dummy_setup(struct net_device *dev)
4547{
4548 dev->type = ARPHRD_NONE;
4549 dev->mtu = 0;
4550 dev->hard_header_len = 0;
4551 dev->addr_len = 0;
4552 dev->tx_queue_len = 0;
4553 dev->flags |= IFF_NOARP;
4554 dev->priv_flags |= IFF_NO_QUEUE;
4555
4556
4557 dev->netdev_ops = &cxgb4_mgmt_netdev_ops;
4558 dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops;
4559 dev->destructor = free_netdev;
4560}
4561
4562static int config_mgmt_dev(struct pci_dev *pdev)
4563{
4564 struct adapter *adap = pci_get_drvdata(pdev);
4565 struct net_device *netdev;
4566 struct port_info *pi;
4567 char name[IFNAMSIZ];
4568 int err;
4569
4570 snprintf(name, IFNAMSIZ, "mgmtpf%d%d", adap->adap_idx, adap->pf);
4571 netdev = alloc_netdev(sizeof(struct port_info), name, dummy_setup);
4572 if (!netdev)
4573 return -ENOMEM;
4574
4575 pi = netdev_priv(netdev);
4576 pi->adapter = adap;
4577 pi->port_id = adap->pf % adap->params.nports;
4578 SET_NETDEV_DEV(netdev, &pdev->dev);
4579
4580 adap->port[0] = netdev;
4581
4582 err = register_netdev(adap->port[0]);
4583 if (err) {
4584 pr_info("Unable to register VF mgmt netdev %s\n", name);
4585 free_netdev(adap->port[0]);
4586 adap->port[0] = NULL;
4587 return err;
4588 }
4589 return 0;
4590}
4591
4592static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
4593{
4594 struct adapter *adap = pci_get_drvdata(pdev);
4595 int err = 0;
4596 int current_vfs = pci_num_vf(pdev);
4597 u32 pcie_fw;
4598
4599 pcie_fw = readl(adap->regs + PCIE_FW_A);
4600
4601 if (!(pcie_fw & PCIE_FW_INIT_F) ||
4602 !(pcie_fw & PCIE_FW_MASTER_VLD_F) ||
4603 PCIE_FW_MASTER_G(pcie_fw) != 4) {
4604 dev_warn(&pdev->dev,
4605 "cxgb4 driver needs to be MASTER to support SRIOV\n");
4606 return -EOPNOTSUPP;
4607 }
4608
4609
4610
4611
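	/* The SR-IOV configuration can't be changed while any VF is still
	 * assigned to a guest; report the current VF count instead.
	 */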
4612 if (current_vfs && pci_vfs_assigned(pdev)) {
4613 dev_err(&pdev->dev,
4614 "Cannot modify SR-IOV while VFs are assigned\n");
4615 num_vfs = current_vfs;
4616 return num_vfs;
4617 }
4618
4619
4620
4621
4622
4623
4624 if (!num_vfs) {
4625 pci_disable_sriov(pdev);
4626 if (adap->port[0]) {
4627 unregister_netdev(adap->port[0]);
4628 adap->port[0] = NULL;
4629 }
4630
4631 kfree(adap->vfinfo);
4632 adap->vfinfo = NULL;
4633 adap->num_vfs = 0;
4634 return num_vfs;
4635 }
4636
4637 if (num_vfs != current_vfs) {
4638 err = pci_enable_sriov(pdev, num_vfs);
4639 if (err)
4640 return err;
4641
4642 adap->num_vfs = num_vfs;
4643 err = config_mgmt_dev(pdev);
4644 if (err)
4645 return err;
4646 }
4647
4648 adap->vfinfo = kcalloc(adap->num_vfs,
4649 sizeof(struct vf_info), GFP_KERNEL);
4650 if (adap->vfinfo)
4651 fill_vf_station_mac_addr(adap);
4652 return num_vfs;
4653}
4654#endif
4655
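/* PCI probe routine: map the register BAR, identify the chip and PF,
 * initialize the adapter, allocate one net device per port and register
 * them.  If this PCI function is not the one the driver manages, only the
 * minimal state needed to later configure SR-IOV is set up.
 */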
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int func, i, err, s_qpp, qpp, num_seg;
	struct port_info *pi;
	bool highdma = false;
	struct adapter *adapter = NULL;
	struct net_device *netdev;
	void __iomem *regs;
	u32 whoami, pl_rev;
	enum chip_type chip;
	static int adap_idx = 1;
	u32 v, port_vec;

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_disable_device;
	}

	err = t4_wait_dev_ready(regs);
	if (err < 0)
		goto out_unmap_bar0;

	/* We control everything through one PF */
	whoami = readl(regs + PL_WHOAMI_A);
	pl_rev = REV_G(readl(regs + PL_REV_A));
	chip = get_chip_type(pdev, pl_rev);
	func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
	       SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
	if (func != ent->driver_data) {
#ifndef CONFIG_PCI_IOV
		iounmap(regs);
#endif
		pci_disable_device(pdev);
		pci_save_state(pdev);	/* to restore SR-IOV later */
		goto sriov;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = true;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_unmap_bar0;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_unmap_bar0;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	enable_pcie_relaxed_ordering(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_unmap_bar0;
	}
	adap_idx++;

	adapter->workq = create_singlethread_workqueue("cxgb4");
	if (!adapter->workq) {
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
				    (sizeof(struct mbox_cmd) *
				     T4_OS_LOG_MBOX_CMDS),
				    GFP_KERNEL);
	if (!adapter->mbox_log) {
		err = -ENOMEM;
		goto out_free_adapter;
	}
	adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;

	/* PCI device has been enabled */
	adapter->flags |= DEV_ENABLED;

	adapter->regs = regs;
	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->name = pci_name(pdev);
	adapter->mbox = func;
	adapter->pf = func;
	adapter->msg_enable = DFLT_MSG_ENABLE;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);
	spin_lock_init(&adapter->mbox_lock);

	INIT_LIST_HEAD(&adapter->mlist.list);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
	INIT_WORK(&adapter->db_full_task, process_db_full);
	INIT_WORK(&adapter->db_drop_task, process_db_drop);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_free_adapter;

	if (!is_t4(adapter->params.chip)) {
		s_qpp = (QUEUESPERPAGEPF0_S +
			 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
			 adapter->pf);
		qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
		      SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
		num_seg = PAGE_SIZE / SEGMENT_SIZE;

		/* Each segment is 128B.  Write combining via BAR2 is usable
		 * only when the number of egress queues per page is no more
		 * than the number of 128B segments that fit in a page.
		 */
		if (qpp > num_seg) {
			dev_err(&pdev->dev,
				"Incorrect number of egress queues per page\n");
			err = -EINVAL;
			goto out_free_adapter;
		}
		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
					   pci_resource_len(pdev, 2));
		if (!adapter->bar2) {
			dev_err(&pdev->dev, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_free_adapter;
		}
	}

	setup_memwin(adapter);
	err = adap_init0(adapter);
#ifdef CONFIG_DEBUG_FS
	bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
#endif
	setup_memwin_rdma(adapter);
	if (err)
		goto out_unmap_bar;

	/* configure SGE_STAT_CFG_A to read WC stats */
	if (!is_t4(adapter->params.chip))
		t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |
			     (is_t5(adapter->params.chip) ? STATMODE_V(0) :
			      T6_STATMODE_V(0)));

	for_each_port(adapter, i) {
		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
			NETIF_F_HW_TC;
		if (highdma)
			netdev->hw_features |= NETIF_F_HIGHDMA;
		netdev->features |= netdev->hw_features;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->priv_flags |= IFF_UNICAST_FLT;

		netdev->netdev_ops = &cxgb4_netdev_ops;
#ifdef CONFIG_CHELSIO_T4_DCB
		netdev->dcbnl_ops = &cxgb4_dcb_ops;
		cxgb4_dcb_state_init(netdev);
#endif
		cxgb4_set_ethtool_ops(netdev);
	}

	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	} else if (adapter->params.nports == 1) {
		/* If we don't have a connection to the firmware -- possibly
		 * because of an error -- grab the raw VPD parameters so we
		 * can set a proper MAC address on the single debug network
		 * interface that we've created.
		 */
		u8 hw_addr[ETH_ALEN];
		u8 *na = adapter->params.vpd.na;

		err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
		if (!err) {
			for (i = 0; i < ETH_ALEN; i++)
				hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
					      hex2val(na[2 * i + 1]));
			t4_set_hw_addr(adapter, 0, hw_addr);
		}
	}

	/* Configure queues and allocate tables now; they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);

	adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

#if IS_ENABLED(CONFIG_IPV6)
	if ((CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) &&
	    (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) {
		/* CLIP functionality is not present in hardware,
		 * hence disable all offload features
		 */
		dev_warn(&pdev->dev,
			 "CLIP not enabled in hardware, continuing\n");
		adapter->params.offload = 0;
	} else {
		adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
						  adapter->clipt_end);
		if (!adapter->clipt) {
			/* We tolerate a lack of CLIP table, giving up
			 * some functionality
			 */
			dev_warn(&pdev->dev,
				 "could not allocate Clip table, continuing\n");
			adapter->params.offload = 0;
		}
	}
#endif

	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		pi->sched_tbl = t4_init_sched(adapter->params.nsched_cls);
		if (!pi->sched_tbl)
			dev_warn(&pdev->dev,
				 "could not activate scheduling on port %d\n",
				 i);
	}

	if (tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
		adapter->params.offload = 0;
	} else {
		adapter->tc_u32 = cxgb4_init_tc_u32(adapter);
		if (!adapter->tc_u32)
			dev_warn(&pdev->dev,
				 "could not offload tc u32, continuing\n");
	}

	if (is_offload(adapter)) {
		if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
			u32 hash_base, hash_reg;

			if (chip <= CHELSIO_T5) {
				hash_reg = LE_DB_TID_HASHBASE_A;
				hash_base = t4_read_reg(adapter, hash_reg);
				adapter->tids.hash_base = hash_base / 4;
			} else {
				hash_reg = T6_LE_DB_HASH_TID_BASE_A;
				hash_base = t4_read_reg(adapter, hash_reg);
				adapter->tids.hash_base = hash_base;
			}
		}
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0) {
		adapter->flags |= USING_MSI;
		if (msi > 1)
			free_msix_info(adapter);
	}

	/* check for PCI Express bandwidth capabilities */
	cxgb4_check_pcie_caps(adapter);

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	/* The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		adapter->port[i]->dev_port = pi->lport;
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_uld(adapter)) {
		mutex_lock(&uld_mutex);
		list_add_tail(&adapter->list_node, &adapter_list);
		mutex_unlock(&uld_mutex);
	}

	print_adapter_info(adapter);
	setup_fw_sge_queues(adapter);
	return 0;

sriov:
#ifdef CONFIG_PCI_IOV
	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto free_pci_region;
	}

	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->name = pci_name(pdev);
	adapter->mbox = func;
	adapter->pf = func;
	adapter->regs = regs;
	adapter->adap_idx = adap_idx;
	adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
				    (sizeof(struct mbox_cmd) *
				     T4_OS_LOG_MBOX_CMDS),
				    GFP_KERNEL);
	if (!adapter->mbox_log) {
		err = -ENOMEM;
		goto free_adapter;
	}
	spin_lock_init(&adapter->mbox_lock);
	INIT_LIST_HEAD(&adapter->mlist.list);

	v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
	    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
	err = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, 1,
			      &v, &port_vec);
	if (err < 0) {
		dev_err(adapter->pdev_dev, "Could not fetch port params\n");
		goto free_adapter;
	}

	adapter->params.nports = hweight32(port_vec);
	pci_set_drvdata(pdev, adapter);
	return 0;

 free_adapter:
	kfree(adapter);
 free_pci_region:
	iounmap(regs);
	pci_disable_sriov(pdev);
	pci_release_regions(pdev);
	return err;
#else
	return 0;
#endif

 out_free_dev:
	free_some_resources(adapter);
	if (adapter->flags & USING_MSIX)
		free_msix_info(adapter);
	if (adapter->num_uld || adapter->num_ofld_uld)
		t4_uld_mem_free(adapter);
 out_unmap_bar:
	if (!is_t4(adapter->params.chip))
		iounmap(adapter->bar2);
 out_free_adapter:
	if (adapter->workq)
		destroy_workqueue(adapter->workq);

	kfree(adapter->mbox_log);
	kfree(adapter);
 out_unmap_bar0:
	iounmap(regs);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	return err;
}

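/* PCI remove routine: undo everything done in init_one() for the fully
 * initialized PF4 adapter, or tear down the minimal SR-IOV state otherwise.
 */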
static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter) {
		pci_release_regions(pdev);
		return;
	}

	if (adapter->pf == 4) {
		int i;

		/* Tear down per-adapter Work Queue first since it can contain
		 * references to our adapter data structure.
		 */
		destroy_workqueue(adapter->workq);

		if (is_uld(adapter))
			detach_ulds(adapter);

		disable_interrupts(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		debugfs_remove_recursive(adapter->debugfs_root);

		/* If we allocated filters, free up state associated with any
		 * valid filters.
		 */
		clear_all_filters(adapter);

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		if (adapter->flags & USING_MSIX)
			free_msix_info(adapter);
		if (adapter->num_uld || adapter->num_ofld_uld)
			t4_uld_mem_free(adapter);
		free_some_resources(adapter);
#if IS_ENABLED(CONFIG_IPV6)
		t4_cleanup_clip_tbl(adapter);
#endif
		iounmap(adapter->regs);
		if (!is_t4(adapter->params.chip))
			iounmap(adapter->bar2);
		pci_disable_pcie_error_reporting(pdev);
		if ((adapter->flags & DEV_ENABLED)) {
			pci_disable_device(pdev);
			adapter->flags &= ~DEV_ENABLED;
		}
		pci_release_regions(pdev);
		kfree(adapter->mbox_log);
		synchronize_rcu();
		kfree(adapter);
	}
#ifdef CONFIG_PCI_IOV
	else {
		if (adapter->port[0])
			unregister_netdev(adapter->port[0]);
		iounmap(adapter->regs);
		kfree(adapter->vfinfo);
		kfree(adapter);
		pci_disable_sriov(pdev);
		pci_release_regions(pdev);
	}
#endif
}

/* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt
 * delivery.  This is essentially a stripped down version of the PCI remove()
 * function where we do the minimal amount of work necessary to shut down any
 * further activity.
 */
static void shutdown_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	/* As with remove_one() above, we only want to do cleanup on PCI
	 * devices which went all the way through init_one() and got an
	 * adapter associated with them.
	 */
	if (!adapter) {
		pci_release_regions(pdev);
		return;
	}

	if (adapter->pf == 4) {
		int i;

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				cxgb_close(adapter->port[i]);

		t4_uld_clean_up(adapter);
		disable_interrupts(adapter);
		disable_msi(adapter);

		t4_sge_stop(adapter);
		if (adapter->flags & FW_OK)
			t4_fw_bye(adapter, adapter->mbox);
	}
#ifdef CONFIG_PCI_IOV
	else {
		if (adapter->port[0])
			unregister_netdev(adapter->port[0]);
		iounmap(adapter->regs);
		kfree(adapter->vfinfo);
		kfree(adapter);
		pci_disable_sriov(pdev);
		pci_release_regions(pdev);
	}
#endif
}

static struct pci_driver cxgb4_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe    = init_one,
	.remove   = remove_one,
	.shutdown = shutdown_one,
#ifdef CONFIG_PCI_IOV
	.sriov_configure = cxgb4_iov_configure,
#endif
	.err_handler = &cxgb4_eeh,
};

static int __init cxgb4_init_module(void)
{
	int ret;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warn("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		debugfs_remove(cxgb4_debugfs_root);

#if IS_ENABLED(CONFIG_IPV6)
	if (!inet6addr_registered) {
		register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		inet6addr_registered = true;
	}
#endif

	return ret;
}

static void __exit cxgb4_cleanup_module(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (inet6addr_registered) {
		unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		inet6addr_registered = false;
	}
#endif
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);