/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/bonding.h>
#include <linux/uaccess.h>
#include <linux/crash_dump.h>
#include <net/udp_tunnel.h>
#include <net/xfrm.h>
#if defined(CONFIG_CHELSIO_TLS_DEVICE)
#include <net/tls.h>
#endif

#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4fw_version.h"
#include "cxgb4_dcb.h"
#include "srq.h"
#include "cxgb4_debugfs.h"
#include "clip_tbl.h"
#include "l2t.h"
#include "smt.h"
#include "sched.h"
#include "cxgb4_tc_u32.h"
#include "cxgb4_tc_flower.h"
#include "cxgb4_tc_mqprio.h"
#include "cxgb4_tc_matchall.h"
#include "cxgb4_ptp.h"
#include "cxgb4_cudbg.h"

char cxgb4_driver_name[] = KBUILD_MODNAME;

#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

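/* Macros needed to support the PCI Device ID Table */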
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct pci_device_id cxgb4_pci_tbl[] = {
#define CXGB4_UNIFIED_PF 0x4

#define CH_PCI_DEVICE_ID_FUNCTION CXGB4_UNIFIED_PF

/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine
 * is called for both.
 */
#define CH_PCI_DEVICE_ID_FUNCTION2 0x0

#define CH_PCI_ID_TABLE_ENTRY(devid) \
		{PCI_VDEVICE(CHELSIO, (devid)), CXGB4_UNIFIED_PF}

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
		{ 0, } \
	}

#include "t4_pci_id_tbl.h"

#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW6_FNAME "cxgb4/t6fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"
#define FW6_CFNAME "cxgb4/t6-config.txt"
#define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
#define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
#define PHY_AQ1202_DEVICEID 0x4409
#define PHY_BCM84834_DEVICEID 0x4486

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
MODULE_FIRMWARE(FW6_FNAME);

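/* The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */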
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");

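/* Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access a misaligned word.
 */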
static int rx_dma_offset = 2;

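/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue. Select between the kernel provided function (select_queue=0) or the
 * driver's cxgb_select_queue() function (select_queue=1).
 */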
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
		 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");

static struct dentry *cxgb4_debugfs_root;

LIST_HEAD(adapter_list);
DEFINE_MUTEX(uld_mutex);
LIST_HEAD(uld_list);

static int cfg_queues(struct adapter *adap);

static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s;
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		case 100:
			s = "100Mbps";
			break;
		case 1000:
			s = "1Gbps";
			break;
		case 10000:
			s = "10Gbps";
			break;
		case 25000:
			s = "25Gbps";
			break;
		case 40000:
			s = "40Gbps";
			break;
		case 50000:
			s = "50Gbps";
			break;
		case 100000:
			s = "100Gbps";
			break;
		default:
			pr_info("%s: unsupported speed: %d\n",
				dev->name, p->link_cfg.speed);
			return;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}

#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up (or tear down) the DCB Priority mapping for a port's TX queues. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
	int i;

	/* We use a simple mapping of Port TX Queue Index to DCB
	 * Priority when we're enabling DCB.
	 */
	for (i = 0; i < pi->nqsets; i++, txq++) {
		u32 name, value;
		int err;

		name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			FW_PARAMS_PARAM_X_V(
				FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
			FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
		value = enable ? i : 0xffffffff;

		/* Since we can be called while atomic (from "interrupt
		 * level") we need to issue the Set Parameters Command
		 * without sleeping (timeout < 0).
		 */
		err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
					    &name, &value,
					    -FW_CMD_MAX_TIMEOUT);

		if (err)
			dev_err(adap->pdev_dev,
				"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
				enable ? "set" : "unset", pi->port_id, i, -err);
		else
			txq->dcb_prio = enable ? value : 0;
	}
}

int cxgb4_dcb_enabled(const struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	if (!pi->dcb.enabled)
		return 0;

	return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
		(pi->dcb.state == CXGB4_DCB_STATE_HOST));
}
#endif /* CONFIG_CHELSIO_T4_DCB */

void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else {
#ifdef CONFIG_CHELSIO_T4_DCB
			if (cxgb4_dcb_enabled(dev)) {
				cxgb4_dcb_reset(dev);
				dcb_tx_queue_prio_enable(dev, false);
			}
#endif /* CONFIG_CHELSIO_T4_DCB */
			netif_carrier_off(dev);
		}

		link_report(dev);
	}
}

void t4_os_portmod_changed(struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	struct net_device *dev = adap->port[port_id];
	struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		netdev_info(dev, "%s: unsupported port module inserted\n",
			    dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		netdev_info(dev, "%s: unknown port module inserted\n",
			    dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
		netdev_info(dev, "%s: transceiver module error\n", dev->name);
	else
		netdev_info(dev, "%s: unknown module type %d inserted\n",
			    dev->name, pi->mod_type);

	/* If the interface is running, then we'll need any "sticky" Link
	 * Parameters redone with a new Transceiver Module.
	 */
	pi->link_cfg.redo_l1cfg = netif_running(dev);
}

int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/* usecs to sleep while draining the dbfifo */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");

static inline int cxgb4_set_addr_hash(struct port_info *pi)
{
	struct adapter *adap = pi->adapter;
	u64 vec = 0;
	bool ucast = false;
	struct hash_mac_addr *entry;

	/* Calculate the hash vector for the updated list and program it */
	list_for_each_entry(entry, &adap->mac_hlist, list) {
		ucast |= is_unicast_ether_addr(entry->addr);
		vec |= (1ULL << hash_mac_addr(entry->addr));
	}
	return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast,
				vec, false);
}

static int cxgb4_mac_sync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	u64 mhash = 0;
	u64 uhash = 0;
	u16 idx[1] = {};
	bool free = false;
	bool ucast = is_unicast_ether_addr(mac_addr);
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *new_entry;

	ret = cxgb4_alloc_mac_filt(adap, pi->viid, free, 1, maclist,
				   idx, ucast ? &uhash : &mhash, false);
	if (ret < 0)
		goto out;
	/* if hash != 0, then add the addr to hash addr list
	 * so on the end we will calculate the hash for the
	 * list and program it
	 */
	if (uhash || mhash) {
		new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
		if (!new_entry)
			return -ENOMEM;
		ether_addr_copy(new_entry->addr, mac_addr);
		list_add_tail(&new_entry->list, &adap->mac_hlist);
		ret = cxgb4_set_addr_hash(pi);
	}
out:
	return ret < 0 ? ret : 0;
}

static int cxgb4_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *entry, *tmp;

	/* If the MAC address to be removed is in the hash addr
	 * list, delete it from the list and update hash vector
	 */
	list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) {
		if (ether_addr_equal(entry->addr, mac_addr)) {
			list_del(&entry->list);
			kfree(entry);
			return cxgb4_set_addr_hash(pi);
		}
	}

	ret = cxgb4_free_mac_filt(adap, pi->viid, 1, maclist, false);
	return ret < 0 ? -EINVAL : 0;
}

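/* Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */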
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	__dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
	__dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, pi->viid_mirror,
			     mtu, (dev->flags & IFF_PROMISC) ? 1 : 0,
			     (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
			     sleep_ok);
}

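/**
 *	cxgb4_change_mac - Update match filter for a MAC address.
 *	@pi: the port_info
 *	@viid: the VI id
 *	@tcam_idx: TCAM index of existing filter for old value of MAC address,
 *		   or -1
 *	@addr: the new MAC address value
 *	@persist: whether a new MAC allocation should be persistent
 *	@smt_idx: the destination to store the new SMT index.
 *
 *	Modifies an MPS filter and sets it to the new MAC address if
 *	@tcam_idx >= 0, or adds the MAC address to a new filter if
 *	@tcam_idx < 0. In the latter case the address is added persistently
 *	if @persist is %true.
 *	Returns a negative error number or the index of the filter with the
 *	new MAC value.
 */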
int cxgb4_change_mac(struct port_info *pi, unsigned int viid,
		     int *tcam_idx, const u8 *addr, bool persist,
		     u8 *smt_idx)
{
	struct adapter *adapter = pi->adapter;
	struct hash_mac_addr *entry, *new_entry;
	int ret;

	ret = t4_change_mac(adapter, adapter->mbox, viid,
			    *tcam_idx, addr, persist, smt_idx);

	if (ret == -ENOMEM) {
		/* We ran out of MPS TCAM entries. If the interface MAC
		 * exists in the hash MAC list, update it from there.
		 */
		list_for_each_entry(entry, &adapter->mac_hlist, list) {
			if (entry->iface_mac) {
				ether_addr_copy(entry->addr, addr);
				goto set_hash;
			}
		}
		new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
		if (!new_entry)
			return -ENOMEM;
		ether_addr_copy(new_entry->addr, addr);
		new_entry->iface_mac = true;
		list_add_tail(&new_entry->list, &adapter->mac_hlist);
set_hash:
		ret = cxgb4_set_addr_hash(pi);
	} else if (ret >= 0) {
		*tcam_idx = ret;
		ret = 0;
	}

	return ret;
}

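/*
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */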
static int link_start(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->mbox;
	int ret;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly. Enable vlan accel.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, pi->viid_mirror,
			    dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (ret == 0)
		ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt,
					    dev->dev_addr, true, &pi->smt_idx);
	if (ret == 0)
		ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0) {
		local_bh_disable();
		ret = t4_enable_pi_params(pi->adapter, mb, pi, true,
					  true, CXGB4_DCB_ENABLED);
		local_bh_enable();
	}

	return ret;
}

#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
	int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
	struct net_device *dev = adap->port[adap->chan_map[port]];
	int old_dcb_enabled = cxgb4_dcb_enabled(dev);
	int new_dcb_enabled;

	cxgb4_dcb_handle_fw_update(adap, pcmd);
	new_dcb_enabled = cxgb4_dcb_enabled(dev);

	/* If the DCB has become enabled or disabled on the port then we're
	 * going to need to set up/tear down DCB Priority parameters for the
	 * TX Queues associated with the port.
	 */
	if (new_dcb_enabled != old_dcb_enabled)
		dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
}
#endif /* CONFIG_CHELSIO_T4_DCB */

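/* Response queue handler for the FW event queue. */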
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;                                          /* skip RSS header */

	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 */
	if (unlikely(opcode == CPL_FW4_MSG &&
		     ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
		rsp++;
		opcode = ((const struct rss_header *)rsp)->opcode;
		rsp++;
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adap->pdev_dev,
				"unexpected FW4/CPL %#x on FW event queue\n",
				opcode);
			goto out;
		}
	}

	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		txq->restarts++;
		if (txq->q_type == CXGB4_TXQ_ETH) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			t4_sge_eth_txq_egress_update(q->adap, eq, -1);
		} else {
			struct sge_uld_txq *oq;

			oq = container_of(txq, struct sge_uld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
		const struct fw_port_cmd *pcmd = (const void *)p->data;
		unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
		unsigned int action =
			FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));

		if (cmd == FW_PORT_CMD &&
		    (action == FW_PORT_ACTION_GET_PORT_INFO ||
		     action == FW_PORT_ACTION_GET_PORT_INFO32)) {
			int port = FW_PORT_CMD_PORTID_G(
					be32_to_cpu(pcmd->op_to_portid));
			struct net_device *dev;
			int dcbxdis, state_input;

			dev = q->adap->port[q->adap->chan_map[port]];
			dcbxdis = (action == FW_PORT_ACTION_GET_PORT_INFO
				   ? !!(pcmd->u.info.dcbxdis_pkd & FW_PORT_CMD_DCBXDIS_F)
				   : !!(be32_to_cpu(pcmd->u.info32.lstatus32_to_cbllen32)
					& FW_PORT_CMD_DCBXDIS32_F));
			state_input = (dcbxdis
				       ? CXGB4_DCB_INPUT_FW_DISABLED
				       : CXGB4_DCB_INPUT_FW_ENABLED);

			cxgb4_dcb_state_fsm(dev, state_input);
		}

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_L2_DCB_CFG)
			dcb_rpl(q->adap, pcmd);
		else
#endif
			if (p->type == 0)
				t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SMT_WRITE_RPL) {
		const struct cpl_smt_write_rpl *p = (void *)rsp;

		do_smt_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
	} else if (opcode == CPL_ACT_OPEN_RPL) {
		const struct cpl_act_open_rpl *p = (void *)rsp;

		hash_filter_rpl(q->adap, p);
	} else if (opcode == CPL_ABORT_RPL_RSS) {
		const struct cpl_abort_rpl_rss *p = (void *)rsp;

		hash_del_filter_rpl(q->adap, p);
	} else if (opcode == CPL_SRQ_TABLE_RPL) {
		const struct cpl_srq_table_rpl *p = (void *)rsp;

		do_srq_table_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
out:
	return 0;
}

static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & CXGB4_USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~CXGB4_USING_MSIX;
	} else if (adapter->flags & CXGB4_USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~CXGB4_USING_MSI;
	}
}

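/*
 * Interrupt handler for non-data events used with MSI-X.
 */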
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;
	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));

	if (v & PFSW_F) {
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
	}
	if (adap->flags & CXGB4_MASTER_PF)
		t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}

int cxgb4_set_msix_aff(struct adapter *adap, unsigned short vec,
		       cpumask_var_t *aff_mask, int idx)
{
	int rv;

	if (!zalloc_cpumask_var(aff_mask, GFP_KERNEL)) {
		dev_err(adap->pdev_dev, "alloc_cpumask_var failed\n");
		return -ENOMEM;
	}

	cpumask_set_cpu(cpumask_local_spread(idx, dev_to_node(adap->pdev_dev)),
			*aff_mask);

	rv = irq_set_affinity_hint(vec, *aff_mask);
	if (rv)
		dev_warn(adap->pdev_dev,
			 "irq_set_affinity_hint %u failed %d\n",
			 vec, rv);

	return 0;
}

void cxgb4_clear_msix_aff(unsigned short vec, cpumask_var_t aff_mask)
{
	irq_set_affinity_hint(vec, NULL);
	free_cpumask_var(aff_mask);
}

static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	struct msix_info *minfo;
	int err, ethqidx;

	if (s->fwevtq_msix_idx < 0)
		return -ENOMEM;

	err = request_irq(adap->msix_info[s->fwevtq_msix_idx].vec,
			  t4_sge_intr_msix, 0,
			  adap->msix_info[s->fwevtq_msix_idx].desc,
			  &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		minfo = s->ethrxq[ethqidx].msix;
		err = request_irq(minfo->vec,
				  t4_sge_intr_msix, 0,
				  minfo->desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;

		cxgb4_set_msix_aff(adap, minfo->vec,
				   &minfo->aff_mask, ethqidx);
	}
	return 0;

unwind:
	while (--ethqidx >= 0) {
		minfo = s->ethrxq[ethqidx].msix;
		cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
		free_irq(minfo->vec, &s->ethrxq[ethqidx].rspq);
	}
	free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq);
	return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	struct msix_info *minfo;
	int i;

	free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq);
	for_each_ethrxq(s, i) {
		minfo = s->ethrxq[i].msix;
		cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
		free_irq(minfo->vec, &s->ethrxq[i].rspq);
	}
}

static int setup_ppod_edram(struct adapter *adap)
{
	unsigned int param, val;
	int ret;

	/* Driver sends FW_PARAMS_PARAM_DEV_PPOD_EDRAM read command to check
	 * if firmware supports ppod edram feature or not. If firmware
	 * returns 1, then driver can enable this feature by sending
	 * FW_PARAMS_PARAM_DEV_PPOD_EDRAM write command with value 1 to
	 * enable ppod in edram feature.
	 */
	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PPOD_EDRAM));

	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
	if (ret < 0) {
		dev_warn(adap->pdev_dev,
			 "querying PPOD_EDRAM support failed: %d\n",
			 ret);
		return -1;
	}

	if (val != 1)
		return -1;

	ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
	if (ret < 0) {
		dev_err(adap->pdev_dev,
			"setting PPOD_EDRAM failed: %d\n", ret);
		return -1;
	}
	return 0;
}

static void adap_config_hpfilter(struct adapter *adapter)
{
	u32 param, val = 0;
	int ret;

	/* Enable HP filter region. Older fw will fail this request and
	 * it is fine.
	 */
	param = FW_PARAM_DEV(HPFILTER_REGION_SUPPORT);
	ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0,
			    1, &param, &val);

	/* An error means FW doesn't know about HP filter support,
	 * it's not a problem, don't return an error.
	 */
	if (ret < 0)
		dev_err(adapter->pdev_dev,
			"HP filter region isn't supported by FW\n");
}

static int cxgb4_config_rss(const struct port_info *pi, u16 *rss,
			    u16 rss_size, u16 viid)
{
	struct adapter *adap = pi->adapter;
	int ret;

	ret = t4_config_rss_range(adap, adap->mbox, viid, 0, rss_size, rss,
				  rss_size);
	if (ret)
		return ret;

	/* If Tunnel All Lookup isn't specified in the global RSS
	 * Configuration, then we need to specify a default Ingress
	 * Queue for any ingress packets which aren't hashed.  We'll
	 * use our first ingress queue.
	 */
	return t4_config_vi_rss(adap, adap->mbox, viid,
				FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
				FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
				FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
				FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
				FW_RSS_VI_CONFIG_CMD_UDPEN_F,
				rss[0]);
}

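/**
 *	cxgb4_write_rss - write the RSS table for a given port
 *	@pi: the port
 *	@queues: array of queue indices for RSS
 *
 *	Sets up the portion of the HW RSS table for the port's VI to
 *	distribute packets to the Rx queues in @queues.
 *	Should never be called before setting up sge eth rx queues.
 */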
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
{
	struct adapter *adapter = pi->adapter;
	const struct sge_eth_rxq *rxq;
	int i, err;
	u16 *rss;

	rxq = &adapter->sge.ethrxq[pi->first_qset];
	rss = kmalloc_array(pi->rss_size, sizeof(u16), GFP_KERNEL);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = rxq[*queues].rspq.abs_id;

	err = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid);
	kfree(rss);
	return err;
}

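/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS for each port.
 */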
static int setup_rss(struct adapter *adap)
{
	int i, j, err;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		/* Fill default values with equal distribution */
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = j % pi->nqsets;

		err = cxgb4_write_rss(pi, pi->rss);
		if (err)
			return err;
	}
	return 0;
}

/*
 * Return the channel of the ingress queue with the given queue ID.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
	qid -= p->ingr_start;
	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

void cxgb4_quiesce_rx(struct sge_rspq *q)
{
	if (q->handler)
		napi_disable(&q->napi);
}

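/*
 * Wait until all NAPI handlers are descheduled.
 */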
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;

		cxgb4_quiesce_rx(q);
	}
}

/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	if (adap->flags & CXGB4_FULL_INIT_DONE) {
		t4_intr_disable(adap);
		if (adap->flags & CXGB4_USING_MSIX) {
			free_msix_queue_irqs(adap);
			free_irq(adap->msix_info[s->nd_msix_idx].vec,
				 adap);
		} else {
			free_irq(adap->pdev->irq, adap);
		}
		quiesce_rx(adap);
	}
}

void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q)
{
	if (q->handler)
		napi_enable(&q->napi);

	/* 0-increment GTS to start the timer and enable interrupts */
	t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
		     SEINTARM_V(q->intr_params) |
		     INGRESSQID_V(q->cntxt_id));
}

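/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */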
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;

		cxgb4_enable_rx(adap, q);
	}
}

static int setup_non_data_intr(struct adapter *adap)
{
	int msix;

	adap->sge.nd_msix_idx = -1;
	if (!(adap->flags & CXGB4_USING_MSIX))
		return 0;

	/* Request MSI-X vector for non-data interrupt */
	msix = cxgb4_get_msix_idx_from_bmap(adap);
	if (msix < 0)
		return -ENOMEM;

	snprintf(adap->msix_info[msix].desc,
		 sizeof(adap->msix_info[msix].desc),
		 "%s", adap->port[0]->name);

	adap->sge.nd_msix_idx = msix;
	return 0;
}

static int setup_fw_sge_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int msix, err = 0;

	bitmap_zero(s->starving_fl, s->egr_sz);
	bitmap_zero(s->txq_maperr, s->egr_sz);

	if (adap->flags & CXGB4_USING_MSIX) {
		s->fwevtq_msix_idx = -1;
		msix = cxgb4_get_msix_idx_from_bmap(adap);
		if (msix < 0)
			return -ENOMEM;

		snprintf(adap->msix_info[msix].desc,
			 sizeof(adap->msix_info[msix].desc),
			 "%s-FWeventq", adap->port[0]->name);
	} else {
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL, NULL, -1);
		if (err)
			return err;
		msix = -((int)s->intrq.abs_id + 1);
	}

	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       msix, NULL, fwevtq_handler, NULL, -1);
	if (err && msix >= 0)
		cxgb4_free_msix_idx_in_bmap(adap, msix);

	s->fwevtq_msix_idx = msix;
	return err;
}

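/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */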
static int setup_sge_queues(struct adapter *adap)
{
	struct sge_uld_rxq_info *rxq_info = NULL;
	struct sge *s = &adap->sge;
	unsigned int cmplqid = 0;
	int err, i, j, msix = 0;

	if (is_uld(adap))
		rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];

	if (!(adap->flags & CXGB4_USING_MSIX))
		msix = -((int)s->intrq.abs_id + 1);

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (msix >= 0) {
				msix = cxgb4_get_msix_idx_from_bmap(adap);
				if (msix < 0) {
					err = msix;
					goto freeout;
				}

				snprintf(adap->msix_info[msix].desc,
					 sizeof(adap->msix_info[msix].desc),
					 "%s-Rx%d", dev->name, j);
				q->msix = &adap->msix_info[msix];
			}

			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       msix, &q->fl,
					       t4_ethrx_handler,
					       NULL,
					       t4_get_tp_ch_map(adap,
								pi->tx_chan));
			if (err)
				goto freeout;
			q->rspq.idx = j;
			memset(&q->stats, 0, sizeof(q->stats));
		}

		q = &s->ethrxq[pi->first_qset];
		for (j = 0; j < pi->nqsets; j++, t++, q++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
						   netdev_get_tx_queue(dev, j),
						   q->rspq.cntxt_id,
						   !!(adap->flags & CXGB4_SGE_DBQ_TIMER));
			if (err)
				goto freeout;
		}
	}

	for_each_port(adap, i) {
		/* Note that cmplqid below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		if (rxq_info)
			cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;

		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id, cmplqid);
		if (err)
			goto freeout;
	}

	if (!is_t4(adap->params.chip)) {
		err = t4_sge_alloc_eth_txq(adap, &s->ptptxq, adap->port[0],
					   netdev_get_tx_queue(adap->port[0], 0),
					   s->fw_evtq.cntxt_id, false);
		if (err)
			goto freeout;
	}

	t4_write_reg(adap, is_t4(adap->params.chip) ?
				MPS_TRC_RSS_CONTROL_A :
				MPS_T5_TRC_RSS_CONTROL_A,
		     RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
	return 0;
freeout:
	dev_err(adap->pdev_dev, "Can't allocate queues, err=%d\n", -err);
	t4_free_sge_resources(adap);
	return err;
}

static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
			     struct net_device *sb_dev)
{
	int txq;

#ifdef CONFIG_CHELSIO_T4_DCB
	/* If a Data Center Bridging has been successfully negotiated on this
	 * link then we'll use the skb's priority to map it to a TX Queue.
	 * The skb's priority is determined via the VLAN Tag Priority Code
	 * Point field.
	 */
	if (cxgb4_dcb_enabled(dev) && !is_kdump_kernel()) {
		u16 vlan_tci;
		int err;

		err = vlan_get_tag(skb, &vlan_tci);
		if (unlikely(err)) {
			if (net_ratelimit())
				netdev_warn(dev,
					    "TX Packet without VLAN Tag on DCB Link\n");
			txq = 0;
		} else {
			txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
#ifdef CONFIG_CHELSIO_T4_FCOE
			if (skb->protocol == htons(ETH_P_FCOE))
				txq = skb->priority & 0x7;
#endif /* CONFIG_CHELSIO_T4_FCOE */
		}
		return txq;
	}
#endif /* CONFIG_CHELSIO_T4_DCB */

	if (dev->num_tc) {
		struct port_info *pi = netdev2pinfo(dev);
		u8 ver, proto;

		ver = ip_hdr(skb)->version;
		proto = (ver == 6) ? ipv6_hdr(skb)->nexthdr :
				     ip_hdr(skb)->protocol;

		/* Send unsupported traffic pattern to normal NIC queues. */
		txq = netdev_pick_tx(dev, skb, sb_dev);
		if (xfrm_offload(skb) || is_ptp_enabled(skb, dev) ||
		    skb->encapsulation ||
		    (proto != IPPROTO_TCP && proto != IPPROTO_UDP))
			txq = txq % pi->nqsets;

		return txq;
	}

	if (select_queue) {
		txq = (skb_rx_queue_recorded(skb)
			? skb_get_rx_queue(skb)
			: smp_processor_id());

		while (unlikely(txq >= dev->real_num_tx_queues))
			txq -= dev->real_num_tx_queues;

		return txq;
	}

	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
}

static int closest_timer(const struct sge *s, int time)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

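/**
 *	cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
 *	@q: the Rx queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *	one of the two needs to be enabled for the queue to generate
 *	interrupts.
 */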
int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
			       unsigned int us, unsigned int cnt)
{
	struct adapter *adap = q->adap;

	if ((us | cnt) == 0)
		cnt = 1;

	if (cnt) {
		int err;
		u32 v, new_idx;

		new_idx = closest_thres(&adap->sge, cnt);
		if (q->desc && q->pktcnt_idx != new_idx) {
			/* the queue has already been created, update it */
			v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X_V(
					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
			err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
					    &v, &new_idx);
			if (err)
				return err;
		}
		q->pktcnt_idx = new_idx;
	}

	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
	q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
	return 0;
}

static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;
	const struct port_info *pi = netdev_priv(dev);
	int err;

	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
		return 0;

	err = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid,
			    pi->viid_mirror, -1, -1, -1, -1,
			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (unlikely(err))
		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
	return err;
}

static int setup_debugfs(struct adapter *adap)
{
	if (IS_ERR_OR_NULL(adap->debugfs_root))
		return -1;

#ifdef CONFIG_DEBUG_FS
	t4_setup_debugfs(adap);
#endif
	return 0;
}

static void cxgb4_port_mirror_free_rxq(struct adapter *adap,
				       struct sge_eth_rxq *mirror_rxq)
{
	if ((adap->flags & CXGB4_FULL_INIT_DONE) &&
	    !(adap->flags & CXGB4_SHUTTING_DOWN))
		cxgb4_quiesce_rx(&mirror_rxq->rspq);

	if (adap->flags & CXGB4_USING_MSIX) {
		cxgb4_clear_msix_aff(mirror_rxq->msix->vec,
				     mirror_rxq->msix->aff_mask);
		free_irq(mirror_rxq->msix->vec, &mirror_rxq->rspq);
		cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx);
	}

	free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl);
}

static int cxgb4_port_mirror_alloc_queues(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sge_eth_rxq *mirror_rxq;
	struct sge *s = &adap->sge;
	int ret = 0, msix = 0;
	u16 i, rxqid;
	u16 *rss;

	if (!pi->vi_mirror_count)
		return 0;

	if (s->mirror_rxq[pi->port_id])
		return 0;

	mirror_rxq = kcalloc(pi->nmirrorqsets, sizeof(*mirror_rxq), GFP_KERNEL);
	if (!mirror_rxq)
		return -ENOMEM;

	s->mirror_rxq[pi->port_id] = mirror_rxq;

	if (!(adap->flags & CXGB4_USING_MSIX))
		msix = -((int)adap->sge.intrq.abs_id + 1);

	for (i = 0, rxqid = 0; i < pi->nmirrorqsets; i++, rxqid++) {
		mirror_rxq = &s->mirror_rxq[pi->port_id][i];

		/* Allocate Mirror Rxqs */
		if (msix >= 0) {
			msix = cxgb4_get_msix_idx_from_bmap(adap);
			if (msix < 0) {
				ret = msix;
				goto out_free_queues;
			}

			mirror_rxq->msix = &adap->msix_info[msix];
			snprintf(mirror_rxq->msix->desc,
				 sizeof(mirror_rxq->msix->desc),
				 "%s-mirrorrxq%d", dev->name, i);
		}

		init_rspq(adap, &mirror_rxq->rspq,
			  CXGB4_MIRROR_RXQ_DEFAULT_INTR_USEC,
			  CXGB4_MIRROR_RXQ_DEFAULT_PKT_CNT,
			  CXGB4_MIRROR_RXQ_DEFAULT_DESC_NUM,
			  CXGB4_MIRROR_RXQ_DEFAULT_DESC_SIZE);

		mirror_rxq->fl.size = CXGB4_MIRROR_FLQ_DEFAULT_DESC_NUM;

		ret = t4_sge_alloc_rxq(adap, &mirror_rxq->rspq, false,
				       dev, msix, &mirror_rxq->fl,
				       t4_ethrx_handler, NULL, 0);
		if (ret)
			goto out_free_msix_idx;

		/* Setup MSI-X vectors for Mirror Rxqs */
		if (adap->flags & CXGB4_USING_MSIX) {
			ret = request_irq(mirror_rxq->msix->vec,
					  t4_sge_intr_msix, 0,
					  mirror_rxq->msix->desc,
					  &mirror_rxq->rspq);
			if (ret)
				goto out_free_rxq;

			cxgb4_set_msix_aff(adap, mirror_rxq->msix->vec,
					   &mirror_rxq->msix->aff_mask, i);
		}

		/* Start NAPI for Mirror Rxqs */
		cxgb4_enable_rx(adap, &mirror_rxq->rspq);
	}

	/* Setup RSS for Mirror Rxqs */
	rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
	if (!rss) {
		ret = -ENOMEM;
		goto out_free_queues;
	}

	mirror_rxq = &s->mirror_rxq[pi->port_id][0];
	for (i = 0; i < pi->rss_size; i++)
		rss[i] = mirror_rxq[i % pi->nmirrorqsets].rspq.abs_id;

	ret = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid_mirror);
	kfree(rss);
	if (ret)
		goto out_free_queues;

	return 0;

out_free_rxq:
	free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl);

out_free_msix_idx:
	cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx);

out_free_queues:
	while (rxqid-- > 0)
		cxgb4_port_mirror_free_rxq(adap,
					   &s->mirror_rxq[pi->port_id][rxqid]);

	kfree(s->mirror_rxq[pi->port_id]);
	s->mirror_rxq[pi->port_id] = NULL;
	return ret;
}

static void cxgb4_port_mirror_free_queues(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sge *s = &adap->sge;
	u16 i;

	if (!pi->vi_mirror_count)
		return;

	if (!s->mirror_rxq[pi->port_id])
		return;

	for (i = 0; i < pi->nmirrorqsets; i++)
		cxgb4_port_mirror_free_rxq(adap,
					   &s->mirror_rxq[pi->port_id][i]);

	kfree(s->mirror_rxq[pi->port_id]);
	s->mirror_rxq[pi->port_id] = NULL;
}

static int cxgb4_port_mirror_start(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret, idx = -1;

	if (!pi->vi_mirror_count)
		return 0;

	/* Mirror VIs can be created dynamically after stack had
	 * already setup Rx modes like MTU, promisc, allmulti, etc.
	 * on main VI. So, parse what the stack had setup on the
	 * main VI and update the same on Mirror VI.
	 */
	ret = t4_set_rxmode(adap, adap->mbox, pi->viid, pi->viid_mirror,
			    dev->mtu, (dev->flags & IFF_PROMISC) ? 1 : 0,
			    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (ret) {
		dev_err(adap->pdev_dev,
			"Failed start up Rx mode for Mirror VI 0x%x, ret: %d\n",
			pi->viid_mirror, ret);
		return ret;
	}

	/* Add the port's MAC address to the Mirror VI's MAC filter, so that
	 * ingress packets for the port are also delivered to the Mirror VI.
	 */
	ret = cxgb4_update_mac_filt(pi, pi->viid_mirror, &idx,
				    dev->dev_addr, true, NULL);
	if (ret) {
		dev_err(adap->pdev_dev,
			"Failed updating MAC filter for Mirror VI 0x%x, ret: %d\n",
			pi->viid_mirror, ret);
		return ret;
	}

	/* Enabling a Virtual Interface can result in an interrupt during the
	 * processing of the VI Enable command and, in some paths, result in
	 * an attempt to issue another command in the interrupt context.
	 * Thus, we disable interrupts during the course of the VI Enable
	 * command.
	 */
	local_bh_disable();
	ret = t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, true, true,
				  false);
	local_bh_enable();
	if (ret)
		dev_err(adap->pdev_dev,
			"Failed starting Mirror VI 0x%x, ret: %d\n",
			pi->viid_mirror, ret);

	return ret;
}

static void cxgb4_port_mirror_stop(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	if (!pi->vi_mirror_count)
		return;

	t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, false, false,
			    false);
}

int cxgb4_port_mirror_alloc(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret = 0;

	if (!pi->nmirrorqsets)
		return -EOPNOTSUPP;

	mutex_lock(&pi->vi_mirror_mutex);
	if (pi->viid_mirror) {
		pi->vi_mirror_count++;
		goto out_unlock;
	}

	ret = t4_init_port_mirror(pi, adap->mbox, pi->port_id, adap->pf, 0,
				  &pi->viid_mirror);
	if (ret)
		goto out_unlock;

	pi->vi_mirror_count = 1;

	if (adap->flags & CXGB4_FULL_INIT_DONE) {
		ret = cxgb4_port_mirror_alloc_queues(dev);
		if (ret)
			goto out_free_vi;

		ret = cxgb4_port_mirror_start(dev);
		if (ret)
			goto out_free_queues;
	}

	mutex_unlock(&pi->vi_mirror_mutex);
	return 0;

out_free_queues:
	cxgb4_port_mirror_free_queues(dev);

out_free_vi:
	pi->vi_mirror_count = 0;
	t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror);
	pi->viid_mirror = 0;

out_unlock:
	mutex_unlock(&pi->vi_mirror_mutex);
	return ret;
}

void cxgb4_port_mirror_free(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	mutex_lock(&pi->vi_mirror_mutex);
	if (!pi->viid_mirror)
		goto out_unlock;

	if (pi->vi_mirror_count > 1) {
		pi->vi_mirror_count--;
		goto out_unlock;
	}

	cxgb4_port_mirror_stop(dev);
	cxgb4_port_mirror_free_queues(dev);

	pi->vi_mirror_count = 0;
	t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror);
	pi->viid_mirror = 0;

out_unlock:
	mutex_unlock(&pi->vi_mirror_mutex);
}

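/*
 * Allocate an active-open TID and set it to the supplied value.
 */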
int cxgb4_alloc_atid(struct tid_info *t, void *data)
{
	int atid = -1;

	spin_lock_bh(&t->atid_lock);
	if (t->afree) {
		union aopen_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->data = data;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}
EXPORT_SYMBOL(cxgb4_alloc_atid);

/*
 * Release an active-open TID.
 */
void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
{
	union aopen_entry *p = &t->atid_tab[atid - t->atid_base];

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);
}
EXPORT_SYMBOL(cxgb4_free_atid);

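/*
 * Allocate a server TID and set it to the supplied value.
 */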
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
		if (stid < t->nstids)
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1);
		if (stid < 0)
			stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid += t->stid_base;
		/* IPv6 requires max of 520 bits or 16 cells in TCAM
		 * This is equivalent to 4 TIDs. With CLIP enabled it
		 * needs 2 TIDs.
		 */
		if (family == PF_INET6) {
			t->stids_in_use += 2;
			t->v6_stids_in_use += 2;
		} else {
			t->stids_in_use++;
		}
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_stid);

/* Allocate a server filter TID and set it to the supplied value.
 */
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_next_zero_bit(t->stid_bmap,
					  t->nstids + t->nsftids, t->nstids);
		if (stid < (t->nstids + t->nsftids))
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid -= t->nstids;
		stid += t->sftid_base;
		t->sftids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_sftid);

/* Release a server TID.
 */
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
	/* Is it a server filter TID? */
	if (t->nsftids && (stid >= t->sftid_base)) {
		stid -= t->sftid_base;
		stid += t->nstids;
	} else {
		stid -= t->stid_base;
	}

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET)
		__clear_bit(stid, t->stid_bmap);
	else
		bitmap_release_region(t->stid_bmap, stid, 1);
	t->stid_tab[stid].data = NULL;
	if (stid < t->nstids) {
		if (family == PF_INET6) {
			t->stids_in_use -= 2;
			t->v6_stids_in_use -= 2;
		} else {
			t->stids_in_use--;
		}
	} else {
		t->sftids_in_use--;
	}

	spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);

/*
 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
 */
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
			   unsigned int tid)
{
	struct cpl_tid_release *req;

	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}

/*
 * Queue a TID release request and if necessary schedule a work queue to
 * process it.
 */
static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
				    unsigned int tid)
{
	struct adapter *adap = container_of(t, struct adapter, tids);
	void **p = &t->tid_tab[tid - t->tid_base];

	spin_lock_bh(&adap->tid_release_lock);
	*p = adap->tid_release_head;
	/* Low 2 bits encode the Tx channel number */
	adap->tid_release_head = (void **)((uintptr_t)p | chan);
	if (!adap->tid_release_task_busy) {
		adap->tid_release_task_busy = true;
		queue_work(adap->workq, &adap->tid_release_task);
	}
	spin_unlock_bh(&adap->tid_release_lock);
}

/*
 * Process the list of pending TID release requests.
 */
static void process_tid_release_list(struct work_struct *work)
{
	struct sk_buff *skb;
	struct adapter *adap;

	adap = container_of(work, struct adapter, tid_release_task);

	spin_lock_bh(&adap->tid_release_lock);
	while (adap->tid_release_head) {
		void **p = adap->tid_release_head;
		unsigned int chan = (uintptr_t)p & 3;

		p = (void *)p - chan;
		adap->tid_release_head = *p;
		*p = NULL;
		spin_unlock_bh(&adap->tid_release_lock);

		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
					 GFP_KERNEL)))
			schedule_timeout_uninterruptible(1);

		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
		t4_ofld_send(adap, skb);
		spin_lock_bh(&adap->tid_release_lock);
	}
	adap->tid_release_task_busy = false;
	spin_unlock_bh(&adap->tid_release_lock);
}

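/*
 * Release a TID and inform HW.  If we are unable to allocate the release
 * message we defer to a work queue.
 */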
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
		      unsigned short family)
{
	struct adapter *adap = container_of(t, struct adapter, tids);
	struct sk_buff *skb;

	WARN_ON(tid_out_of_range(&adap->tids, tid));

	if (t->tid_tab[tid - adap->tids.tid_base]) {
		t->tid_tab[tid - adap->tids.tid_base] = NULL;
		atomic_dec(&t->conns_in_use);
		if (t->hash_base && (tid >= t->hash_base)) {
			if (family == AF_INET6)
				atomic_sub(2, &t->hash_tids_in_use);
			else
				atomic_dec(&t->hash_tids_in_use);
		} else {
			if (family == AF_INET6)
				atomic_sub(2, &t->tids_in_use);
			else
				atomic_dec(&t->tids_in_use);
		}
	}

	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
	if (likely(skb)) {
		mk_tid_release(skb, chan, tid);
		t4_ofld_send(adap, skb);
	} else
		cxgb4_queue_tid_release(t, chan, tid);
}
EXPORT_SYMBOL(cxgb4_remove_tid);

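/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */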
static int tid_init(struct tid_info *t)
{
	struct adapter *adap = container_of(t, struct adapter, tids);
	unsigned int max_ftids = t->nftids + t->nsftids;
	unsigned int natids = t->natids;
	unsigned int hpftid_bmap_size;
	unsigned int eotid_bmap_size;
	unsigned int stid_bmap_size;
	unsigned int ftid_bmap_size;
	size_t size;

	stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
	ftid_bmap_size = BITS_TO_LONGS(t->nftids);
	hpftid_bmap_size = BITS_TO_LONGS(t->nhpftids);
	eotid_bmap_size = BITS_TO_LONGS(t->neotids);
	size = t->ntids * sizeof(*t->tid_tab) +
	       natids * sizeof(*t->atid_tab) +
	       t->nstids * sizeof(*t->stid_tab) +
	       t->nsftids * sizeof(*t->stid_tab) +
	       stid_bmap_size * sizeof(long) +
	       t->nhpftids * sizeof(*t->hpftid_tab) +
	       hpftid_bmap_size * sizeof(long) +
	       max_ftids * sizeof(*t->ftid_tab) +
	       ftid_bmap_size * sizeof(long) +
	       t->neotids * sizeof(*t->eotid_tab) +
	       eotid_bmap_size * sizeof(long);

	t->tid_tab = kvzalloc(size, GFP_KERNEL);
	if (!t->tid_tab)
		return -ENOMEM;

	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
	t->hpftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
	t->hpftid_bmap = (unsigned long *)&t->hpftid_tab[t->nhpftids];
	t->ftid_tab = (struct filter_entry *)&t->hpftid_bmap[hpftid_bmap_size];
	t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
	t->eotid_tab = (struct eotid_entry *)&t->ftid_bmap[ftid_bmap_size];
	t->eotid_bmap = (unsigned long *)&t->eotid_tab[t->neotids];
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);
	spin_lock_init(&t->ftid_lock);

	t->stids_in_use = 0;
	t->v6_stids_in_use = 0;
	t->sftids_in_use = 0;
	t->afree = NULL;
	t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);
	atomic_set(&t->conns_in_use, 0);
	atomic_set(&t->hash_tids_in_use, 0);
	atomic_set(&t->eotids_in_use, 0);

	/* Setup the free list for atid_tab and clear the stid bitmap. */
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}

	if (is_offload(adap)) {
		bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
		/* Reserve stid 0 for T4/T5 adapters */
		if (!t->stid_base &&
		    CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
			__set_bit(0, t->stid_bmap);

		if (t->neotids)
			bitmap_zero(t->eotid_bmap, t->neotids);
	}

	if (t->nhpftids)
		bitmap_zero(t->hpftid_bmap, t->nhpftids);
	bitmap_zero(t->ftid_bmap, t->nftids);
	return 0;
}

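/**
 *	cxgb4_create_server - create an IP server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IP address to bind server to
 *	@sport: the server's TCP port
 *	@vlan: the VLAN header information
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IP server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */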
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
			__be32 sip, __be16 sport, __be16 vlan,
			unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip = sip;
	req->peer_ip = htonl(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server);

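/*	cxgb4_create_server6 - create an IPv6 server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IPv6 address to bind server to
 *	@sport: the server's TCP port
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IPv6 server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */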
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
			 const struct in6_addr *sip, __be16 sport,
			 unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req6 *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
	req->peer_ip_hi = cpu_to_be64(0);
	req->peer_ip_lo = cpu_to_be64(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server6);

int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
			unsigned int queue, bool ipv6)
{
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_close_listsvr_req *req;
	int ret;

	adap = netdev2adap(dev);

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
						LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_remove_server);

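/**
 *	cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
 *	@mtus: the HW MTU table
 *	@mtu: the target MTU
 *	@idx: index of selected entry in the MTU table
 *
 *	Returns the index and the value in the HW MTU table that is closest to
 *	but does not exceed @mtu, unless @mtu is smaller than any table entry,
 *	in which case that smallest available value is selected.
 */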
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
			    unsigned int *idx)
{
	unsigned int i = 0;

	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;
	return mtus[i];
}
EXPORT_SYMBOL(cxgb4_best_mtu);

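/**
 *	cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
 *	@mtus: the HW MTU table
 *	@header_size: Header Size
 *	@data_size_max: maximum Data Segment Size
 *	@data_size_align: desired Data Segment Size Alignment (2^N)
 *	@mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
 *
 *	Similar to cxgb4_best_mtu() but instead of searching the Hardware
 *	MTU Table based solely on a Maximum MTU parameter, we break that
 *	parameter up into a Header Size and Maximum Data Segment Size, and
 *	provide a desired Data Segment Size Alignment.  If we find an MTU in
 *	the Hardware MTU Table which will result in a Data Segment Size with
 *	the requested alignment _and_ that MTU isn't "too far" from the
 *	closest MTU, then we'll return that rather than the closest MTU.
 */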
unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
				    unsigned short header_size,
				    unsigned short data_size_max,
				    unsigned short data_size_align,
				    unsigned int *mtu_idxp)
{
	unsigned short max_mtu = header_size + data_size_max;
	unsigned short data_size_align_mask = data_size_align - 1;
	int mtu_idx, aligned_mtu_idx;

	/* Scan the MTU Table till we find an MTU which is larger than our
	 * Maximum MTU or we reach the end of the table.  Along the way,
	 * record the last MTU found, if any, which will result in a Data
	 * Segment Length matching the requested alignment.
	 */
	for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
		unsigned short data_size = mtus[mtu_idx] - header_size;

		/* If this MTU minus the Header Size would result in a
		 * Data Segment Size of the desired alignment, remember it.
		 */
		if ((data_size & data_size_align_mask) == 0)
			aligned_mtu_idx = mtu_idx;

		/* If we're not at the end of the Hardware MTU Table and the
		 * next element is larger than our Maximum MTU, drop out of
		 * the loop.
		 */
		if (mtu_idx + 1 < NMTUS && mtus[mtu_idx + 1] > max_mtu)
			break;
	}

	/* If we fell out of the loop because we ran to the end of the table,
	 * then we just have to use the last [largest] entry.
	 */
	if (mtu_idx == NMTUS)
		mtu_idx--;

	/* If we found an MTU which resulted in the requested Data Segment
	 * Size Alignment and that's "not far" from the closest MTU, then
	 * use that rather than the closest MTU.
	 */
	if (aligned_mtu_idx >= 0 &&
	    mtu_idx - aligned_mtu_idx <= 1)
		mtu_idx = aligned_mtu_idx;

	/* If the caller has passed in an MTU Index pointer, pass the
	 * MTU Index back.  Return the MTU value.
	 */
	if (mtu_idxp)
		*mtu_idxp = mtu_idx;
	return mtus[mtu_idx];
}
EXPORT_SYMBOL(cxgb4_best_aligned_mtu);

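/**
 *	cxgb4_port_chan - get the HW channel of a port
 *	@dev: the net device for the port
 *
 *	Return the HW Tx channel of the given port.
 */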
unsigned int cxgb4_port_chan(const struct net_device *dev)
{
	return netdev2pinfo(dev)->tx_chan;
}
EXPORT_SYMBOL(cxgb4_port_chan);

/**
 *	cxgb4_port_e2cchan - get the HW c-channel of a port
 *	@dev: the net device for the port
 *
 *	Return the HW RX c-channel of the given port.
 */
unsigned int cxgb4_port_e2cchan(const struct net_device *dev)
{
	return netdev2pinfo(dev)->rx_cchan;
}
EXPORT_SYMBOL(cxgb4_port_e2cchan);

unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
{
	struct adapter *adap = netdev2adap(dev);
	u32 v1, v2, lp_count, hp_count;

	v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
	v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
	if (is_t4(adap->params.chip)) {
		lp_count = LP_COUNT_G(v1);
		hp_count = HP_COUNT_G(v1);
	} else {
		lp_count = LP_COUNT_T5_G(v1);
		hp_count = HP_COUNT_T5_G(v2);
	}
	return lpfifo ? lp_count : hp_count;
}
EXPORT_SYMBOL(cxgb4_dbfifo_count);

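/**
 *	cxgb4_port_viid - get the VI id of a port
 *	@dev: the net device for the port
 *
 *	Return the VI id of the given port.
 */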
2162unsigned int cxgb4_port_viid(const struct net_device *dev)
2163{
2164 return netdev2pinfo(dev)->viid;
2165}
2166EXPORT_SYMBOL(cxgb4_port_viid);
2167
2168
2169
2170
2171
2172
2173
2174unsigned int cxgb4_port_idx(const struct net_device *dev)
2175{
2176 return netdev2pinfo(dev)->port_id;
2177}
2178EXPORT_SYMBOL(cxgb4_port_idx);
2179
2180void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
2181 struct tp_tcp_stats *v6)
2182{
2183 struct adapter *adap = pci_get_drvdata(pdev);
2184
2185 spin_lock(&adap->stats_lock);
2186 t4_tp_get_tcp_stats(adap, v4, v6, false);
2187 spin_unlock(&adap->stats_lock);
2188}
2189EXPORT_SYMBOL(cxgb4_get_tcp_stats);
2190
2191void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
2192 const unsigned int *pgsz_order)
2193{
2194 struct adapter *adap = netdev2adap(dev);
2195
2196 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
2197 t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
2198 HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
2199 HPZ3_V(pgsz_order[3]));
2200}
2201EXPORT_SYMBOL(cxgb4_iscsi_init);
2202
2203int cxgb4_flush_eq_cache(struct net_device *dev)
2204{
2205 struct adapter *adap = netdev2adap(dev);
2206
2207 return t4_sge_ctxt_flush(adap, adap->mbox, CTXT_EGRESS);
2208}
2209EXPORT_SYMBOL(cxgb4_flush_eq_cache);
2210
2211static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
2212{
2213 u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
2214 __be64 indices;
2215 int ret;
2216
2217 spin_lock(&adap->win0_lock);
2218 ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
2219 sizeof(indices), (__be32 *)&indices,
2220 T4_MEMORY_READ);
2221 spin_unlock(&adap->win0_lock);
2222 if (!ret) {
2223 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
2224 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
2225 }
2226 return ret;
2227}
2228
2229int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
2230 u16 size)
2231{
2232 struct adapter *adap = netdev2adap(dev);
2233 u16 hw_pidx, hw_cidx;
2234 int ret;
2235
2236 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
2237 if (ret)
2238 goto out;
2239
2240 if (pidx != hw_pidx) {
2241 u16 delta;
2242 u32 val;
2243
2244 if (pidx >= hw_pidx)
2245 delta = pidx - hw_pidx;
2246 else
2247 delta = size - hw_pidx + pidx;
2248
2249 if (is_t4(adap->params.chip))
2250 val = PIDX_V(delta);
2251 else
2252 val = PIDX_T5_V(delta);
2253 wmb();
2254 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
2255 QID_V(qid) | val);
2256 }
2257out:
2258 return ret;
2259}
2260EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
2261
2262int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
2263{
2264 u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
2265 u32 edc0_end, edc1_end, mc0_end, mc1_end;
2266 u32 offset, memtype, memaddr;
2267 struct adapter *adap;
2268 u32 hma_size = 0;
2269 int ret;
2270
2271 adap = netdev2adap(dev);
2272
2273 offset = ((stag >> 8) * 32) + adap->vres.stag.start;
2274
2275
2276
2277
2278
2279
2280
2281 size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
2282 edc0_size = EDRAM0_SIZE_G(size) << 20;
2283 size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
2284 edc1_size = EDRAM1_SIZE_G(size) << 20;
2285 size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
2286 mc0_size = EXT_MEM0_SIZE_G(size) << 20;
2287
2288 if (t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A) & HMA_MUX_F) {
2289 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
2290 hma_size = EXT_MEM1_SIZE_G(size) << 20;
2291 }
2292 edc0_end = edc0_size;
2293 edc1_end = edc0_end + edc1_size;
2294 mc0_end = edc1_end + mc0_size;
2295
2296 if (offset < edc0_end) {
2297 memtype = MEM_EDC0;
2298 memaddr = offset;
2299 } else if (offset < edc1_end) {
2300 memtype = MEM_EDC1;
2301 memaddr = offset - edc0_end;
2302 } else {
2303 if (hma_size && (offset < (edc1_end + hma_size))) {
2304 memtype = MEM_HMA;
2305 memaddr = offset - edc1_end;
2306 } else if (offset < mc0_end) {
2307 memtype = MEM_MC0;
2308 memaddr = offset - edc1_end;
2309 } else if (is_t5(adap->params.chip)) {
2310 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
2311 mc1_size = EXT_MEM1_SIZE_G(size) << 20;
2312 mc1_end = mc0_end + mc1_size;
2313 if (offset < mc1_end) {
2314 memtype = MEM_MC1;
2315 memaddr = offset - mc0_end;
2316 } else {
2317
2318 goto err;
2319 }
2320 } else {
2321
2322 goto err;
2323 }
2324 }
2325
2326 spin_lock(&adap->win0_lock);
2327 ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
2328 spin_unlock(&adap->win0_lock);
2329 return ret;
2330
2331err:
2332 dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
2333 stag, offset);
2334 return -EINVAL;
2335}
2336EXPORT_SYMBOL(cxgb4_read_tpte);
2337
2338u64 cxgb4_read_sge_timestamp(struct net_device *dev)
2339{
2340 u32 hi, lo;
2341 struct adapter *adap;
2342
2343 adap = netdev2adap(dev);
2344 lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
2345 hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));
2346
2347 return ((u64)hi << 32) | (u64)lo;
2348}
2349EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
2350
2351int cxgb4_bar2_sge_qregs(struct net_device *dev,
2352 unsigned int qid,
2353 enum cxgb4_bar2_qtype qtype,
2354 int user,
2355 u64 *pbar2_qoffset,
2356 unsigned int *pbar2_qid)
2357{
2358 return t4_bar2_sge_qregs(netdev2adap(dev),
2359 qid,
2360 (qtype == CXGB4_BAR2_QTYPE_EGRESS
2361 ? T4_BAR2_QTYPE_EGRESS
2362 : T4_BAR2_QTYPE_INGRESS),
2363 user,
2364 pbar2_qoffset,
2365 pbar2_qid);
2366}
2367EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);
2368
2369static struct pci_driver cxgb4_driver;
2370
2371static void check_neigh_update(struct neighbour *neigh)
2372{
2373 const struct device *parent;
2374 const struct net_device *netdev = neigh->dev;
2375
2376 if (is_vlan_dev(netdev))
2377 netdev = vlan_dev_real_dev(netdev);
2378 parent = netdev->dev.parent;
2379 if (parent && parent->driver == &cxgb4_driver.driver)
2380 t4_l2t_update(dev_get_drvdata(parent), neigh);
2381}
2382
2383static int netevent_cb(struct notifier_block *nb, unsigned long event,
2384 void *data)
2385{
2386 switch (event) {
2387 case NETEVENT_NEIGH_UPDATE:
2388 check_neigh_update(data);
2389 break;
2390 case NETEVENT_REDIRECT:
2391 default:
2392 break;
2393 }
2394 return 0;
2395}
2396
2397static bool netevent_registered;
2398static struct notifier_block cxgb4_netevent_nb = {
2399 .notifier_call = netevent_cb
2400};
2401
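/* Poll the doorbell FIFO occupancy counters, sleeping between reads, until
 * both the low- and high-priority counts drain to zero.
 */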
2402static void drain_db_fifo(struct adapter *adap, int usecs)
2403{
2404 u32 v1, v2, lp_count, hp_count;
2405
2406 do {
2407 v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
2408 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
2409 if (is_t4(adap->params.chip)) {
2410 lp_count = LP_COUNT_G(v1);
2411 hp_count = HP_COUNT_G(v1);
2412 } else {
2413 lp_count = LP_COUNT_T5_G(v1);
2414 hp_count = HP_COUNT_T5_G(v2);
2415 }
2416
2417 if (lp_count == 0 && hp_count == 0)
2418 break;
2419 set_current_state(TASK_UNINTERRUPTIBLE);
2420 schedule_timeout(usecs_to_jiffies(usecs));
2421 } while (1);
2422}
2423
2424static void disable_txq_db(struct sge_txq *q)
2425{
2426 unsigned long flags;
2427
2428 spin_lock_irqsave(&q->db_lock, flags);
2429 q->db_disabled = 1;
2430 spin_unlock_irqrestore(&q->db_lock, flags);
2431}
2432
2433static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
2434{
2435 spin_lock_irq(&q->db_lock);
2436 if (q->db_pidx_inc) {
		/* Make sure that all writes to the TX descriptors
		 * are committed before we tell HW about them.
		 */
2440 wmb();
2441 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
2442 QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
2443 q->db_pidx_inc = 0;
2444 }
2445 q->db_disabled = 0;
2446 spin_unlock_irq(&q->db_lock);
2447}
2448
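/* Disable doorbells on all Ethernet, offload and control TX queues. */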
2449static void disable_dbs(struct adapter *adap)
2450{
2451 int i;
2452
2453 for_each_ethrxq(&adap->sge, i)
2454 disable_txq_db(&adap->sge.ethtxq[i].q);
2455 if (is_offload(adap)) {
2456 struct sge_uld_txq_info *txq_info =
2457 adap->sge.uld_txq_info[CXGB4_TX_OFLD];
2458
2459 if (txq_info) {
2460 for_each_ofldtxq(&adap->sge, i) {
2461 struct sge_uld_txq *txq = &txq_info->uldtxq[i];
2462
2463 disable_txq_db(&txq->q);
2464 }
2465 }
2466 }
2467 for_each_port(adap, i)
2468 disable_txq_db(&adap->sge.ctrlq[i].q);
2469}
2470
2471static void enable_dbs(struct adapter *adap)
2472{
2473 int i;
2474
2475 for_each_ethrxq(&adap->sge, i)
2476 enable_txq_db(adap, &adap->sge.ethtxq[i].q);
2477 if (is_offload(adap)) {
2478 struct sge_uld_txq_info *txq_info =
2479 adap->sge.uld_txq_info[CXGB4_TX_OFLD];
2480
2481 if (txq_info) {
2482 for_each_ofldtxq(&adap->sge, i) {
2483 struct sge_uld_txq *txq = &txq_info->uldtxq[i];
2484
2485 enable_txq_db(adap, &txq->q);
2486 }
2487 }
2488 }
2489 for_each_port(adap, i)
2490 enable_txq_db(adap, &adap->sge.ctrlq[i].q);
2491}
2492
2493static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
2494{
2495 enum cxgb4_uld type = CXGB4_ULD_RDMA;
2496
2497 if (adap->uld && adap->uld[type].handle)
2498 adap->uld[type].control(adap->uld[type].handle, cmd);
2499}
2500
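/* Work handler for a doorbell-full condition: wait for the doorbell FIFO to
 * drain, re-enable queue doorbells, tell the RDMA ULD the FIFO is empty
 * again, and re-arm the doorbell interrupts.
 */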
2501static void process_db_full(struct work_struct *work)
2502{
2503 struct adapter *adap;
2504
2505 adap = container_of(work, struct adapter, db_full_task);
2506
2507 drain_db_fifo(adap, dbfifo_drain_delay);
2508 enable_dbs(adap);
2509 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
2510 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
2511 t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
2512 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
2513 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
2514 else
2515 t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
2516 DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
2517}
2518
2519static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
2520{
2521 u16 hw_pidx, hw_cidx;
2522 int ret;
2523
2524 spin_lock_irq(&q->db_lock);
2525 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
2526 if (ret)
2527 goto out;
2528 if (q->db_pidx != hw_pidx) {
2529 u16 delta;
2530 u32 val;
2531
2532 if (q->db_pidx >= hw_pidx)
2533 delta = q->db_pidx - hw_pidx;
2534 else
2535 delta = q->size - hw_pidx + q->db_pidx;
2536
2537 if (is_t4(adap->params.chip))
2538 val = PIDX_V(delta);
2539 else
2540 val = PIDX_T5_V(delta);
2541 wmb();
2542 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
2543 QID_V(q->cntxt_id) | val);
2544 }
2545out:
2546 q->db_disabled = 0;
2547 q->db_pidx_inc = 0;
2548 spin_unlock_irq(&q->db_lock);
2549 if (ret)
2550 CH_WARN(adap, "DB drop recovery failed.\n");
2551}
2552
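/* Resynchronize the hardware producer index with the driver's copy for every
 * TX queue after a doorbell drop.
 */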
2553static void recover_all_queues(struct adapter *adap)
2554{
2555 int i;
2556
2557 for_each_ethrxq(&adap->sge, i)
2558 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
2559 if (is_offload(adap)) {
2560 struct sge_uld_txq_info *txq_info =
2561 adap->sge.uld_txq_info[CXGB4_TX_OFLD];
2562 if (txq_info) {
2563 for_each_ofldtxq(&adap->sge, i) {
2564 struct sge_uld_txq *txq = &txq_info->uldtxq[i];
2565
2566 sync_txq_pidx(adap, &txq->q);
2567 }
2568 }
2569 }
2570 for_each_port(adap, i)
2571 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
2572}
2573
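/* Work handler for dropped doorbells.  On T4 we drain the doorbell FIFO,
 * notify the RDMA ULD and replay the queue state; on T5 we recover the
 * single dropped doorbell recorded by the hardware and re-enable BAR2
 * write combining.
 */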
2574static void process_db_drop(struct work_struct *work)
2575{
2576 struct adapter *adap;
2577
2578 adap = container_of(work, struct adapter, db_drop_task);
2579
2580 if (is_t4(adap->params.chip)) {
2581 drain_db_fifo(adap, dbfifo_drain_delay);
2582 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
2583 drain_db_fifo(adap, dbfifo_drain_delay);
2584 recover_all_queues(adap);
2585 drain_db_fifo(adap, dbfifo_drain_delay);
2586 enable_dbs(adap);
2587 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
2588 } else if (is_t5(adap->params.chip)) {
2589 u32 dropped_db = t4_read_reg(adap, 0x010ac);
2590 u16 qid = (dropped_db >> 15) & 0x1ffff;
2591 u16 pidx_inc = dropped_db & 0x1fff;
2592 u64 bar2_qoffset;
2593 unsigned int bar2_qid;
2594 int ret;
2595
2596 ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
2597 0, &bar2_qoffset, &bar2_qid);
2598 if (ret)
2599 dev_err(adap->pdev_dev, "doorbell drop recovery: "
2600 "qid=%d, pidx_inc=%d\n", qid, pidx_inc);
2601 else
2602 writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
2603 adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);
2604
		/* Re-enable BAR2 WC */
		t4_set_reg_field(adap, 0x10b0, 1 << 15, 1 << 15);
2607 }
2608
2609 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
2610 t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
2611}
2612
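/* Interrupt-level notification that the doorbell FIFO is full (T4 only):
 * disable queue doorbells, mask the doorbell interrupts and defer the
 * drain to process_db_full().
 */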
2613void t4_db_full(struct adapter *adap)
2614{
2615 if (is_t4(adap->params.chip)) {
2616 disable_dbs(adap);
2617 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
2618 t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
2619 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
2620 queue_work(adap->workq, &adap->db_full_task);
2621 }
2622}
2623
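/* Interrupt-level notification that a doorbell was dropped; recovery is
 * deferred to process_db_drop().
 */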
2624void t4_db_dropped(struct adapter *adap)
2625{
2626 if (is_t4(adap->params.chip)) {
2627 disable_dbs(adap);
2628 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
2629 }
2630 queue_work(adap->workq, &adap->db_drop_task);
2631}
2632
2633void t4_register_netevent_notifier(void)
2634{
2635 if (!netevent_registered) {
2636 register_netevent_notifier(&cxgb4_netevent_nb);
2637 netevent_registered = true;
2638 }
2639}
2640
2641static void detach_ulds(struct adapter *adap)
2642{
2643 unsigned int i;
2644
2645 mutex_lock(&uld_mutex);
2646 list_del(&adap->list_node);
2647
2648 for (i = 0; i < CXGB4_ULD_MAX; i++)
2649 if (adap->uld && adap->uld[i].handle)
2650 adap->uld[i].state_change(adap->uld[i].handle,
2651 CXGB4_STATE_DETACH);
2652
2653 if (netevent_registered && list_empty(&adapter_list)) {
2654 unregister_netevent_notifier(&cxgb4_netevent_nb);
2655 netevent_registered = false;
2656 }
2657 mutex_unlock(&uld_mutex);
2658}
2659
2660static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
2661{
2662 unsigned int i;
2663
2664 mutex_lock(&uld_mutex);
2665 for (i = 0; i < CXGB4_ULD_MAX; i++)
2666 if (adap->uld && adap->uld[i].handle)
2667 adap->uld[i].state_change(adap->uld[i].handle,
2668 new_state);
2669 mutex_unlock(&uld_mutex);
2670}
2671
2672#if IS_ENABLED(CONFIG_IPV6)
2673static int cxgb4_inet6addr_handler(struct notifier_block *this,
2674 unsigned long event, void *data)
2675{
2676 struct inet6_ifaddr *ifa = data;
2677 struct net_device *event_dev = ifa->idev->dev;
2678 const struct device *parent = NULL;
2679#if IS_ENABLED(CONFIG_BONDING)
2680 struct adapter *adap;
2681#endif
2682 if (is_vlan_dev(event_dev))
2683 event_dev = vlan_dev_real_dev(event_dev);
2684#if IS_ENABLED(CONFIG_BONDING)
2685 if (event_dev->flags & IFF_MASTER) {
2686 list_for_each_entry(adap, &adapter_list, list_node) {
2687 switch (event) {
2688 case NETDEV_UP:
2689 cxgb4_clip_get(adap->port[0],
2690 (const u32 *)ifa, 1);
2691 break;
2692 case NETDEV_DOWN:
2693 cxgb4_clip_release(adap->port[0],
2694 (const u32 *)ifa, 1);
2695 break;
2696 default:
2697 break;
2698 }
2699 }
2700 return NOTIFY_OK;
2701 }
2702#endif
2703
2704 if (event_dev)
2705 parent = event_dev->dev.parent;
2706
2707 if (parent && parent->driver == &cxgb4_driver.driver) {
2708 switch (event) {
2709 case NETDEV_UP:
2710 cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
2711 break;
2712 case NETDEV_DOWN:
2713 cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
2714 break;
2715 default:
2716 break;
2717 }
2718 }
2719 return NOTIFY_OK;
2720}
2721
2722static bool inet6addr_registered;
2723static struct notifier_block cxgb4_inet6addr_notifier = {
2724 .notifier_call = cxgb4_inet6addr_handler
2725};
2726
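/* Re-sync the CLIP table with the IPv6 addresses of the root devices
 * associated with this adapter's ports.
 */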
2727static void update_clip(const struct adapter *adap)
2728{
2729 int i;
2730 struct net_device *dev;
2731 int ret;
2732
2733 rcu_read_lock();
2734
2735 for (i = 0; i < MAX_NPORTS; i++) {
2736 dev = adap->port[i];
2737 ret = 0;
2738
2739 if (dev)
2740 ret = cxgb4_update_root_dev_clip(dev);
2741
2742 if (ret < 0)
2743 break;
2744 }
2745 rcu_read_unlock();
2746}
2747#endif
2748
/*
 * cxgb_up - enable the adapter
 * @adap: adapter being enabled
 *
 * Called when the first port is enabled, this function performs the
 * actions necessary to make an adapter operational, such as completing
 * the initialization of HW modules, and enabling interrupts.
 *
 * Must be called with the rtnl lock held.
 */
2759static int cxgb_up(struct adapter *adap)
2760{
2761 struct sge *s = &adap->sge;
2762 int err;
2763
2764 mutex_lock(&uld_mutex);
2765 err = setup_sge_queues(adap);
2766 if (err)
2767 goto rel_lock;
2768 err = setup_rss(adap);
2769 if (err)
2770 goto freeq;
2771
2772 if (adap->flags & CXGB4_USING_MSIX) {
2773 if (s->nd_msix_idx < 0) {
2774 err = -ENOMEM;
2775 goto irq_err;
2776 }
2777
2778 err = request_irq(adap->msix_info[s->nd_msix_idx].vec,
2779 t4_nondata_intr, 0,
2780 adap->msix_info[s->nd_msix_idx].desc, adap);
2781 if (err)
2782 goto irq_err;
2783
2784 err = request_msix_queue_irqs(adap);
2785 if (err)
2786 goto irq_err_free_nd_msix;
2787 } else {
2788 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
2789 (adap->flags & CXGB4_USING_MSI) ? 0
2790 : IRQF_SHARED,
2791 adap->port[0]->name, adap);
2792 if (err)
2793 goto irq_err;
2794 }
2795
2796 enable_rx(adap);
2797 t4_sge_start(adap);
2798 t4_intr_enable(adap);
2799 adap->flags |= CXGB4_FULL_INIT_DONE;
2800 mutex_unlock(&uld_mutex);
2801
2802 notify_ulds(adap, CXGB4_STATE_UP);
2803#if IS_ENABLED(CONFIG_IPV6)
2804 update_clip(adap);
2805#endif
2806 return err;
2807
2808irq_err_free_nd_msix:
2809 free_irq(adap->msix_info[s->nd_msix_idx].vec, adap);
2810irq_err:
2811 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
2812freeq:
2813 t4_free_sge_resources(adap);
2814rel_lock:
2815 mutex_unlock(&uld_mutex);
2816 return err;
2817}
2818
2819static void cxgb_down(struct adapter *adapter)
2820{
2821 cancel_work_sync(&adapter->tid_release_task);
2822 cancel_work_sync(&adapter->db_full_task);
2823 cancel_work_sync(&adapter->db_drop_task);
2824 adapter->tid_release_task_busy = false;
2825 adapter->tid_release_head = NULL;
2826
2827 t4_sge_stop(adapter);
2828 t4_free_sge_resources(adapter);
2829
2830 adapter->flags &= ~CXGB4_FULL_INIT_DONE;
2831}
2832
/*
 * net_device operations
 */
2836int cxgb_open(struct net_device *dev)
2837{
2838 struct port_info *pi = netdev_priv(dev);
2839 struct adapter *adapter = pi->adapter;
2840 int err;
2841
2842 netif_carrier_off(dev);
2843
2844 if (!(adapter->flags & CXGB4_FULL_INIT_DONE)) {
2845 err = cxgb_up(adapter);
2846 if (err < 0)
2847 return err;
2848 }
2849
	/* It's possible that the basic port information could have
	 * changed since we first read it.
	 */
2853 err = t4_update_port_info(pi);
2854 if (err < 0)
2855 return err;
2856
2857 err = link_start(dev);
2858 if (err)
2859 return err;
2860
2861 if (pi->nmirrorqsets) {
2862 mutex_lock(&pi->vi_mirror_mutex);
2863 err = cxgb4_port_mirror_alloc_queues(dev);
2864 if (err)
2865 goto out_unlock;
2866
2867 err = cxgb4_port_mirror_start(dev);
2868 if (err)
2869 goto out_free_queues;
2870 mutex_unlock(&pi->vi_mirror_mutex);
2871 }
2872
2873 netif_tx_start_all_queues(dev);
2874 return 0;
2875
2876out_free_queues:
2877 cxgb4_port_mirror_free_queues(dev);
2878
2879out_unlock:
2880 mutex_unlock(&pi->vi_mirror_mutex);
2881 return err;
2882}
2883
2884int cxgb_close(struct net_device *dev)
2885{
2886 struct port_info *pi = netdev_priv(dev);
2887 struct adapter *adapter = pi->adapter;
2888 int ret;
2889
2890 netif_tx_stop_all_queues(dev);
2891 netif_carrier_off(dev);
2892 ret = t4_enable_pi_params(adapter, adapter->pf, pi,
2893 false, false, false);
2894#ifdef CONFIG_CHELSIO_T4_DCB
2895 cxgb4_dcb_reset(dev);
2896 dcb_tx_queue_prio_enable(dev, false);
2897#endif
2898 if (ret)
2899 return ret;
2900
2901 if (pi->nmirrorqsets) {
2902 mutex_lock(&pi->vi_mirror_mutex);
2903 cxgb4_port_mirror_stop(dev);
2904 cxgb4_port_mirror_free_queues(dev);
2905 mutex_unlock(&pi->vi_mirror_mutex);
2906 }
2907
2908 return 0;
2909}
2910
2911int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
2912 __be32 sip, __be16 sport, __be16 vlan,
2913 unsigned int queue, unsigned char port, unsigned char mask)
2914{
2915 int ret;
2916 struct filter_entry *f;
2917 struct adapter *adap;
2918 int i;
2919 u8 *val;
2920
2921 adap = netdev2adap(dev);
2922
	/* Adjust stid to correct filter index */
2924 stid -= adap->tids.sftid_base;
2925 stid += adap->tids.nftids;
2926
	/* Check to make sure the filter requested is writable ... */
2929 f = &adap->tids.ftid_tab[stid];
2930 ret = writable_filter(f);
2931 if (ret)
2932 return ret;
2933
	/* Clear out any old resources being used by the filter before
	 * we start constructing the new filter.
	 */
2937 if (f->valid)
2938 clear_filter(adap, f);
2939
	/* Clear out filter specifications */
2941 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
2942 f->fs.val.lport = be16_to_cpu(sport);
2943 f->fs.mask.lport = ~0;
2944 val = (u8 *)&sip;
2945 if ((val[0] | val[1] | val[2] | val[3]) != 0) {
2946 for (i = 0; i < 4; i++) {
2947 f->fs.val.lip[i] = val[i];
2948 f->fs.mask.lip[i] = ~0;
2949 }
2950 if (adap->params.tp.vlan_pri_map & PORT_F) {
2951 f->fs.val.iport = port;
2952 f->fs.mask.iport = mask;
2953 }
2954 }
2955
2956 if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
2957 f->fs.val.proto = IPPROTO_TCP;
2958 f->fs.mask.proto = ~0;
2959 }
2960
2961 f->fs.dirsteer = 1;
2962 f->fs.iq = queue;
	/* Mark filter as locked */
2964 f->locked = 1;
2965 f->fs.rpttid = 1;
2966
	/* Save the actual tid. We need this to get the corresponding
	 * filter entry structure in filter_rpl.
	 */
2970 f->tid = stid + adap->tids.ftid_base;
2971 ret = set_filter_wr(adap, stid);
2972 if (ret) {
2973 clear_filter(adap, f);
2974 return ret;
2975 }
2976
2977 return 0;
2978}
2979EXPORT_SYMBOL(cxgb4_create_server_filter);
2980
2981int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
2982 unsigned int queue, bool ipv6)
2983{
2984 struct filter_entry *f;
2985 struct adapter *adap;
2986
2987 adap = netdev2adap(dev);
2988
	/* Adjust stid to correct filter index */
2990 stid -= adap->tids.sftid_base;
2991 stid += adap->tids.nftids;
2992
2993 f = &adap->tids.ftid_tab[stid];
	/* Unlock the filter */
2995 f->locked = 0;
2996
2997 return delete_filter(adap, stid);
2998}
2999EXPORT_SYMBOL(cxgb4_remove_server_filter);
3000
3001static void cxgb_get_stats(struct net_device *dev,
3002 struct rtnl_link_stats64 *ns)
3003{
3004 struct port_stats stats;
3005 struct port_info *p = netdev_priv(dev);
3006 struct adapter *adapter = p->adapter;
3007
	/* Block retrieving statistics during EEH error
	 * recovery. Otherwise, the recovery might fail
	 * and the PCI device will be removed permanently
	 */
3012 spin_lock(&adapter->stats_lock);
3013 if (!netif_device_present(dev)) {
3014 spin_unlock(&adapter->stats_lock);
3015 return;
3016 }
3017 t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
3018 &p->stats_base);
3019 spin_unlock(&adapter->stats_lock);
3020
3021 ns->tx_bytes = stats.tx_octets;
3022 ns->tx_packets = stats.tx_frames;
3023 ns->rx_bytes = stats.rx_octets;
3024 ns->rx_packets = stats.rx_frames;
3025 ns->multicast = stats.rx_mcast_frames;
3026
	/* detailed rx_errors */
3028 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
3029 stats.rx_runt;
3030 ns->rx_over_errors = 0;
3031 ns->rx_crc_errors = stats.rx_fcs_err;
3032 ns->rx_frame_errors = stats.rx_symbol_err;
3033 ns->rx_dropped = stats.rx_ovflow0 + stats.rx_ovflow1 +
3034 stats.rx_ovflow2 + stats.rx_ovflow3 +
3035 stats.rx_trunc0 + stats.rx_trunc1 +
3036 stats.rx_trunc2 + stats.rx_trunc3;
3037 ns->rx_missed_errors = 0;
3038
	/* detailed tx_errors */
3040 ns->tx_aborted_errors = 0;
3041 ns->tx_carrier_errors = 0;
3042 ns->tx_fifo_errors = 0;
3043 ns->tx_heartbeat_errors = 0;
3044 ns->tx_window_errors = 0;
3045
3046 ns->tx_errors = stats.tx_error_frames;
3047 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
3048 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
3049}
3050
3051static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
3052{
3053 unsigned int mbox;
3054 int ret = 0, prtad, devad;
3055 struct port_info *pi = netdev_priv(dev);
3056 struct adapter *adapter = pi->adapter;
3057 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
3058
3059 switch (cmd) {
3060 case SIOCGMIIPHY:
3061 if (pi->mdio_addr < 0)
3062 return -EOPNOTSUPP;
3063 data->phy_id = pi->mdio_addr;
3064 break;
3065 case SIOCGMIIREG:
3066 case SIOCSMIIREG:
3067 if (mdio_phy_id_is_c45(data->phy_id)) {
3068 prtad = mdio_phy_id_prtad(data->phy_id);
3069 devad = mdio_phy_id_devad(data->phy_id);
3070 } else if (data->phy_id < 32) {
3071 prtad = data->phy_id;
3072 devad = 0;
3073 data->reg_num &= 0x1f;
3074 } else
3075 return -EINVAL;
3076
3077 mbox = pi->adapter->pf;
3078 if (cmd == SIOCGMIIREG)
3079 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
3080 data->reg_num, &data->val_out);
3081 else
3082 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
3083 data->reg_num, data->val_in);
3084 break;
3085 case SIOCGHWTSTAMP:
3086 return copy_to_user(req->ifr_data, &pi->tstamp_config,
3087 sizeof(pi->tstamp_config)) ?
3088 -EFAULT : 0;
3089 case SIOCSHWTSTAMP:
3090 if (copy_from_user(&pi->tstamp_config, req->ifr_data,
3091 sizeof(pi->tstamp_config)))
3092 return -EFAULT;
3093
3094 if (!is_t4(adapter->params.chip)) {
3095 switch (pi->tstamp_config.tx_type) {
3096 case HWTSTAMP_TX_OFF:
3097 case HWTSTAMP_TX_ON:
3098 break;
3099 default:
3100 return -ERANGE;
3101 }
3102
3103 switch (pi->tstamp_config.rx_filter) {
3104 case HWTSTAMP_FILTER_NONE:
3105 pi->rxtstamp = false;
3106 break;
3107 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
3108 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
3109 cxgb4_ptprx_timestamping(pi, pi->port_id,
3110 PTP_TS_L4);
3111 break;
3112 case HWTSTAMP_FILTER_PTP_V2_EVENT:
3113 cxgb4_ptprx_timestamping(pi, pi->port_id,
3114 PTP_TS_L2_L4);
3115 break;
3116 case HWTSTAMP_FILTER_ALL:
3117 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
3118 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
3119 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
3120 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
3121 pi->rxtstamp = true;
3122 break;
3123 default:
3124 pi->tstamp_config.rx_filter =
3125 HWTSTAMP_FILTER_NONE;
3126 return -ERANGE;
3127 }
3128
3129 if ((pi->tstamp_config.tx_type == HWTSTAMP_TX_OFF) &&
3130 (pi->tstamp_config.rx_filter ==
3131 HWTSTAMP_FILTER_NONE)) {
3132 if (cxgb4_ptp_txtype(adapter, pi->port_id) >= 0)
3133 pi->ptp_enable = false;
3134 }
3135
3136 if (pi->tstamp_config.rx_filter !=
3137 HWTSTAMP_FILTER_NONE) {
3138 if (cxgb4_ptp_redirect_rx_packet(adapter,
3139 pi) >= 0)
3140 pi->ptp_enable = true;
3141 }
3142 } else {
			/* For T4 Adapters */
3144 switch (pi->tstamp_config.rx_filter) {
3145 case HWTSTAMP_FILTER_NONE:
3146 pi->rxtstamp = false;
3147 break;
3148 case HWTSTAMP_FILTER_ALL:
3149 pi->rxtstamp = true;
3150 break;
3151 default:
3152 pi->tstamp_config.rx_filter =
3153 HWTSTAMP_FILTER_NONE;
3154 return -ERANGE;
3155 }
3156 }
3157 return copy_to_user(req->ifr_data, &pi->tstamp_config,
3158 sizeof(pi->tstamp_config)) ?
3159 -EFAULT : 0;
3160 default:
3161 return -EOPNOTSUPP;
3162 }
3163 return ret;
3164}
3165
3166static void cxgb_set_rxmode(struct net_device *dev)
3167{
	/* unfortunately we can't return errors to the stack */
3169 set_rxmode(dev, -1, false);
3170}
3171
3172static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
3173{
3174 struct port_info *pi = netdev_priv(dev);
3175 int ret;
3176
3177 ret = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid,
3178 pi->viid_mirror, new_mtu, -1, -1, -1, -1, true);
3179 if (!ret)
3180 dev->mtu = new_mtu;
3181 return ret;
3182}
3183
3184#ifdef CONFIG_PCI_IOV
3185static int cxgb4_mgmt_open(struct net_device *dev)
3186{
	/* Turn carrier off since we don't have to transmit anything on this
	 * interface.
	 */
3190 netif_carrier_off(dev);
3191 return 0;
3192}
3193
/* Fill MAC address that will be assigned by the FW */
3195static void cxgb4_mgmt_fill_vf_station_mac_addr(struct adapter *adap)
3196{
3197 u8 hw_addr[ETH_ALEN], macaddr[ETH_ALEN];
3198 unsigned int i, vf, nvfs;
3199 u16 a, b;
3200 int err;
3201 u8 *na;
3202
3203 adap->params.pci.vpd_cap_addr = pci_find_capability(adap->pdev,
3204 PCI_CAP_ID_VPD);
3205 err = t4_get_raw_vpd_params(adap, &adap->params.vpd);
3206 if (err)
3207 return;
3208
3209 na = adap->params.vpd.na;
3210 for (i = 0; i < ETH_ALEN; i++)
3211 hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
3212 hex2val(na[2 * i + 1]));
3213
3214 a = (hw_addr[0] << 8) | hw_addr[1];
3215 b = (hw_addr[1] << 8) | hw_addr[2];
3216 a ^= b;
3217 a |= 0x0200;
3218 a &= ~0x0100;
3219 macaddr[0] = a >> 8;
3220 macaddr[1] = a & 0xff;
3221
3222 for (i = 2; i < 5; i++)
3223 macaddr[i] = hw_addr[i + 1];
3224
3225 for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev);
3226 vf < nvfs; vf++) {
3227 macaddr[5] = adap->pf * nvfs + vf;
3228 ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr);
3229 }
3230}
3231
3232static int cxgb4_mgmt_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
3233{
3234 struct port_info *pi = netdev_priv(dev);
3235 struct adapter *adap = pi->adapter;
3236 int ret;
3237
	/* verify MAC addr is valid */
3239 if (!is_valid_ether_addr(mac)) {
3240 dev_err(pi->adapter->pdev_dev,
3241 "Invalid Ethernet address %pM for VF %d\n",
3242 mac, vf);
3243 return -EINVAL;
3244 }
3245
3246 dev_info(pi->adapter->pdev_dev,
3247 "Setting MAC %pM on VF %d\n", mac, vf);
3248 ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac);
3249 if (!ret)
3250 ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac);
3251 return ret;
3252}
3253
3254static int cxgb4_mgmt_get_vf_config(struct net_device *dev,
3255 int vf, struct ifla_vf_info *ivi)
3256{
3257 struct port_info *pi = netdev_priv(dev);
3258 struct adapter *adap = pi->adapter;
3259 struct vf_info *vfinfo;
3260
3261 if (vf >= adap->num_vfs)
3262 return -EINVAL;
3263 vfinfo = &adap->vfinfo[vf];
3264
3265 ivi->vf = vf;
3266 ivi->max_tx_rate = vfinfo->tx_rate;
3267 ivi->min_tx_rate = 0;
3268 ether_addr_copy(ivi->mac, vfinfo->vf_mac_addr);
3269 ivi->vlan = vfinfo->vlan;
3270 ivi->linkstate = vfinfo->link_state;
3271 return 0;
3272}
3273
3274static int cxgb4_mgmt_get_phys_port_id(struct net_device *dev,
3275 struct netdev_phys_item_id *ppid)
3276{
3277 struct port_info *pi = netdev_priv(dev);
3278 unsigned int phy_port_id;
3279
3280 phy_port_id = pi->adapter->adap_idx * 10 + pi->port_id;
3281 ppid->id_len = sizeof(phy_port_id);
3282 memcpy(ppid->id, &phy_port_id, ppid->id_len);
3283 return 0;
3284}
3285
3286static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf,
3287 int min_tx_rate, int max_tx_rate)
3288{
3289 struct port_info *pi = netdev_priv(dev);
3290 struct adapter *adap = pi->adapter;
3291 unsigned int link_ok, speed, mtu;
3292 u32 fw_pfvf, fw_class;
3293 int class_id = vf;
3294 int ret;
3295 u16 pktsize;
3296
3297 if (vf >= adap->num_vfs)
3298 return -EINVAL;
3299
3300 if (min_tx_rate) {
3301 dev_err(adap->pdev_dev,
3302 "Min tx rate (%d) (> 0) for VF %d is Invalid.\n",
3303 min_tx_rate, vf);
3304 return -EINVAL;
3305 }
3306
3307 if (max_tx_rate == 0) {
		/* unbind VF from any Traffic Class */
3309 fw_pfvf =
3310 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
3311 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
3312 fw_class = 0xffffffff;
3313 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1,
3314 &fw_pfvf, &fw_class);
3315 if (ret) {
3316 dev_err(adap->pdev_dev,
3317 "Err %d in unbinding PF %d VF %d from TX Rate Limiting\n",
3318 ret, adap->pf, vf);
3319 return -EINVAL;
3320 }
3321 dev_info(adap->pdev_dev,
3322 "PF %d VF %d is unbound from TX Rate Limiting\n",
3323 adap->pf, vf);
3324 adap->vfinfo[vf].tx_rate = 0;
3325 return 0;
3326 }
3327
3328 ret = t4_get_link_params(pi, &link_ok, &speed, &mtu);
3329 if (ret != FW_SUCCESS) {
3330 dev_err(adap->pdev_dev,
3331 "Failed to get link information for VF %d\n", vf);
3332 return -EINVAL;
3333 }
3334
3335 if (!link_ok) {
3336 dev_err(adap->pdev_dev, "Link down for VF %d\n", vf);
3337 return -EINVAL;
3338 }
3339
3340 if (max_tx_rate > speed) {
3341 dev_err(adap->pdev_dev,
3342 "Max tx rate %d for VF %d can't be > link-speed %u",
3343 max_tx_rate, vf, speed);
3344 return -EINVAL;
3345 }
3346
3347 pktsize = mtu;
	/* subtract ethhdr size and 4 bytes crc since f/w appends it */
3349 pktsize = pktsize - sizeof(struct ethhdr) - 4;
	/* subtract ipv4 hdr size and tcp hdr size to get typical data size */
3351 pktsize = pktsize - sizeof(struct iphdr) - sizeof(struct tcphdr);
	/* configure Traffic Class for rate-limiting */
3353 ret = t4_sched_params(adap, SCHED_CLASS_TYPE_PACKET,
3354 SCHED_CLASS_LEVEL_CL_RL,
3355 SCHED_CLASS_MODE_CLASS,
3356 SCHED_CLASS_RATEUNIT_BITS,
3357 SCHED_CLASS_RATEMODE_ABS,
3358 pi->tx_chan, class_id, 0,
3359 max_tx_rate * 1000, 0, pktsize, 0);
3360 if (ret) {
3361 dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n",
3362 ret);
3363 return -EINVAL;
3364 }
3365 dev_info(adap->pdev_dev,
3366 "Class %d with MSS %u configured with rate %u\n",
3367 class_id, pktsize, max_tx_rate);
3368
	/* bind VF to configured Traffic Class */
3370 fw_pfvf = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
3371 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
3372 fw_class = class_id;
3373 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, &fw_pfvf,
3374 &fw_class);
3375 if (ret) {
3376 dev_err(adap->pdev_dev,
3377 "Err %d in binding PF %d VF %d to Traffic Class %d\n",
3378 ret, adap->pf, vf, class_id);
3379 return -EINVAL;
3380 }
3381 dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n",
3382 adap->pf, vf, class_id);
3383 adap->vfinfo[vf].tx_rate = max_tx_rate;
3384 return 0;
3385}
3386
3387static int cxgb4_mgmt_set_vf_vlan(struct net_device *dev, int vf,
3388 u16 vlan, u8 qos, __be16 vlan_proto)
3389{
3390 struct port_info *pi = netdev_priv(dev);
3391 struct adapter *adap = pi->adapter;
3392 int ret;
3393
3394 if (vf >= adap->num_vfs || vlan > 4095 || qos > 7)
3395 return -EINVAL;
3396
3397 if (vlan_proto != htons(ETH_P_8021Q) || qos != 0)
3398 return -EPROTONOSUPPORT;
3399
3400 ret = t4_set_vlan_acl(adap, adap->mbox, vf + 1, vlan);
3401 if (!ret) {
3402 adap->vfinfo[vf].vlan = vlan;
3403 return 0;
3404 }
3405
3406 dev_err(adap->pdev_dev, "Err %d %s VLAN ACL for PF/VF %d/%d\n",
3407 ret, (vlan ? "setting" : "clearing"), adap->pf, vf);
3408 return ret;
3409}
3410
3411static int cxgb4_mgmt_set_vf_link_state(struct net_device *dev, int vf,
3412 int link)
3413{
3414 struct port_info *pi = netdev_priv(dev);
3415 struct adapter *adap = pi->adapter;
3416 u32 param, val;
3417 int ret = 0;
3418
3419 if (vf >= adap->num_vfs)
3420 return -EINVAL;
3421
3422 switch (link) {
3423 case IFLA_VF_LINK_STATE_AUTO:
3424 val = FW_VF_LINK_STATE_AUTO;
3425 break;
3426
3427 case IFLA_VF_LINK_STATE_ENABLE:
3428 val = FW_VF_LINK_STATE_ENABLE;
3429 break;
3430
3431 case IFLA_VF_LINK_STATE_DISABLE:
3432 val = FW_VF_LINK_STATE_DISABLE;
3433 break;
3434
3435 default:
3436 return -EINVAL;
3437 }
3438
3439 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
3440 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_LINK_STATE));
3441 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1,
			    &param, &val);
3443 if (ret) {
3444 dev_err(adap->pdev_dev,
3445 "Error %d in setting PF %d VF %d link state\n",
3446 ret, adap->pf, vf);
3447 return -EINVAL;
3448 }
3449
3450 adap->vfinfo[vf].link_state = link;
3451 return ret;
3452}
3453#endif
3454
3455static int cxgb_set_mac_addr(struct net_device *dev, void *p)
3456{
3457 int ret;
3458 struct sockaddr *addr = p;
3459 struct port_info *pi = netdev_priv(dev);
3460
3461 if (!is_valid_ether_addr(addr->sa_data))
3462 return -EADDRNOTAVAIL;
3463
3464 ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt,
3465 addr->sa_data, true, &pi->smt_idx);
3466 if (ret < 0)
3467 return ret;
3468
3469 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3470 return 0;
3471}
3472
3473#ifdef CONFIG_NET_POLL_CONTROLLER
3474static void cxgb_netpoll(struct net_device *dev)
3475{
3476 struct port_info *pi = netdev_priv(dev);
3477 struct adapter *adap = pi->adapter;
3478
3479 if (adap->flags & CXGB4_USING_MSIX) {
3480 int i;
3481 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
3482
3483 for (i = pi->nqsets; i; i--, rx++)
3484 t4_sge_intr_msix(0, &rx->rspq);
3485 } else
3486 t4_intr_handler(adap)(0, adap);
3487}
3488#endif
3489
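/* ndo_set_tx_maxrate handler: rate-limit a TX queue by first unbinding it
 * from any existing scheduling class and then, for a non-zero rate, binding
 * it to a Class Rate-Limiting scheduling class configured for that rate.
 */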
3490static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
3491{
3492 struct port_info *pi = netdev_priv(dev);
3493 struct adapter *adap = pi->adapter;
3494 struct ch_sched_queue qe = { 0 };
3495 struct ch_sched_params p = { 0 };
3496 struct sched_class *e;
3497 u32 req_rate;
3498 int err = 0;
3499
3500 if (!can_sched(dev))
3501 return -ENOTSUPP;
3502
3503 if (index < 0 || index > pi->nqsets - 1)
3504 return -EINVAL;
3505
3506 if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
3507 dev_err(adap->pdev_dev,
3508 "Failed to rate limit on queue %d. Link Down?\n",
3509 index);
3510 return -EINVAL;
3511 }
3512
3513 qe.queue = index;
3514 e = cxgb4_sched_queue_lookup(dev, &qe);
3515 if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CL_RL) {
3516 dev_err(adap->pdev_dev,
3517 "Queue %u already bound to class %u of type: %u\n",
3518 index, e->idx, e->info.u.params.level);
3519 return -EBUSY;
3520 }
3521
	/* Convert from Mbps to Kbps */
3523 req_rate = rate * 1000;
3524
	/* Max rate is 100 Gbps */
3526 if (req_rate > SCHED_MAX_RATE_KBPS) {
3527 dev_err(adap->pdev_dev,
3528 "Invalid rate %u Mbps, Max rate is %u Mbps\n",
3529 rate, SCHED_MAX_RATE_KBPS / 1000);
3530 return -ERANGE;
3531 }
3532
	/* First unbind the queue from any existing class */
3534 memset(&qe, 0, sizeof(qe));
3535 qe.queue = index;
3536 qe.class = SCHED_CLS_NONE;
3537
3538 err = cxgb4_sched_class_unbind(dev, (void *)(&qe), SCHED_QUEUE);
3539 if (err) {
3540 dev_err(adap->pdev_dev,
3541 "Unbinding Queue %d on port %d fail. Err: %d\n",
3542 index, pi->port_id, err);
3543 return err;
3544 }
3545
	/* Queue already unbound */
3547 if (!req_rate)
3548 return 0;
3549
	/* Fetch any available unused or matching scheduling class */
3551 p.type = SCHED_CLASS_TYPE_PACKET;
3552 p.u.params.level = SCHED_CLASS_LEVEL_CL_RL;
3553 p.u.params.mode = SCHED_CLASS_MODE_CLASS;
3554 p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS;
3555 p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS;
3556 p.u.params.channel = pi->tx_chan;
3557 p.u.params.class = SCHED_CLS_NONE;
3558 p.u.params.minrate = 0;
3559 p.u.params.maxrate = req_rate;
3560 p.u.params.weight = 0;
3561 p.u.params.pktsize = dev->mtu;
3562
3563 e = cxgb4_sched_class_alloc(dev, &p);
3564 if (!e)
3565 return -ENOMEM;
3566
	/* Bind the queue to the scheduling class we just allocated */
3568 memset(&qe, 0, sizeof(qe));
3569 qe.queue = index;
3570 qe.class = e->idx;
3571
3572 err = cxgb4_sched_class_bind(dev, (void *)(&qe), SCHED_QUEUE);
3573 if (err)
3574 dev_err(adap->pdev_dev,
3575 "Queue rate limiting failed. Err: %d\n", err);
3576 return err;
3577}
3578
3579static int cxgb_setup_tc_flower(struct net_device *dev,
3580 struct flow_cls_offload *cls_flower)
3581{
3582 switch (cls_flower->command) {
3583 case FLOW_CLS_REPLACE:
3584 return cxgb4_tc_flower_replace(dev, cls_flower);
3585 case FLOW_CLS_DESTROY:
3586 return cxgb4_tc_flower_destroy(dev, cls_flower);
3587 case FLOW_CLS_STATS:
3588 return cxgb4_tc_flower_stats(dev, cls_flower);
3589 default:
3590 return -EOPNOTSUPP;
3591 }
3592}
3593
3594static int cxgb_setup_tc_cls_u32(struct net_device *dev,
3595 struct tc_cls_u32_offload *cls_u32)
3596{
3597 switch (cls_u32->command) {
3598 case TC_CLSU32_NEW_KNODE:
3599 case TC_CLSU32_REPLACE_KNODE:
3600 return cxgb4_config_knode(dev, cls_u32);
3601 case TC_CLSU32_DELETE_KNODE:
3602 return cxgb4_delete_knode(dev, cls_u32);
3603 default:
3604 return -EOPNOTSUPP;
3605 }
3606}
3607
3608static int cxgb_setup_tc_matchall(struct net_device *dev,
3609 struct tc_cls_matchall_offload *cls_matchall,
3610 bool ingress)
3611{
3612 struct adapter *adap = netdev2adap(dev);
3613
3614 if (!adap->tc_matchall)
3615 return -ENOMEM;
3616
3617 switch (cls_matchall->command) {
3618 case TC_CLSMATCHALL_REPLACE:
3619 return cxgb4_tc_matchall_replace(dev, cls_matchall, ingress);
3620 case TC_CLSMATCHALL_DESTROY:
3621 return cxgb4_tc_matchall_destroy(dev, cls_matchall, ingress);
3622 case TC_CLSMATCHALL_STATS:
3623 if (ingress)
3624 return cxgb4_tc_matchall_stats(dev, cls_matchall);
3625 break;
3626 default:
3627 break;
3628 }
3629
3630 return -EOPNOTSUPP;
3631}
3632
3633static int cxgb_setup_tc_block_ingress_cb(enum tc_setup_type type,
3634 void *type_data, void *cb_priv)
3635{
3636 struct net_device *dev = cb_priv;
3637 struct port_info *pi = netdev2pinfo(dev);
3638 struct adapter *adap = netdev2adap(dev);
3639
3640 if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
3641 dev_err(adap->pdev_dev,
3642 "Failed to setup tc on port %d. Link Down?\n",
3643 pi->port_id);
3644 return -EINVAL;
3645 }
3646
3647 if (!tc_cls_can_offload_and_chain0(dev, type_data))
3648 return -EOPNOTSUPP;
3649
3650 switch (type) {
3651 case TC_SETUP_CLSU32:
3652 return cxgb_setup_tc_cls_u32(dev, type_data);
3653 case TC_SETUP_CLSFLOWER:
3654 return cxgb_setup_tc_flower(dev, type_data);
3655 case TC_SETUP_CLSMATCHALL:
3656 return cxgb_setup_tc_matchall(dev, type_data, true);
3657 default:
3658 return -EOPNOTSUPP;
3659 }
3660}
3661
3662static int cxgb_setup_tc_block_egress_cb(enum tc_setup_type type,
3663 void *type_data, void *cb_priv)
3664{
3665 struct net_device *dev = cb_priv;
3666 struct port_info *pi = netdev2pinfo(dev);
3667 struct adapter *adap = netdev2adap(dev);
3668
3669 if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
3670 dev_err(adap->pdev_dev,
3671 "Failed to setup tc on port %d. Link Down?\n",
3672 pi->port_id);
3673 return -EINVAL;
3674 }
3675
3676 if (!tc_cls_can_offload_and_chain0(dev, type_data))
3677 return -EOPNOTSUPP;
3678
3679 switch (type) {
3680 case TC_SETUP_CLSMATCHALL:
3681 return cxgb_setup_tc_matchall(dev, type_data, false);
3682 default:
3683 break;
3684 }
3685
3686 return -EOPNOTSUPP;
3687}
3688
3689static int cxgb_setup_tc_mqprio(struct net_device *dev,
3690 struct tc_mqprio_qopt_offload *mqprio)
3691{
3692 struct adapter *adap = netdev2adap(dev);
3693
3694 if (!is_ethofld(adap) || !adap->tc_mqprio)
3695 return -ENOMEM;
3696
3697 return cxgb4_setup_tc_mqprio(dev, mqprio);
3698}
3699
3700static LIST_HEAD(cxgb_block_cb_list);
3701
3702static int cxgb_setup_tc_block(struct net_device *dev,
3703 struct flow_block_offload *f)
3704{
3705 struct port_info *pi = netdev_priv(dev);
3706 flow_setup_cb_t *cb;
3707 bool ingress_only;
3708
3709 pi->tc_block_shared = f->block_shared;
3710 if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
3711 cb = cxgb_setup_tc_block_egress_cb;
3712 ingress_only = false;
3713 } else {
3714 cb = cxgb_setup_tc_block_ingress_cb;
3715 ingress_only = true;
3716 }
3717
3718 return flow_block_cb_setup_simple(f, &cxgb_block_cb_list,
3719 cb, pi, dev, ingress_only);
3720}
3721
3722static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
3723 void *type_data)
3724{
3725 switch (type) {
3726 case TC_SETUP_QDISC_MQPRIO:
3727 return cxgb_setup_tc_mqprio(dev, type_data);
3728 case TC_SETUP_BLOCK:
3729 return cxgb_setup_tc_block(dev, type_data);
3730 default:
3731 return -EOPNOTSUPP;
3732 }
3733}
3734
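/* Stop offloading a VXLAN/GENEVE UDP tunnel port: clear the MPS RX tunnel
 * type register and release the 'match all' inner-MAC filters that were
 * allocated when the port was added.
 */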
3735static int cxgb_udp_tunnel_unset_port(struct net_device *netdev,
3736 unsigned int table, unsigned int entry,
3737 struct udp_tunnel_info *ti)
3738{
3739 struct port_info *pi = netdev_priv(netdev);
3740 struct adapter *adapter = pi->adapter;
3741 u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
3742 int ret = 0, i;
3743
3744 switch (ti->type) {
3745 case UDP_TUNNEL_TYPE_VXLAN:
3746 adapter->vxlan_port = 0;
3747 t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, 0);
3748 break;
3749 case UDP_TUNNEL_TYPE_GENEVE:
3750 adapter->geneve_port = 0;
3751 t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0);
3752 break;
3753 default:
3754 return -EINVAL;
3755 }
3756
	/* Matchall mac entries can be deleted only after all tunnel ports
	 * are brought down or removed.
	 */
3760 if (!adapter->rawf_cnt)
3761 return 0;
3762 for_each_port(adapter, i) {
3763 pi = adap2pinfo(adapter, i);
3764 ret = t4_free_raw_mac_filt(adapter, pi->viid,
3765 match_all_mac, match_all_mac,
3766 adapter->rawf_start + pi->port_id,
3767 1, pi->port_id, false);
3768 if (ret < 0) {
3769 netdev_info(netdev, "Failed to free mac filter entry, for port %d\n",
3770 i);
3771 return ret;
3772 }
3773 }
3774
3775 return 0;
3776}
3777
3778static int cxgb_udp_tunnel_set_port(struct net_device *netdev,
3779 unsigned int table, unsigned int entry,
3780 struct udp_tunnel_info *ti)
3781{
3782 struct port_info *pi = netdev_priv(netdev);
3783 struct adapter *adapter = pi->adapter;
3784 u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
3785 int i, ret;
3786
3787 switch (ti->type) {
3788 case UDP_TUNNEL_TYPE_VXLAN:
3789 adapter->vxlan_port = ti->port;
3790 t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A,
3791 VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F);
3792 break;
3793 case UDP_TUNNEL_TYPE_GENEVE:
3794 adapter->geneve_port = ti->port;
3795 t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A,
3796 GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F);
3797 break;
3798 default:
3799 return -EINVAL;
3800 }
3801
	/* Create a 'match all' mac filter entry for inner mac,
	 * if raw mac interface is supported. Once the linux kernel provides
	 * driver entry points for adding/deleting the inner mac addresses,
	 * we will remove this 'match all' entry and fallback to adding
	 * exact match filters.
	 */
3808 for_each_port(adapter, i) {
3809 pi = adap2pinfo(adapter, i);
3810
3811 ret = t4_alloc_raw_mac_filt(adapter, pi->viid,
3812 match_all_mac,
3813 match_all_mac,
3814 adapter->rawf_start + pi->port_id,
3815 1, pi->port_id, false);
3816 if (ret < 0) {
3817 netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n",
3818 be16_to_cpu(ti->port));
3819 return ret;
3820 }
3821 }
3822
3823 return 0;
3824}
3825
3826static const struct udp_tunnel_nic_info cxgb_udp_tunnels = {
3827 .set_port = cxgb_udp_tunnel_set_port,
3828 .unset_port = cxgb_udp_tunnel_unset_port,
3829 .tables = {
3830 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
3831 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
3832 },
3833};
3834
3835static netdev_features_t cxgb_features_check(struct sk_buff *skb,
3836 struct net_device *dev,
3837 netdev_features_t features)
3838{
3839 struct port_info *pi = netdev_priv(dev);
3840 struct adapter *adapter = pi->adapter;
3841
3842 if (CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
3843 return features;
3844
	/* Check if hw supports offload for this packet */
3846 if (!skb->encapsulation || cxgb_encap_offload_supported(skb))
3847 return features;
3848
	/* Offload is not supported for this encapsulated packet */
3850 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3851}
3852
3853static netdev_features_t cxgb_fix_features(struct net_device *dev,
3854 netdev_features_t features)
3855{
	/* Disable GRO, if RX_CSUM is disabled */
3857 if (!(features & NETIF_F_RXCSUM))
3858 features &= ~NETIF_F_GRO;
3859
3860 return features;
3861}
3862
3863static const struct net_device_ops cxgb4_netdev_ops = {
3864 .ndo_open = cxgb_open,
3865 .ndo_stop = cxgb_close,
3866 .ndo_start_xmit = t4_start_xmit,
3867 .ndo_select_queue = cxgb_select_queue,
3868 .ndo_get_stats64 = cxgb_get_stats,
3869 .ndo_set_rx_mode = cxgb_set_rxmode,
3870 .ndo_set_mac_address = cxgb_set_mac_addr,
3871 .ndo_set_features = cxgb_set_features,
3872 .ndo_validate_addr = eth_validate_addr,
3873 .ndo_do_ioctl = cxgb_ioctl,
3874 .ndo_change_mtu = cxgb_change_mtu,
3875#ifdef CONFIG_NET_POLL_CONTROLLER
3876 .ndo_poll_controller = cxgb_netpoll,
3877#endif
3878#ifdef CONFIG_CHELSIO_T4_FCOE
3879 .ndo_fcoe_enable = cxgb_fcoe_enable,
3880 .ndo_fcoe_disable = cxgb_fcoe_disable,
3881#endif
3882 .ndo_set_tx_maxrate = cxgb_set_tx_maxrate,
3883 .ndo_setup_tc = cxgb_setup_tc,
3884 .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
3885 .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
3886 .ndo_features_check = cxgb_features_check,
3887 .ndo_fix_features = cxgb_fix_features,
3888};
3889
3890#ifdef CONFIG_PCI_IOV
3891static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
3892 .ndo_open = cxgb4_mgmt_open,
3893 .ndo_set_vf_mac = cxgb4_mgmt_set_vf_mac,
3894 .ndo_get_vf_config = cxgb4_mgmt_get_vf_config,
3895 .ndo_set_vf_rate = cxgb4_mgmt_set_vf_rate,
3896 .ndo_get_phys_port_id = cxgb4_mgmt_get_phys_port_id,
3897 .ndo_set_vf_vlan = cxgb4_mgmt_set_vf_vlan,
3898 .ndo_set_vf_link_state = cxgb4_mgmt_set_vf_link_state,
3899};
3900#endif
3901
3902static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
3903 struct ethtool_drvinfo *info)
3904{
3905 struct adapter *adapter = netdev2adap(dev);
3906
3907 strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
3908 strlcpy(info->bus_info, pci_name(adapter->pdev),
3909 sizeof(info->bus_info));
3910}
3911
3912static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
3913 .get_drvinfo = cxgb4_mgmt_get_drvinfo,
3914};
3915
3916static void notify_fatal_err(struct work_struct *work)
3917{
3918 struct adapter *adap;
3919
3920 adap = container_of(work, struct adapter, fatal_err_notify_task);
3921 notify_ulds(adap, CXGB4_STATE_FATAL_ERROR);
3922}
3923
3924void t4_fatal_err(struct adapter *adap)
3925{
3926 int port;
3927
3928 if (pci_channel_offline(adap->pdev))
3929 return;
3930
	/* Disable the SGE since ULDs are going to free resources that
	 * could be exposed to the adapter.  RDMA MWs for example...
	 */
3934 t4_shutdown_adapter(adap);
3935 for_each_port(adap, port) {
3936 struct net_device *dev = adap->port[port];
3937
		/* If we get here in very early initialization the network
		 * devices may not have been set up yet.
		 */
3941 if (!dev)
3942 continue;
3943
3944 netif_tx_stop_all_queues(dev);
3945 netif_carrier_off(dev);
3946 }
3947 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
3948 queue_work(adap->workq, &adap->fatal_err_notify_task);
3949}
3950
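/* Set up the NIC memory window for access to adapter memory. */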
3951static void setup_memwin(struct adapter *adap)
3952{
3953 u32 nic_win_base = t4_get_util_window(adap);
3954
3955 t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC);
3956}
3957
3958static void setup_memwin_rdma(struct adapter *adap)
3959{
3960 if (adap->vres.ocq.size) {
3961 u32 start;
3962 unsigned int sz_kb;
3963
3964 start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
3965 start &= PCI_BASE_ADDRESS_MEM_MASK;
3966 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
3967 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
3968 t4_write_reg(adap,
3969 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
3970 start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
3971 t4_write_reg(adap,
3972 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
3973 adap->vres.ocq.start);
3974 t4_read_reg(adap,
3975 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
3976 }
3977}
3978
/* HMA Definitions */

/* The maximum number of addresses that can be sent in a single FW cmd */
3982#define HMA_MAX_ADDR_IN_CMD 5
3983
3984#define HMA_PAGE_SIZE PAGE_SIZE
3985
#define HMA_MAX_NO_FW_ADDRESS	(16 << 10)  /* FW supports 16K addresses */
3987
3988#define HMA_PAGE_ORDER \
3989 ((HMA_PAGE_SIZE < HMA_MAX_NO_FW_ADDRESS) ? \
3990 ilog2(HMA_MAX_NO_FW_ADDRESS / HMA_PAGE_SIZE) : 0)
3991
/* The minimum and maximum possible HMA sizes that can be specified in the FW
 * configuration (in units of MB).
 */
3995#define HMA_MIN_TOTAL_SIZE 1
3996#define HMA_MAX_TOTAL_SIZE \
3997 (((HMA_PAGE_SIZE << HMA_PAGE_ORDER) * \
3998 HMA_MAX_NO_FW_ADDRESS) >> 20)
3999
4000static void adap_free_hma_mem(struct adapter *adapter)
4001{
4002 struct scatterlist *iter;
4003 struct page *page;
4004 int i;
4005
4006 if (!adapter->hma.sgt)
4007 return;
4008
4009 if (adapter->hma.flags & HMA_DMA_MAPPED_FLAG) {
		dma_unmap_sg(adapter->pdev_dev, adapter->hma.sgt->sgl,
			     adapter->hma.sgt->nents, DMA_BIDIRECTIONAL);
4012 adapter->hma.flags &= ~HMA_DMA_MAPPED_FLAG;
4013 }
4014
4015 for_each_sg(adapter->hma.sgt->sgl, iter,
4016 adapter->hma.sgt->orig_nents, i) {
4017 page = sg_page(iter);
4018 if (page)
4019 __free_pages(page, HMA_PAGE_ORDER);
4020 }
4021
4022 kfree(adapter->hma.phy_addr);
4023 sg_free_table(adapter->hma.sgt);
4024 kfree(adapter->hma.sgt);
4025 adapter->hma.sgt = NULL;
4026}
4027
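/* Allocate host memory for the T6 Host Memory Access (HMA) region and hand
 * the DMA addresses to the firmware in FW_HMA_CMD chunks of up to
 * HMA_MAX_ADDR_IN_CMD addresses each.
 */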
4028static int adap_config_hma(struct adapter *adapter)
4029{
4030 struct scatterlist *sgl, *iter;
4031 struct sg_table *sgt;
4032 struct page *newpage;
4033 unsigned int i, j, k;
4034 u32 param, hma_size;
4035 unsigned int ncmds;
4036 size_t page_size;
4037 u32 page_order;
4038 int node, ret;
4039
	/* HMA is supported only for T6+ cards.
	 * Avoid initializing HMA in kdump kernels.
	 */
4043 if (is_kdump_kernel() ||
4044 CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
4045 return 0;
4046
	/* Get the HMA region size required by fw */
4048 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4049 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_HMA_SIZE));
4050 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
			      1, &param, &hma_size);
	/* An error means card has its own memory or HMA is not supported by
	 * the firmware. Return without any errors.
	 */
4055 if (ret || !hma_size)
4056 return 0;
4057
4058 if (hma_size < HMA_MIN_TOTAL_SIZE ||
4059 hma_size > HMA_MAX_TOTAL_SIZE) {
4060 dev_err(adapter->pdev_dev,
4061 "HMA size %uMB beyond bounds(%u-%lu)MB\n",
4062 hma_size, HMA_MIN_TOTAL_SIZE, HMA_MAX_TOTAL_SIZE);
4063 return -EINVAL;
4064 }
4065
4066 page_size = HMA_PAGE_SIZE;
4067 page_order = HMA_PAGE_ORDER;
4068 adapter->hma.sgt = kzalloc(sizeof(*adapter->hma.sgt), GFP_KERNEL);
4069 if (unlikely(!adapter->hma.sgt)) {
4070 dev_err(adapter->pdev_dev, "HMA SG table allocation failed\n");
4071 return -ENOMEM;
4072 }
4073 sgt = adapter->hma.sgt;
4074
	/* FW returned value will be in MB's */
4076 sgt->orig_nents = (hma_size << 20) / (page_size << page_order);
4077 if (sg_alloc_table(sgt, sgt->orig_nents, GFP_KERNEL)) {
4078 dev_err(adapter->pdev_dev, "HMA SGL allocation failed\n");
4079 kfree(adapter->hma.sgt);
4080 adapter->hma.sgt = NULL;
4081 return -ENOMEM;
4082 }
4083
4084 sgl = adapter->hma.sgt->sgl;
4085 node = dev_to_node(adapter->pdev_dev);
4086 for_each_sg(sgl, iter, sgt->orig_nents, i) {
4087 newpage = alloc_pages_node(node, __GFP_NOWARN | GFP_KERNEL |
4088 __GFP_ZERO, page_order);
4089 if (!newpage) {
4090 dev_err(adapter->pdev_dev,
4091 "Not enough memory for HMA page allocation\n");
4092 ret = -ENOMEM;
4093 goto free_hma;
4094 }
4095 sg_set_page(iter, newpage, page_size << page_order, 0);
4096 }
4097
4098 sgt->nents = dma_map_sg(adapter->pdev_dev, sgl, sgt->orig_nents,
4099 DMA_BIDIRECTIONAL);
4100 if (!sgt->nents) {
4101 dev_err(adapter->pdev_dev,
4102 "Not enough memory for HMA DMA mapping");
4103 ret = -ENOMEM;
4104 goto free_hma;
4105 }
4106 adapter->hma.flags |= HMA_DMA_MAPPED_FLAG;
4107
4108 adapter->hma.phy_addr = kcalloc(sgt->nents, sizeof(dma_addr_t),
4109 GFP_KERNEL);
	if (unlikely(!adapter->hma.phy_addr)) {
		/* ret may still hold 0 from the earlier t4_query_params(),
		 * so set it explicitly before bailing out.
		 */
		ret = -ENOMEM;
		goto free_hma;
	}
4112
4113 for_each_sg(sgl, iter, sgt->nents, i) {
4114 newpage = sg_page(iter);
4115 adapter->hma.phy_addr[i] = sg_dma_address(iter);
4116 }
4117
4118 ncmds = DIV_ROUND_UP(sgt->nents, HMA_MAX_ADDR_IN_CMD);
	/* Pass on the addresses to firmware */
4120 for (i = 0, k = 0; i < ncmds; i++, k += HMA_MAX_ADDR_IN_CMD) {
4121 struct fw_hma_cmd hma_cmd;
4122 u8 naddr = HMA_MAX_ADDR_IN_CMD;
4123 u8 soc = 0, eoc = 0;
4124 u8 hma_mode = 1;
4125
4126 soc = (i == 0) ? 1 : 0;
4127 eoc = (i == ncmds - 1) ? 1 : 0;
4128
		/* For last cmd, set naddr corresponding to remaining
		 * addresses
		 */
4132 if (i == ncmds - 1) {
4133 naddr = sgt->nents % HMA_MAX_ADDR_IN_CMD;
4134 naddr = naddr ? naddr : HMA_MAX_ADDR_IN_CMD;
4135 }
4136 memset(&hma_cmd, 0, sizeof(hma_cmd));
4137 hma_cmd.op_pkd = htonl(FW_CMD_OP_V(FW_HMA_CMD) |
4138 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
4139 hma_cmd.retval_len16 = htonl(FW_LEN16(hma_cmd));
4140
4141 hma_cmd.mode_to_pcie_params =
4142 htonl(FW_HMA_CMD_MODE_V(hma_mode) |
4143 FW_HMA_CMD_SOC_V(soc) | FW_HMA_CMD_EOC_V(eoc));
4144
		/* HMA cmd size specified in MB's */
4146 hma_cmd.naddr_size =
4147 htonl(FW_HMA_CMD_SIZE_V(hma_size) |
4148 FW_HMA_CMD_NADDR_V(naddr));
4149
		/* Total page size specified in units of 4K */
4151 hma_cmd.addr_size_pkd =
4152 htonl(FW_HMA_CMD_ADDR_SIZE_V
4153 ((page_size << page_order) >> 12));
4154
		/* Fill the 5 addresses */
4156 for (j = 0; j < naddr; j++) {
4157 hma_cmd.phy_address[j] =
4158 cpu_to_be64(adapter->hma.phy_addr[j + k]);
4159 }
4160 ret = t4_wr_mbox(adapter, adapter->mbox, &hma_cmd,
4161 sizeof(hma_cmd), &hma_cmd);
4162 if (ret) {
4163 dev_err(adapter->pdev_dev,
4164 "HMA FW command failed with err %d\n", ret);
4165 goto free_hma;
4166 }
4167 }
4168
4169 if (!ret)
4170 dev_info(adapter->pdev_dev,
4171 "Reserved %uMB host memory for HMA\n", hma_size);
4172 return ret;
4173
4174free_hma:
4175 adap_free_hma_mem(adapter);
4176 return ret;
4177}
4178
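/* Phase 1 of adapter initialization once the firmware is ready: query and
 * select device capabilities, configure global RSS, set PF resource limits
 * and apply a number of TP/SGE register tweaks.
 */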
4179static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
4180{
4181 u32 v;
4182 int ret;
4183
	/* Now that we've successfully configured and initialized the adapter
	 * we can ask the Firmware what resources it has provisioned for us.
	 */
4187 ret = t4_get_pfres(adap);
4188 if (ret) {
4189 dev_err(adap->pdev_dev,
4190 "Unable to retrieve resource provisioning information\n");
4191 return ret;
4192 }
4193
	/* get device capabilities */
4195 memset(c, 0, sizeof(*c));
4196 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4197 FW_CMD_REQUEST_F | FW_CMD_READ_F);
4198 c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
4199 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
4200 if (ret < 0)
4201 return ret;
	/* select capabilities we'll be using */
4203 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4204 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
4205 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
4206 if (ret < 0)
4207 return ret;
4208
4209 ret = t4_config_glbl_rss(adap, adap->pf,
4210 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4211 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
4212 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
4213 if (ret < 0)
4214 return ret;
4215
4216 ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
4217 MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
4218 FW_CMD_CAP_PF);
4219 if (ret < 0)
4220 return ret;
4221
4222 t4_sge_init(adap);
4223
	/* tweak some settings */
4225 t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
4226 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
4227 t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
4228 v = t4_read_reg(adap, TP_PIO_DATA_A);
4229 t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);
4230
	/* first 4 Tx modulation queues point to consecutive Tx channels */
4232 adap->params.tp.tx_modq_map = 0xE4;
4233 t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
4234 TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));
4235
	/* associate each Tx modulation queue with consecutive Tx channels */
4237 v = 0x84218421;
4238 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
4239 &v, 1, TP_TX_SCHED_HDR_A);
4240 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
4241 &v, 1, TP_TX_SCHED_FIFO_A);
4242 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
4243 &v, 1, TP_TX_SCHED_PCMD_A);
4244
4245#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16
4246 if (is_offload(adap)) {
4247 t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
4248 TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4249 TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4250 TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4251 TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4252 t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
4253 TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4254 TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4255 TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4256 TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4257 }
4258
	/* get basic stuff going */
4260 return t4_early_init(adap, adap->pf);
4261}
4262
/*
 * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
 */
4266#define MAX_ATIDS 8192U
4267

/* Phase 0 of initialization: contact FW, obtain config, perform basic init.
 *
 * If the firmware we're dealing with has Configuration File support, then
 * we use that to perform all configuration.
 */

/*
 * Tweak configuration based on module parameters, etc.  Most of these have
 * defaults assigned to them by Firmware Configuration Files (if we're using
 * them) but need to be explicitly set if we're using hard-coded
 * initialization.  But even in the case of using Firmware Configuration
 * Files, we'd like to have the ability to tweak these based on module
 * parameters, etc.
 */
4284static int adap_init0_tweaks(struct adapter *adapter)
4285{
	/* Fix up various Host-Dependent Parameters like Page Size, Cache
	 * Line Size, etc.  The firmware default is for a 4KB Page Size and
	 * 64B Cache Line Size.
	 */
4291 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
4292

	/* Process module parameters which affect early initialization. */
4296 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
4297 dev_err(&adapter->pdev->dev,
4298 "Ignoring illegal rx_dma_offset=%d, using 2\n",
4299 rx_dma_offset);
4300 rx_dma_offset = 2;
4301 }
4302 t4_set_reg_field(adapter, SGE_CONTROL_A,
4303 PKTSHIFT_V(PKTSHIFT_M),
4304 PKTSHIFT_V(rx_dma_offset));
4305

	/* Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
	 * adds the pseudo header itself.
	 */
4310 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
4311 CSUM_HAS_PSEUDO_HDR_F, 0);
4312
4313 return 0;
4314}
4315

/* 10Gb/s-BT PHY Support. Chip-specific functions for managing external
 * 10Gb/s-BT PHY firmware.
 */

/* PHY version reporting function for the AQ1202 PHY. */
4320static int phy_aq1202_version(const u8 *phy_fw_data,
4321 size_t phy_fw_size)
4322{
4323 int offset;
4324
	/* The firmware image layout, as decoded below: the 24-bit
	 * little-endian value at offset 0x8 gives (in units of 4KB) the
	 * offset of the primary image; the 24-bit little-endian value at
	 * offset 0xa within the primary image points to the region that
	 * holds the big-endian 16-bit firmware version at offset 0x27e.
	 */
4334 #define be16(__p) (((__p)[0] << 8) | (__p)[1])
4335 #define le16(__p) ((__p)[0] | ((__p)[1] << 8))
4336 #define le24(__p) (le16(__p) | ((__p)[2] << 16))
4337
4338 offset = le24(phy_fw_data + 0x8) << 12;
4339 offset = le24(phy_fw_data + offset + 0xa);
4340 return be16(phy_fw_data + offset + 0x27e);
4341
4342 #undef be16
4343 #undef le16
4344 #undef le24
4345}
4346
4347static struct info_10gbt_phy_fw {
4348 unsigned int phy_fw_id;
4349 char *phy_fw_file;
4350 int (*phy_fw_version)(const u8 *phy_fw_data, size_t phy_fw_size);
4351 int phy_flash;
4352} phy_info_array[] = {
4353 {
4354 PHY_AQ1202_DEVICEID,
4355 PHY_AQ1202_FIRMWARE,
4356 phy_aq1202_version,
4357 1,
4358 },
4359 {
4360 PHY_BCM84834_DEVICEID,
4361 PHY_BCM84834_FIRMWARE,
4362 NULL,
4363 0,
4364 },
4365 { 0, NULL, NULL },
4366};
4367
4368static struct info_10gbt_phy_fw *find_phy_info(int devid)
4369{
4370 int i;
4371
4372 for (i = 0; i < ARRAY_SIZE(phy_info_array); i++) {
4373 if (phy_info_array[i].phy_fw_id == devid)
4374 return &phy_info_array[i];
4375 }
4376 return NULL;
4377}
4378
4379
4380
4381
4382
4383
4384static int adap_init0_phy(struct adapter *adap)
4385{
4386 const struct firmware *phyf;
4387 int ret;
4388 struct info_10gbt_phy_fw *phy_info;
4389
	/* Use the device ID to determine which PHY file to flash.
	 */
4392 phy_info = find_phy_info(adap->pdev->device);
4393 if (!phy_info) {
4394 dev_warn(adap->pdev_dev,
4395 "No PHY Firmware file found for this PHY\n");
4396 return -EOPNOTSUPP;
4397 }
4398
	/* If we have a T4 PHY firmware file under /lib/firmware/cxgb4/, then
	 * use that. The adapter firmware provides us with a memory buffer
	 * where we can load a PHY firmware file from the host if we want to
	 * override the PHY firmware file in flash.
	 */
4404 ret = request_firmware_direct(&phyf, phy_info->phy_fw_file,
4405 adap->pdev_dev);
4406 if (ret < 0) {
		/* For adapters without FLASH attached to the PHY for their
		 * firmware, it's obviously a fatal error if we can't get the
		 * firmware to the adapter.  For adapters with PHY firmware
		 * FLASH storage, it's worth a warning if we can't find the
		 * PHY firmware, but we can continue with whatever copy is
		 * already in FLASH.
		 */
4413 dev_err(adap->pdev_dev, "unable to find PHY Firmware image "
4414 "/lib/firmware/%s, error %d\n",
4415 phy_info->phy_fw_file, -ret);
4416 if (phy_info->phy_flash) {
4417 int cur_phy_fw_ver = 0;
4418
4419 t4_phy_fw_ver(adap, &cur_phy_fw_ver);
4420 dev_warn(adap->pdev_dev, "continuing with, on-adapter "
4421 "FLASH copy, version %#x\n", cur_phy_fw_ver);
4422 ret = 0;
4423 }
4424
4425 return ret;
4426 }
4427
	/* Load PHY Firmware onto adapter.
	 */
4430 spin_lock_bh(&adap->win0_lock);
4431 ret = t4_load_phy_fw(adap, MEMWIN_NIC, phy_info->phy_fw_version,
4432 (u8 *)phyf->data, phyf->size);
4433 spin_unlock_bh(&adap->win0_lock);
4434 if (ret < 0)
4435 dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
4436 -ret);
4437 else if (ret > 0) {
4438 int new_phy_fw_ver = 0;
4439
4440 if (phy_info->phy_fw_version)
4441 new_phy_fw_ver = phy_info->phy_fw_version(phyf->data,
4442 phyf->size);
4443 dev_info(adap->pdev_dev, "Successfully transferred PHY "
4444 "Firmware /lib/firmware/%s, version %#x\n",
4445 phy_info->phy_fw_file, new_phy_fw_ver);
4446 }
4447
4448 release_firmware(phyf);
4449
4450 return ret;
4451}
4452
/*
 * Attempt to initialize the adapter via a Firmware Configuration File.
 */
4456static int adap_init0_config(struct adapter *adapter, int reset)
4457{
4458 char *fw_config_file, fw_config_file_path[256];
4459 u32 finiver, finicsum, cfcsum, param, val;
4460 struct fw_caps_config_cmd caps_cmd;
4461 unsigned long mtype = 0, maddr = 0;
4462 const struct firmware *cf;
4463 char *config_name = NULL;
4464 int config_issued = 0;
4465 int ret;
4466
	/* Reset device if necessary */
4470 if (reset) {
4471 ret = t4_fw_reset(adapter, adapter->mbox,
4472 PIORSTMODE_F | PIORST_F);
4473 if (ret < 0)
4474 goto bye;
4475 }
4476
	/* If this is a 10Gb/s-BT adapter make sure the chip-external
	 * 10Gb/s-BT PHYs have up-to-date firmware.  Note that this step needs
	 * to be performed after any global adapter RESET above since some
	 * PHYs only have local RAM copies of the PHY firmware.
	 */
4482 if (is_10gbt_device(adapter->pdev->device)) {
4483 ret = adap_init0_phy(adapter);
4484 if (ret < 0)
4485 goto bye;
4486 }
4487
	/* If we have a T4 configuration file under /lib/firmware/cxgb4/,
	 * then use that.  Otherwise, use the configuration file stored
	 * in the adapter flash.
	 */
4492 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
4493 case CHELSIO_T4:
4494 fw_config_file = FW4_CFNAME;
4495 break;
4496 case CHELSIO_T5:
4497 fw_config_file = FW5_CFNAME;
4498 break;
4499 case CHELSIO_T6:
4500 fw_config_file = FW6_CFNAME;
4501 break;
4502 default:
4503 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
4504 adapter->pdev->device);
4505 ret = -EINVAL;
4506 goto bye;
4507 }
4508
4509 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
4510 if (ret < 0) {
4511 config_name = "On FLASH";
4512 mtype = FW_MEMTYPE_CF_FLASH;
4513 maddr = t4_flash_cfg_addr(adapter);
4514 } else {
4515 u32 params[7], val[7];
4516
4517 sprintf(fw_config_file_path,
4518 "/lib/firmware/%s", fw_config_file);
4519 config_name = fw_config_file_path;
4520
4521 if (cf->size >= FLASH_CFG_MAX_SIZE)
4522 ret = -ENOMEM;
4523 else {
4524 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4525 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
4526 ret = t4_query_params(adapter, adapter->mbox,
4527 adapter->pf, 0, 1, params, val);
4528 if (ret == 0) {
				/* For t4_memory_rw() below addresses and
				 * sizes have to be in terms of multiples of 4
				 * bytes.  So, if the Configuration File isn't
				 * a multiple of 4 bytes in length we'll have
				 * to write that out separately since we can't
				 * guarantee that the bytes following the
				 * residual byte in the buffer returned by
				 * request_firmware() are zeroed out.
				 */
4539 size_t resid = cf->size & 0x3;
4540 size_t size = cf->size & ~0x3;
4541 __be32 *data = (__be32 *)cf->data;
4542
4543 mtype = FW_PARAMS_PARAM_Y_G(val[0]);
4544 maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;
4545
4546 spin_lock(&adapter->win0_lock);
4547 ret = t4_memory_rw(adapter, 0, mtype, maddr,
4548 size, data, T4_MEMORY_WRITE);
4549 if (ret == 0 && resid != 0) {
4550 union {
4551 __be32 word;
4552 char buf[4];
4553 } last;
4554 int i;
4555
4556 last.word = data[size >> 2];
4557 for (i = resid; i < 4; i++)
4558 last.buf[i] = 0;
4559 ret = t4_memory_rw(adapter, 0, mtype,
4560 maddr + size,
4561 4, &last.word,
4562 T4_MEMORY_WRITE);
4563 }
4564 spin_unlock(&adapter->win0_lock);
4565 }
4566 }
4567
4568 release_firmware(cf);
4569 if (ret)
4570 goto bye;
4571 }
4572
4573 val = 0;

	/* Ofld + Hash filter is supported. Older fw will fail this request and
	 * it is fine.
	 */
4578 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4579 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_HASHFILTER_WITH_OFLD));
4580 ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0,
			    1, &param, &val);
4582
	/* FW doesn't know about Hash filter + ofld support,
	 * it's not a problem, don't return an error.
	 */
4586 if (ret < 0) {
4587 dev_warn(adapter->pdev_dev,
4588 "Hash filter with ofld is not supported by FW\n");
4589 }
4590
4591
4592
4593
4594
4595
4596
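	/* Issue a Capability Configuration command to the firmware to get it
	 * to parse the Configuration File.  We don't use t4_fw_config_file()
	 * because we want the ability to modify various features after we've
	 * processed the configuration file ...
	 */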
4597 memset(&caps_cmd, 0, sizeof(caps_cmd));
4598 caps_cmd.op_to_write =
4599 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4600 FW_CMD_REQUEST_F |
4601 FW_CMD_READ_F);
4602 caps_cmd.cfvalid_to_len16 =
4603 htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
4604 FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
4605 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
4606 FW_LEN16(caps_cmd));
4607 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4608 &caps_cmd);
4609
4610
4611
4612
4613
4614
4615
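	/* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
	 * Configuration File in FLASH), our last gasp effort is to use the
	 * Firmware Configuration File which is embedded in the firmware.
	 */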
4616 if (ret == -ENOENT) {
4617 memset(&caps_cmd, 0, sizeof(caps_cmd));
4618 caps_cmd.op_to_write =
4619 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4620 FW_CMD_REQUEST_F |
4621 FW_CMD_READ_F);
4622 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4623 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
4624 sizeof(caps_cmd), &caps_cmd);
4625 config_name = "Firmware Default";
4626 }
4627
4628 config_issued = 1;
4629 if (ret < 0)
4630 goto bye;
4631
4632 finiver = ntohl(caps_cmd.finiver);
4633 finicsum = ntohl(caps_cmd.finicsum);
4634 cfcsum = ntohl(caps_cmd.cfcsum);
4635 if (finicsum != cfcsum)
4636 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
4637 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
4638 finicsum, cfcsum);
4639
4640
4641
4642
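	/* And now tell the firmware to use the configuration we just loaded.
	 */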
4643 caps_cmd.op_to_write =
4644 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4645 FW_CMD_REQUEST_F |
4646 FW_CMD_WRITE_F);
4647 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4648 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4649 NULL);
4650 if (ret < 0)
4651 goto bye;
4652
4653
4654
4655
4656
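	/* Tweak configuration based on system architecture, module
	 * parameters, etc.
	 */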
4657 ret = adap_init0_tweaks(adapter);
4658 if (ret < 0)
4659 goto bye;
4660
4661
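	/* We will proceed even if HMA init fails. */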
4662 ret = adap_config_hma(adapter);
4663 if (ret)
4664 dev_err(adapter->pdev_dev,
4665 "HMA configuration failed with error %d\n", ret);
4666
4667 if (is_t6(adapter->params.chip)) {
4668 adap_config_hpfilter(adapter);
4669 ret = setup_ppod_edram(adapter);
4670 if (!ret)
4671 dev_info(adapter->pdev_dev, "Successfully enabled "
4672 "ppod edram feature\n");
4673 }
4674
4675
4676
4677
4678
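	/* And finally tell the firmware to initialize itself using the
	 * parameters from the Configuration File.
	 */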
4679 ret = t4_fw_initialize(adapter, adapter->mbox);
4680 if (ret < 0)
4681 goto bye;
4682
4683
4684
4685
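	/* Emit Firmware Configuration File information and return
	 * successfully.
	 */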
4686 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
4687 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
4688 config_name, finiver, cfcsum);
4689 return 0;
4690
4691
4692
4693
4694
4695
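	/* Something bad happened.  Return the error ...  (If the "error"
	 * is that there's no Configuration File on the adapter we don't
	 * want to issue a warning since this is fairly common.)
	 */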
4696bye:
4697 if (config_issued && ret != -ENOENT)
4698 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
4699 config_name, -ret);
4700 return ret;
4701}
4702
4703static struct fw_info fw_info_array[] = {
4704 {
4705 .chip = CHELSIO_T4,
4706 .fs_name = FW4_CFNAME,
4707 .fw_mod_name = FW4_FNAME,
4708 .fw_hdr = {
4709 .chip = FW_HDR_CHIP_T4,
4710 .fw_ver = __cpu_to_be32(FW_VERSION(T4)),
4711 .intfver_nic = FW_INTFVER(T4, NIC),
4712 .intfver_vnic = FW_INTFVER(T4, VNIC),
4713 .intfver_ri = FW_INTFVER(T4, RI),
4714 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
4715 .intfver_fcoe = FW_INTFVER(T4, FCOE),
4716 },
4717 }, {
4718 .chip = CHELSIO_T5,
4719 .fs_name = FW5_CFNAME,
4720 .fw_mod_name = FW5_FNAME,
4721 .fw_hdr = {
4722 .chip = FW_HDR_CHIP_T5,
4723 .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
4724 .intfver_nic = FW_INTFVER(T5, NIC),
4725 .intfver_vnic = FW_INTFVER(T5, VNIC),
4726 .intfver_ri = FW_INTFVER(T5, RI),
4727 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
4728 .intfver_fcoe = FW_INTFVER(T5, FCOE),
4729 },
4730 }, {
4731 .chip = CHELSIO_T6,
4732 .fs_name = FW6_CFNAME,
4733 .fw_mod_name = FW6_FNAME,
4734 .fw_hdr = {
4735 .chip = FW_HDR_CHIP_T6,
4736 .fw_ver = __cpu_to_be32(FW_VERSION(T6)),
4737 .intfver_nic = FW_INTFVER(T6, NIC),
4738 .intfver_vnic = FW_INTFVER(T6, VNIC),
4739 .intfver_ofld = FW_INTFVER(T6, OFLD),
4740 .intfver_ri = FW_INTFVER(T6, RI),
4741 .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
4742 .intfver_iscsi = FW_INTFVER(T6, ISCSI),
4743 .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
4744 .intfver_fcoe = FW_INTFVER(T6, FCOE),
4745 },
4746 }
4747
4748};
4749
4750static struct fw_info *find_fw_info(int chip)
4751{
4752 int i;
4753
4754 for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
4755 if (fw_info_array[i].chip == chip)
4756 return &fw_info_array[i];
4757 }
4758 return NULL;
4759}
4760
4761
4762
4763
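/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 */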
4764static int adap_init0(struct adapter *adap, int vpd_skip)
4765{
4766 struct fw_caps_config_cmd caps_cmd;
4767 u32 params[7], val[7];
4768 enum dev_state state;
4769 u32 v, port_vec;
4770 int reset = 1;
4771 int ret;
4772
4773
4774
4775
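	/* Grab Firmware Device Log parameters as early as possible so we
	 * have access to it for debugging, etc.
	 */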
4776 ret = t4_init_devlog_params(adap);
4777 if (ret < 0)
4778 return ret;
4779
4780
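	/* Contact FW, advertising Master capability */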
4781 ret = t4_fw_hello(adap, adap->mbox, adap->mbox,
4782 is_kdump_kernel() ? MASTER_MUST : MASTER_MAY, &state);
4783 if (ret < 0) {
4784 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
4785 ret);
4786 return ret;
4787 }
4788 if (ret == adap->mbox)
4789 adap->flags |= CXGB4_MASTER_PF;
4790
4791
4792
4793
4794
4795
4796
4797
4798
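	/* If we're the Master PF Driver and the device is uninitialized,
	 * then let's consider upgrading the firmware.  (We always want to
	 * check the firmware version number in order to A. get it for later
	 * reporting and B. to warn if the currently loaded firmware is
	 * excessively mismatched relative to the driver.)
	 */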
4799 t4_get_version_info(adap);
4800 ret = t4_check_fw_version(adap);
4801
4802 if (ret)
4803 state = DEV_STATE_UNINIT;
4804 if ((adap->flags & CXGB4_MASTER_PF) && state != DEV_STATE_INIT) {
4805 struct fw_info *fw_info;
4806 struct fw_hdr *card_fw;
4807 const struct firmware *fw;
4808 const u8 *fw_data = NULL;
4809 unsigned int fw_size = 0;
4810
4811
4812
4813
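		/* This is the firmware whose headers the driver was compiled
		 * against
		 */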
4814 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
4815 if (fw_info == NULL) {
4816 dev_err(adap->pdev_dev,
4817 "unable to get firmware info for chip %d.\n",
4818 CHELSIO_CHIP_VERSION(adap->params.chip));
4819 return -EINVAL;
4820 }
4821
4822
4823
4824
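		/* allocate memory to read the header of the firmware on the
		 * card
		 */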
4825 card_fw = kvzalloc(sizeof(*card_fw), GFP_KERNEL);
4826 if (!card_fw) {
4827 ret = -ENOMEM;
4828 goto bye;
4829 }
4830
4831
4832 ret = request_firmware(&fw, fw_info->fw_mod_name,
4833 adap->pdev_dev);
4834 if (ret < 0) {
4835 dev_err(adap->pdev_dev,
4836 "unable to load firmware image %s, error %d\n",
4837 fw_info->fw_mod_name, ret);
4838 } else {
4839 fw_data = fw->data;
4840 fw_size = fw->size;
4841 }
4842
4843
4844 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
4845 state, &reset);
4846
4847
4848 release_firmware(fw);
4849 kvfree(card_fw);
4850
4851 if (ret < 0)
4852 goto bye;
4853 }
4854
4855
4856
4857
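	/* If the firmware is initialized already, emit a simple note to that
	 * effect.  Otherwise, it's time to try initializing the adapter.
	 */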
4858 if (state == DEV_STATE_INIT) {
4859 ret = adap_config_hma(adap);
4860 if (ret)
4861 dev_err(adap->pdev_dev,
4862 "HMA configuration failed with error %d\n",
4863 ret);
4864 dev_info(adap->pdev_dev, "Coming up as %s: "\
4865 "Adapter already initialized\n",
4866 adap->flags & CXGB4_MASTER_PF ? "MASTER" : "SLAVE");
4867 } else {
4868 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
4869 "Initializing adapter\n");
4870
4871
4872
4873
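		/* Find out whether we're dealing with a version of the
		 * firmware which has configuration file support.
		 */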
4874 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4875 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
4876 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
4877 params, val);
4878
4879
4880
4881
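		/* If the firmware doesn't support Configuration Files,
		 * return an error.
		 */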
4882 if (ret < 0) {
4883 dev_err(adap->pdev_dev, "firmware doesn't support "
4884 "Firmware Configuration Files\n");
4885 goto bye;
4886 }
4887
4888
4889
4890
4891
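		/* The firmware provides us with a memory buffer where we can
		 * load a Configuration File from the host if we want to
		 * override the Configuration File in flash.
		 */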
4892 ret = adap_init0_config(adap, reset);
4893 if (ret == -ENOENT) {
4894 dev_err(adap->pdev_dev, "no Configuration File "
4895 "present on adapter.\n");
4896 goto bye;
4897 }
4898 if (ret < 0) {
4899 dev_err(adap->pdev_dev, "could not initialize "
4900 "adapter, error %d\n", -ret);
4901 goto bye;
4902 }
4903 }
4904
4905
4906
4907
4908
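	/* Now that we've successfully configured and initialized the adapter
	 * (or found it already initialized), we can ask the Firmware what
	 * resources it has provisioned for us.
	 */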
4909 ret = t4_get_pfres(adap);
4910 if (ret) {
4911 dev_err(adap->pdev_dev,
4912 "Unable to retrieve resource provisioning information\n");
4913 goto bye;
4914 }
4915
4916
4917
4918
4919
4920
4921
4922
4923
4924
4925
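	/* Grab VPD parameters.  This should be done after we establish a
	 * connection to the firmware since some of the VPD parameters
	 * (notably the Core Clock frequency) are retrieved via requests to
	 * the firmware.  On the other hand, we need these fairly early on
	 * so we do this right after getting ahold of the firmware.
	 *
	 * We need to do this after initializing the adapter because someone
	 * could have FLASHed a new VPD which won't be read by the firmware
	 * until we do the RESET ...
	 */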
4926 if (!vpd_skip) {
4927 ret = t4_get_vpd_params(adap, &adap->params.vpd);
4928 if (ret < 0)
4929 goto bye;
4930 }
4931
4932
4933
4934
4935
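	/* Find out what ports are available to us. */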
4936 v =
4937 FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4938 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
4939 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
4940 if (ret < 0)
4941 goto bye;
4942
4943 adap->params.nports = hweight32(port_vec);
4944 adap->params.portvec = port_vec;
4945
4946
4947
4948
4949
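	/* Give the SGE code a chance to pull in anything that it needs ...
	 * Note that this must be called after we retrieve our VPD parameters
	 * in order to know how to convert core ticks to seconds, etc.
	 */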
4950 ret = t4_sge_init(adap);
4951 if (ret < 0)
4952 goto bye;
4953
4954
4955
4956
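	/* Grab the SGE Doorbell Queue Timer values.  If successful, that
	 * indicates that the Firmware and Hardware support this.
	 */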
4957 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4958 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK));
4959 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
4960 1, params, val);
4961
4962 if (!ret) {
4963 adap->sge.dbqtimer_tick = val[0];
4964 ret = t4_read_sge_dbqtimers(adap,
4965 ARRAY_SIZE(adap->sge.dbqtimer_val),
4966 adap->sge.dbqtimer_val);
4967 }
4968
4969 if (!ret)
4970 adap->flags |= CXGB4_SGE_DBQ_TIMER;
4971
4972 if (is_bypass_device(adap->pdev->device))
4973 adap->params.bypass = 1;
4974
4975
4976
4977
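	/* Grab some of our basic fundamental operating parameters. */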
4978 params[0] = FW_PARAM_PFVF(EQ_START);
4979 params[1] = FW_PARAM_PFVF(L2T_START);
4980 params[2] = FW_PARAM_PFVF(L2T_END);
4981 params[3] = FW_PARAM_PFVF(FILTER_START);
4982 params[4] = FW_PARAM_PFVF(FILTER_END);
4983 params[5] = FW_PARAM_PFVF(IQFLINT_START);
4984 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val);
4985 if (ret < 0)
4986 goto bye;
4987 adap->sge.egr_start = val[0];
4988 adap->l2t_start = val[1];
4989 adap->l2t_end = val[2];
4990 adap->tids.ftid_base = val[3];
4991 adap->tids.nftids = val[4] - val[3] + 1;
4992 adap->sge.ingr_start = val[5];
4993
4994 if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
4995 params[0] = FW_PARAM_PFVF(HPFILTER_START);
4996 params[1] = FW_PARAM_PFVF(HPFILTER_END);
4997 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
4998 params, val);
4999 if (ret < 0)
5000 goto bye;
5001
5002 adap->tids.hpftid_base = val[0];
5003 adap->tids.nhpftids = val[1] - val[0] + 1;
5004
5005
5006
5007
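		/* Read the raw mps entries.  In T6, the last 2 TCAM entries
		 * are reserved for raw mac addresses (rawf = 2, one per port).
		 */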
5008 params[0] = FW_PARAM_PFVF(RAWF_START);
5009 params[1] = FW_PARAM_PFVF(RAWF_END);
5010 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
5011 params, val);
5012 if (ret == 0) {
5013 adap->rawf_start = val[0];
5014 adap->rawf_cnt = val[1] - val[0] + 1;
5015 }
5016
5017 adap->tids.tid_base =
5018 t4_read_reg(adap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
5019 }
5020
5021
5022
5023
5024
5025
5026
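	/* qids (ingress/egress) returned from firmware can be anywhere
	 * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
	 * Hence driver needs to allocate memory for this range to
	 * store the queue info.  Get the highest IQFLINT/EQ index returned
	 * in FW_EQ_*_CMD.alloc command.
	 */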
5027 params[0] = FW_PARAM_PFVF(EQ_END);
5028 params[1] = FW_PARAM_PFVF(IQFLINT_END);
5029 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
5030 if (ret < 0)
5031 goto bye;
5032 adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
5033 adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;
5034
5035 adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
5036 sizeof(*adap->sge.egr_map), GFP_KERNEL);
5037 if (!adap->sge.egr_map) {
5038 ret = -ENOMEM;
5039 goto bye;
5040 }
5041
5042 adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
5043 sizeof(*adap->sge.ingr_map), GFP_KERNEL);
5044 if (!adap->sge.ingr_map) {
5045 ret = -ENOMEM;
5046 goto bye;
5047 }
5048
5049
5050
5051
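	/* Allocate the memory for the various egress queue bitmaps:
	 * starving_fl, txq_maperr and blocked_fl.
	 */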
5052 adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
5053 sizeof(long), GFP_KERNEL);
5054 if (!adap->sge.starving_fl) {
5055 ret = -ENOMEM;
5056 goto bye;
5057 }
5058
5059 adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
5060 sizeof(long), GFP_KERNEL);
5061 if (!adap->sge.txq_maperr) {
5062 ret = -ENOMEM;
5063 goto bye;
5064 }
5065
5066#ifdef CONFIG_DEBUG_FS
5067 adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
5068 sizeof(long), GFP_KERNEL);
5069 if (!adap->sge.blocked_fl) {
5070 ret = -ENOMEM;
5071 goto bye;
5072 }
5073#endif
5074
5075 params[0] = FW_PARAM_PFVF(CLIP_START);
5076 params[1] = FW_PARAM_PFVF(CLIP_END);
5077 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
5078 if (ret < 0)
5079 goto bye;
5080 adap->clipt_start = val[0];
5081 adap->clipt_end = val[1];
5082
5083
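	/* Get the supported number of traffic classes */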
5084 params[0] = FW_PARAM_DEV(NUM_TM_CLASS);
5085 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
5086 if (ret < 0) {
5087
5088
5089
5090
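		/* We couldn't retrieve the number of Traffic Classes
		 * supported by the hardware/firmware.  So we hard
		 * code it here.
		 */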
5091 adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16;
5092 } else {
5093 adap->params.nsched_cls = val[0];
5094 }
5095
5096
5097 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
5098 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
5099 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
5100
5101
5102
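	/* If Active filter size is set we enable establishing
	 * offload connections through firmware work requests.
	 */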
5103 if ((val[0] != val[1]) && (ret >= 0)) {
5104 adap->flags |= CXGB4_FW_OFLD_CONN;
5105 adap->tids.aftid_base = val[0];
5106 adap->tids.aftid_end = val[1];
5107 }
5108
5109
5110
5111
5112
5113
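	/* If we're running on newer firmware, let it know that we're
	 * prepared to deal with encapsulated CPL messages.  Older
	 * firmware won't understand this and we'll just get
	 * unencapsulated messages ...
	 */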
5114 params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
5115 val[0] = 1;
5116 (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
5117
5118
5119
5120
5121
5122
5123
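	/* Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
	 * capability.  Earlier versions of the firmware didn't have the
	 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
	 * permission to use ULPTX MEMWRITE DSGL.
	 */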
5124 if (is_t4(adap->params.chip)) {
5125 adap->params.ulptx_memwrite_dsgl = false;
5126 } else {
5127 params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
5128 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5129 1, params, val);
5130 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
5131 }
5132
5133
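	/* See if FW supports FW_RI_FR_NSMR_TPTE_WR work request */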
5134 params[0] = FW_PARAM_DEV(RI_FR_NSMR_TPTE_WR);
5135 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5136 1, params, val);
5137 adap->params.fr_nsmr_tpte_wr_support = (ret == 0 && val[0] != 0);
5138
5139
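	/* See if FW supports FW_FILTER2 work request */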
5140 if (is_t4(adap->params.chip)) {
5141 adap->params.filter2_wr_support = 0;
5142 } else {
5143 params[0] = FW_PARAM_DEV(FILTER2_WR);
5144 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5145 1, params, val);
5146 adap->params.filter2_wr_support = (ret == 0 && val[0] != 0);
5147 }
5148
5149
5150
5151
5152
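	/* Check if FW supports returning vin and smt index.
	 * If this is not supported, driver will interpret
	 * these values from viid.
	 */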
5153 params[0] = FW_PARAM_DEV(OPAQUE_VIID_SMT_EXTN);
5154 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5155 1, params, val);
5156 adap->params.viid_smt_extn_support = (ret == 0 && val[0] != 0);
5157
5158
5159
5160
5161
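	/* Get device capabilities so we can determine what resources we need
	 * to manage.
	 */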
5162 memset(&caps_cmd, 0, sizeof(caps_cmd));
5163 caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
5164 FW_CMD_REQUEST_F | FW_CMD_READ_F);
5165 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5166 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
5167 &caps_cmd);
5168 if (ret < 0)
5169 goto bye;
5170
5171
5172
5173
5174
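	/* Hash filter mode has some mandatory register settings which depend
	 * on whether offload is enabled, hence checking and setting the
	 * offload flag here before init_hash_filter() may run below.
	 */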
5175 if (caps_cmd.ofldcaps)
5176 adap->params.offload = 1;
5177
5178 if (caps_cmd.ofldcaps ||
5179 (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) ||
5180 (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_ETHOFLD))) {
5181
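		/* query offload-related parameters */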
5182 params[0] = FW_PARAM_DEV(NTID);
5183 params[1] = FW_PARAM_PFVF(SERVER_START);
5184 params[2] = FW_PARAM_PFVF(SERVER_END);
5185 params[3] = FW_PARAM_PFVF(TDDP_START);
5186 params[4] = FW_PARAM_PFVF(TDDP_END);
5187 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
5188 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
5189 params, val);
5190 if (ret < 0)
5191 goto bye;
5192 adap->tids.ntids = val[0];
5193 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
5194 adap->tids.stid_base = val[1];
5195 adap->tids.nstids = val[2] - val[1] + 1;
5196
5197
5198
5199
5200
5201
5202
5203
5204
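		/* Setup server filter region.  Divide the available filter
		 * region into two parts.  Regular filters get 1/3rd and
		 * server filters get 2/3rd part.  This is only enabled if
		 * the firmware offloaded-connection workaround is in use:
		 * 1. For regular filters.
		 * 2. Server filters: special filters which are used
		 *    to redirect SYN packets to the offload queue.
		 */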
5205 if (adap->flags & CXGB4_FW_OFLD_CONN && !is_bypass(adap)) {
5206 adap->tids.sftid_base = adap->tids.ftid_base +
5207 DIV_ROUND_UP(adap->tids.nftids, 3);
5208 adap->tids.nsftids = adap->tids.nftids -
5209 DIV_ROUND_UP(adap->tids.nftids, 3);
5210 adap->tids.nftids = adap->tids.sftid_base -
5211 adap->tids.ftid_base;
5212 }
5213 adap->vres.ddp.start = val[3];
5214 adap->vres.ddp.size = val[4] - val[3] + 1;
5215 adap->params.ofldq_wr_cred = val[5];
5216
5217 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) {
5218 init_hash_filter(adap);
5219 } else {
5220 adap->num_ofld_uld += 1;
5221 }
5222
5223 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_ETHOFLD)) {
5224 params[0] = FW_PARAM_PFVF(ETHOFLD_START);
5225 params[1] = FW_PARAM_PFVF(ETHOFLD_END);
5226 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
5227 params, val);
5228 if (!ret) {
5229 adap->tids.eotid_base = val[0];
5230 adap->tids.neotids = min_t(u32, MAX_ATIDS,
5231 val[1] - val[0] + 1);
5232 adap->params.ethofld = 1;
5233 }
5234 }
5235 }
5236 if (caps_cmd.rdmacaps) {
5237 params[0] = FW_PARAM_PFVF(STAG_START);
5238 params[1] = FW_PARAM_PFVF(STAG_END);
5239 params[2] = FW_PARAM_PFVF(RQ_START);
5240 params[3] = FW_PARAM_PFVF(RQ_END);
5241 params[4] = FW_PARAM_PFVF(PBL_START);
5242 params[5] = FW_PARAM_PFVF(PBL_END);
5243 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
5244 params, val);
5245 if (ret < 0)
5246 goto bye;
5247 adap->vres.stag.start = val[0];
5248 adap->vres.stag.size = val[1] - val[0] + 1;
5249 adap->vres.rq.start = val[2];
5250 adap->vres.rq.size = val[3] - val[2] + 1;
5251 adap->vres.pbl.start = val[4];
5252 adap->vres.pbl.size = val[5] - val[4] + 1;
5253
5254 params[0] = FW_PARAM_PFVF(SRQ_START);
5255 params[1] = FW_PARAM_PFVF(SRQ_END);
5256 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
5257 params, val);
5258 if (!ret) {
5259 adap->vres.srq.start = val[0];
5260 adap->vres.srq.size = val[1] - val[0] + 1;
5261 }
5262 if (adap->vres.srq.size) {
5263 adap->srq = t4_init_srq(adap->vres.srq.size);
5264 if (!adap->srq)
5265 dev_warn(&adap->pdev->dev, "could not allocate SRQ, continuing\n");
5266 }
5267
5268 params[0] = FW_PARAM_PFVF(SQRQ_START);
5269 params[1] = FW_PARAM_PFVF(SQRQ_END);
5270 params[2] = FW_PARAM_PFVF(CQ_START);
5271 params[3] = FW_PARAM_PFVF(CQ_END);
5272 params[4] = FW_PARAM_PFVF(OCQ_START);
5273 params[5] = FW_PARAM_PFVF(OCQ_END);
5274 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params,
5275 val);
5276 if (ret < 0)
5277 goto bye;
5278 adap->vres.qp.start = val[0];
5279 adap->vres.qp.size = val[1] - val[0] + 1;
5280 adap->vres.cq.start = val[2];
5281 adap->vres.cq.size = val[3] - val[2] + 1;
5282 adap->vres.ocq.start = val[4];
5283 adap->vres.ocq.size = val[5] - val[4] + 1;
5284
5285 params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
5286 params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
5287 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params,
5288 val);
5289 if (ret < 0) {
5290 adap->params.max_ordird_qp = 8;
5291 adap->params.max_ird_adapter = 32 * adap->tids.ntids;
5292 ret = 0;
5293 } else {
5294 adap->params.max_ordird_qp = val[0];
5295 adap->params.max_ird_adapter = val[1];
5296 }
5297 dev_info(adap->pdev_dev,
5298 "max_ordird_qp %d max_ird_adapter %d\n",
5299 adap->params.max_ordird_qp,
5300 adap->params.max_ird_adapter);
5301
5302
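		/* Enable write_with_immediate if FW supports it */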
5303 params[0] = FW_PARAM_DEV(RDMA_WRITE_WITH_IMM);
5304 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params,
5305 val);
5306 adap->params.write_w_imm_support = (ret == 0 && val[0] != 0);
5307
5308
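		/* Enable write_cmpl if FW supports it */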
5309 params[0] = FW_PARAM_DEV(RI_WRITE_CMPL_WR);
5310 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params,
5311 val);
5312 adap->params.write_cmpl_support = (ret == 0 && val[0] != 0);
5313 adap->num_ofld_uld += 2;
5314 }
5315 if (caps_cmd.iscsicaps) {
5316 params[0] = FW_PARAM_PFVF(ISCSI_START);
5317 params[1] = FW_PARAM_PFVF(ISCSI_END);
5318 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
5319 params, val);
5320 if (ret < 0)
5321 goto bye;
5322 adap->vres.iscsi.start = val[0];
5323 adap->vres.iscsi.size = val[1] - val[0] + 1;
5324 if (is_t6(adap->params.chip)) {
5325 params[0] = FW_PARAM_PFVF(PPOD_EDRAM_START);
5326 params[1] = FW_PARAM_PFVF(PPOD_EDRAM_END);
5327 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
5328 params, val);
5329 if (!ret) {
5330 adap->vres.ppod_edram.start = val[0];
5331 adap->vres.ppod_edram.size =
5332 val[1] - val[0] + 1;
5333
5334 dev_info(adap->pdev_dev,
5335 "ppod edram start 0x%x end 0x%x size 0x%x\n",
5336 val[0], val[1],
5337 adap->vres.ppod_edram.size);
5338 }
5339 }
5340
5341 adap->num_ofld_uld += 2;
5342 }
5343 if (caps_cmd.cryptocaps) {
5344 if (ntohs(caps_cmd.cryptocaps) &
5345 FW_CAPS_CONFIG_CRYPTO_LOOKASIDE) {
5346 params[0] = FW_PARAM_PFVF(NCRYPTO_LOOKASIDE);
5347 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5348 2, params, val);
5349 if (ret < 0) {
5350 if (ret != -EINVAL)
5351 goto bye;
5352 } else {
5353 adap->vres.ncrypto_fc = val[0];
5354 }
5355 adap->num_ofld_uld += 1;
5356 }
5357 if (ntohs(caps_cmd.cryptocaps) &
5358 FW_CAPS_CONFIG_TLS_INLINE) {
5359 params[0] = FW_PARAM_PFVF(TLS_START);
5360 params[1] = FW_PARAM_PFVF(TLS_END);
5361 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5362 2, params, val);
5363 if (ret < 0)
5364 goto bye;
5365 adap->vres.key.start = val[0];
5366 adap->vres.key.size = val[1] - val[0] + 1;
5367 adap->num_uld += 1;
5368 }
5369 adap->params.crypto = ntohs(caps_cmd.cryptocaps);
5370 }
5371
5372
5373
5374
5375
5376
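	/* The MTU/MSS Table is initialized by now, so load their values.  If
	 * we're initializing the adapter, then we'll make any modifications
	 * we want to the MTU/MSS Table and also initialize the congestion
	 * parameters.
	 */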
5377 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
5378 if (state != DEV_STATE_INIT) {
5379 int i;
5380
5381
5382
5383
5384
5385
5386
5387
5388
5389
5390
5391
5392
5393
5394
5395
5396
5397
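		/* The default MTU Table contains values 1492 and 1500.
		 * However, for TCP, it's better to have two values which are
		 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
		 * This allows us to have a TCP Data Payload which is a
		 * multiple of 8 regardless of what combination of TCP Options
		 * are in use (always a multiple of 4 bytes) which is
		 * important for performance reasons.  For instance, if no
		 * options are in use, then we have a 20-byte IP header and a
		 * 20-byte TCP header.  In this case, a 1500-byte MSS would
		 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
		 * which is not a multiple of 8.  So using an MSS of 1488 in
		 * this case results in a TCP Data Payload of 1448 bytes which
		 * is a multiple of 8.  On the other hand, if 12-byte TCP Time
		 * Stamps are in use, then a 1500-byte MSS would result in a
		 * TCP Data Payload of 1500 - 40 - 12 == 1448 bytes which is
		 * a multiple of 8 ...
		 */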
5398 for (i = 0; i < NMTUS; i++)
5399 if (adap->params.mtus[i] == 1492) {
5400 adap->params.mtus[i] = 1488;
5401 break;
5402 }
5403
5404 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5405 adap->params.b_wnd);
5406 }
5407 t4_init_sge_params(adap);
5408 adap->flags |= CXGB4_FW_OK;
5409 t4_init_tp_params(adap, true);
5410 return 0;
5411
5412
5413
5414
5415
5416
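	/* Something bad happened.  If a command timed out or failed with EIO
	 * the firmware is not operating within its spec or something
	 * catastrophic happened to HW/FW, so stop issuing commands.
	 */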
5417bye:
5418 adap_free_hma_mem(adap);
5419 kfree(adap->sge.egr_map);
5420 kfree(adap->sge.ingr_map);
5421 kfree(adap->sge.starving_fl);
5422 kfree(adap->sge.txq_maperr);
5423#ifdef CONFIG_DEBUG_FS
5424 kfree(adap->sge.blocked_fl);
5425#endif
5426 if (ret != -ETIMEDOUT && ret != -EIO)
5427 t4_fw_bye(adap, adap->mbox);
5428 return ret;
5429}
5430
5431
5432
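/* EEH callbacks */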
5433static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
5434 pci_channel_state_t state)
5435{
5436 int i;
5437 struct adapter *adap = pci_get_drvdata(pdev);
5438
5439 if (!adap)
5440 goto out;
5441
5442 rtnl_lock();
5443 adap->flags &= ~CXGB4_FW_OK;
5444 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
5445 spin_lock(&adap->stats_lock);
5446 for_each_port(adap, i) {
5447 struct net_device *dev = adap->port[i];
5448 if (dev) {
5449 netif_device_detach(dev);
5450 netif_carrier_off(dev);
5451 }
5452 }
5453 spin_unlock(&adap->stats_lock);
5454 disable_interrupts(adap);
5455 if (adap->flags & CXGB4_FULL_INIT_DONE)
5456 cxgb_down(adap);
5457 rtnl_unlock();
5458 if ((adap->flags & CXGB4_DEV_ENABLED)) {
5459 pci_disable_device(pdev);
5460 adap->flags &= ~CXGB4_DEV_ENABLED;
5461 }
5462out: return state == pci_channel_io_perm_failure ?
5463 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
5464}
5465
5466static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
5467{
5468 int i, ret;
5469 struct fw_caps_config_cmd c;
5470 struct adapter *adap = pci_get_drvdata(pdev);
5471
5472 if (!adap) {
5473 pci_restore_state(pdev);
5474 pci_save_state(pdev);
5475 return PCI_ERS_RESULT_RECOVERED;
5476 }
5477
5478 if (!(adap->flags & CXGB4_DEV_ENABLED)) {
5479 if (pci_enable_device(pdev)) {
5480 dev_err(&pdev->dev, "Cannot reenable PCI "
5481 "device after reset\n");
5482 return PCI_ERS_RESULT_DISCONNECT;
5483 }
5484 adap->flags |= CXGB4_DEV_ENABLED;
5485 }
5486
5487 pci_set_master(pdev);
5488 pci_restore_state(pdev);
5489 pci_save_state(pdev);
5490
5491 if (t4_wait_dev_ready(adap->regs) < 0)
5492 return PCI_ERS_RESULT_DISCONNECT;
5493 if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
5494 return PCI_ERS_RESULT_DISCONNECT;
5495 adap->flags |= CXGB4_FW_OK;
5496 if (adap_init1(adap, &c))
5497 return PCI_ERS_RESULT_DISCONNECT;
5498
5499 for_each_port(adap, i) {
5500 struct port_info *pi = adap2pinfo(adap, i);
5501 u8 vivld = 0, vin = 0;
5502
5503 ret = t4_alloc_vi(adap, adap->mbox, pi->tx_chan, adap->pf, 0, 1,
5504 NULL, NULL, &vivld, &vin);
5505 if (ret < 0)
5506 return PCI_ERS_RESULT_DISCONNECT;
5507 pi->viid = ret;
5508 pi->xact_addr_filt = -1;
5509
5510
5511
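		/* If fw supports returning the VIN as part of FW_VI_CMD,
		 * save the returned values.
		 */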
5512 if (adap->params.viid_smt_extn_support) {
5513 pi->vivld = vivld;
5514 pi->vin = vin;
5515 } else {
5516
5517 pi->vivld = FW_VIID_VIVLD_G(pi->viid);
5518 pi->vin = FW_VIID_VIN_G(pi->viid);
5519 }
5520 }
5521
5522 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5523 adap->params.b_wnd);
5524 setup_memwin(adap);
5525 if (cxgb_up(adap))
5526 return PCI_ERS_RESULT_DISCONNECT;
5527 return PCI_ERS_RESULT_RECOVERED;
5528}
5529
5530static void eeh_resume(struct pci_dev *pdev)
5531{
5532 int i;
5533 struct adapter *adap = pci_get_drvdata(pdev);
5534
5535 if (!adap)
5536 return;
5537
5538 rtnl_lock();
5539 for_each_port(adap, i) {
5540 struct net_device *dev = adap->port[i];
5541 if (dev) {
5542 if (netif_running(dev)) {
5543 link_start(dev);
5544 cxgb_set_rxmode(dev);
5545 }
5546 netif_device_attach(dev);
5547 }
5548 }
5549 rtnl_unlock();
5550}
5551
5552static void eeh_reset_prepare(struct pci_dev *pdev)
5553{
5554 struct adapter *adapter = pci_get_drvdata(pdev);
5555 int i;
5556
5557 if (adapter->pf != 4)
5558 return;
5559
5560 adapter->flags &= ~CXGB4_FW_OK;
5561
5562 notify_ulds(adapter, CXGB4_STATE_DOWN);
5563
5564 for_each_port(adapter, i)
5565 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
5566 cxgb_close(adapter->port[i]);
5567
5568 disable_interrupts(adapter);
5569 cxgb4_free_mps_ref_entries(adapter);
5570
5571 adap_free_hma_mem(adapter);
5572
5573 if (adapter->flags & CXGB4_FULL_INIT_DONE)
5574 cxgb_down(adapter);
5575}
5576
5577static void eeh_reset_done(struct pci_dev *pdev)
5578{
5579 struct adapter *adapter = pci_get_drvdata(pdev);
5580 int err, i;
5581
5582 if (adapter->pf != 4)
5583 return;
5584
5585 err = t4_wait_dev_ready(adapter->regs);
5586 if (err < 0) {
5587 dev_err(adapter->pdev_dev,
5588 "Device not ready, err %d", err);
5589 return;
5590 }
5591
5592 setup_memwin(adapter);
5593
5594 err = adap_init0(adapter, 1);
5595 if (err) {
5596 dev_err(adapter->pdev_dev,
5597 "Adapter init failed, err %d", err);
5598 return;
5599 }
5600
5601 setup_memwin_rdma(adapter);
5602
5603 if (adapter->flags & CXGB4_FW_OK) {
5604 err = t4_port_init(adapter, adapter->pf, adapter->pf, 0);
5605 if (err) {
5606 dev_err(adapter->pdev_dev,
5607 "Port init failed, err %d", err);
5608 return;
5609 }
5610 }
5611
5612 err = cfg_queues(adapter);
5613 if (err) {
5614 dev_err(adapter->pdev_dev,
5615 "Config queues failed, err %d", err);
5616 return;
5617 }
5618
5619 cxgb4_init_mps_ref_entries(adapter);
5620
5621 err = setup_fw_sge_queues(adapter);
5622 if (err) {
5623 dev_err(adapter->pdev_dev,
5624 "FW sge queue allocation failed, err %d", err);
5625 return;
5626 }
5627
5628 for_each_port(adapter, i)
5629 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
5630 cxgb_open(adapter->port[i]);
5631}
5632
5633static const struct pci_error_handlers cxgb4_eeh = {
5634 .error_detected = eeh_err_detected,
5635 .slot_reset = eeh_slot_reset,
5636 .resume = eeh_resume,
5637 .reset_prepare = eeh_reset_prepare,
5638 .reset_done = eeh_reset_done,
5639};
5640
5641
5642
5643
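/* Return true if the Link Configuration supports "High Speeds" (those faster
 * than 1Gb/s).
 */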
5644static inline bool is_x_10g_port(const struct link_config *lc)
5645{
5646 unsigned int speeds, high_speeds;
5647
5648 speeds = FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_G(lc->pcaps));
5649 high_speeds = speeds &
5650 ~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G);
5651
5652 return high_speeds != 0;
5653}
5654
5655
5656
5657
5658
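/* Perform default configuration of DMA queues depending on the number and
 * type of ports we found and the number of available CPUs.  Most settings can
 * be modified by the admin prior to actual use.
 */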
5659static int cfg_queues(struct adapter *adap)
5660{
5661 u32 avail_qsets, avail_eth_qsets, avail_uld_qsets;
5662 u32 ncpus = num_online_cpus();
5663 u32 niqflint, neq, num_ulds;
5664 struct sge *s = &adap->sge;
5665 u32 i, n10g = 0, qidx = 0;
5666 u32 q10g = 0, q1g;
5667
5668
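	/* Reduce memory usage in kdump environment, disable all offload. */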
5669 if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) {
5670 adap->params.offload = 0;
5671 adap->params.crypto = 0;
5672 adap->params.ethofld = 0;
5673 }
5674
5675
5676
5677
5678
5679
5680
5681
5682
5683
5684
5685
5686
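	/* Calculate the number of Ethernet Queue Sets available based on
	 * resources provisioned for us.  We always have an Asynchronous
	 * Firmware Event Ingress Queue.  If we're operating in MSI/Legacy
	 * IRQ mode, we'll also have an additional (Forwarded) Interrupt
	 * Queue; both come out of our ingress queue budget.  Each Queue Set
	 * additionally consumes two egress queues (a TX queue plus a Free
	 * List), hence the neq / 2 below.
	 */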
5687 niqflint = adap->params.pfres.niqflint - 1;
5688 if (!(adap->flags & CXGB4_USING_MSIX))
5689 niqflint--;
5690 neq = adap->params.pfres.neq / 2;
5691 avail_qsets = min(niqflint, neq);
5692
5693 if (avail_qsets < adap->params.nports) {
5694 dev_err(adap->pdev_dev, "avail_eth_qsets=%d < nports=%d\n",
5695 avail_qsets, adap->params.nports);
5696 return -ENOMEM;
5697 }
5698
5699
5700 for_each_port(adap, i)
5701 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
5702
5703 avail_eth_qsets = min_t(u32, avail_qsets, MAX_ETH_QSETS);
5704
5705
5706
5707
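	/* We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */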
5708 if (n10g)
5709 q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
5710
5711#ifdef CONFIG_CHELSIO_T4_DCB
5712
5713
5714
5715
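	/* For Data Center Bridging support we need to be able to support up
	 * to 8 Traffic Priorities; each of which will be assigned to its
	 * own TX Queue in order to prevent Head-Of-Line Blocking.
	 */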
5716 q1g = 8;
5717 if (adap->params.nports * 8 > avail_eth_qsets) {
5718 dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n",
5719 avail_eth_qsets, adap->params.nports * 8);
5720 return -ENOMEM;
5721 }
5722
5723 if (adap->params.nports * ncpus < avail_eth_qsets)
5724 q10g = max(8U, ncpus);
5725 else
5726 q10g = max(8U, q10g);
5727
5728 while ((q10g * n10g) >
5729 (avail_eth_qsets - (adap->params.nports - n10g) * q1g))
5730 q10g--;
5731
5732#else
5733 q1g = 1;
5734 q10g = min(q10g, ncpus);
5735#endif
5736 if (is_kdump_kernel()) {
5737 q10g = 1;
5738 q1g = 1;
5739 }
5740
5741 for_each_port(adap, i) {
5742 struct port_info *pi = adap2pinfo(adap, i);
5743
5744 pi->first_qset = qidx;
5745 pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : q1g;
5746 qidx += pi->nqsets;
5747 }
5748
5749 s->ethqsets = qidx;
5750 s->max_ethqsets = qidx;
5751 avail_qsets -= qidx;
5752
5753 if (is_uld(adap)) {
5754
5755
5756
5757
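		/* For offload we use 1 queue/channel if all ports are up to
		 * 1G, otherwise we divide all available queues amongst the
		 * channels capped by the number of available cores.
		 */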
5758 num_ulds = adap->num_uld + adap->num_ofld_uld;
5759 i = min_t(u32, MAX_OFLD_QSETS, ncpus);
5760 avail_uld_qsets = roundup(i, adap->params.nports);
5761 if (avail_qsets < num_ulds * adap->params.nports) {
5762 adap->params.offload = 0;
5763 adap->params.crypto = 0;
5764 s->ofldqsets = 0;
5765 } else if (avail_qsets < num_ulds * avail_uld_qsets || !n10g) {
5766 s->ofldqsets = adap->params.nports;
5767 } else {
5768 s->ofldqsets = avail_uld_qsets;
5769 }
5770
5771 avail_qsets -= num_ulds * s->ofldqsets;
5772 }
5773
5774
5775
5776
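	/* ETHOFLD Queues used for QoS offload should follow same
	 * allocation scheme as normal Ethernet Queues.
	 */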
5777 if (is_ethofld(adap)) {
5778 if (avail_qsets < s->max_ethqsets) {
5779 adap->params.ethofld = 0;
5780 s->eoqsets = 0;
5781 } else {
5782 s->eoqsets = s->max_ethqsets;
5783 }
5784 avail_qsets -= s->eoqsets;
5785 }
5786
5787
5788
5789
5790
5791
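	/* Mirror queues must follow same scheme as normal Ethernet
	 * Queues, when there are enough queues available.  Otherwise,
	 * allocate at least 1 queue per port.  If even 1 queue is not
	 * available, then disable mirror queues support.
	 */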
5792 if (avail_qsets >= s->max_ethqsets)
5793 s->mirrorqsets = s->max_ethqsets;
5794 else if (avail_qsets >= adap->params.nports)
5795 s->mirrorqsets = adap->params.nports;
5796 else
5797 s->mirrorqsets = 0;
5798 avail_qsets -= s->mirrorqsets;
5799
5800 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
5801 struct sge_eth_rxq *r = &s->ethrxq[i];
5802
5803 init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
5804 r->fl.size = 72;
5805 }
5806
5807 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
5808 s->ethtxq[i].q.size = 1024;
5809
5810 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
5811 s->ctrlq[i].q.size = 512;
5812
5813 if (!is_t4(adap->params.chip))
5814 s->ptptxq.q.size = 8;
5815
5816 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
5817 init_rspq(adap, &s->intrq, 0, 1, 512, 64);
5818
5819 return 0;
5820}
5821
5822
5823
5824
5825
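/* Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */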
5826static void reduce_ethqs(struct adapter *adap, int n)
5827{
5828 int i;
5829 struct port_info *pi;
5830
5831 while (n < adap->sge.ethqsets)
5832 for_each_port(adap, i) {
5833 pi = adap2pinfo(adap, i);
5834 if (pi->nqsets > 1) {
5835 pi->nqsets--;
5836 adap->sge.ethqsets--;
5837 if (adap->sge.ethqsets <= n)
5838 break;
5839 }
5840 }
5841
5842 n = 0;
5843 for_each_port(adap, i) {
5844 pi = adap2pinfo(adap, i);
5845 pi->first_qset = n;
5846 n += pi->nqsets;
5847 }
5848}
5849
5850static int alloc_msix_info(struct adapter *adap, u32 num_vec)
5851{
5852 struct msix_info *msix_info;
5853
5854 msix_info = kcalloc(num_vec, sizeof(*msix_info), GFP_KERNEL);
5855 if (!msix_info)
5856 return -ENOMEM;
5857
5858 adap->msix_bmap.msix_bmap = kcalloc(BITS_TO_LONGS(num_vec),
5859 sizeof(long), GFP_KERNEL);
5860 if (!adap->msix_bmap.msix_bmap) {
5861 kfree(msix_info);
5862 return -ENOMEM;
5863 }
5864
5865 spin_lock_init(&adap->msix_bmap.lock);
5866 adap->msix_bmap.mapsize = num_vec;
5867
5868 adap->msix_info = msix_info;
5869 return 0;
5870}
5871
5872static void free_msix_info(struct adapter *adap)
5873{
5874 kfree(adap->msix_bmap.msix_bmap);
5875 kfree(adap->msix_info);
5876}
5877
5878int cxgb4_get_msix_idx_from_bmap(struct adapter *adap)
5879{
5880 struct msix_bmap *bmap = &adap->msix_bmap;
5881 unsigned int msix_idx;
5882 unsigned long flags;
5883
5884 spin_lock_irqsave(&bmap->lock, flags);
5885 msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
5886 if (msix_idx < bmap->mapsize) {
5887 __set_bit(msix_idx, bmap->msix_bmap);
5888 } else {
5889 spin_unlock_irqrestore(&bmap->lock, flags);
5890 return -ENOSPC;
5891 }
5892
5893 spin_unlock_irqrestore(&bmap->lock, flags);
5894 return msix_idx;
5895}
5896
5897void cxgb4_free_msix_idx_in_bmap(struct adapter *adap,
5898 unsigned int msix_idx)
5899{
5900 struct msix_bmap *bmap = &adap->msix_bmap;
5901 unsigned long flags;
5902
5903 spin_lock_irqsave(&bmap->lock, flags);
5904 __clear_bit(msix_idx, bmap->msix_bmap);
5905 spin_unlock_irqrestore(&bmap->lock, flags);
5906}
5907
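/* Illustrative (hypothetical) caller, not taken from this file: a ULD that
 * needs a vector grabs an index from the bitmap, uses the corresponding
 * adap->msix_info[] entry, and returns the index when done:
 *
 *	int idx = cxgb4_get_msix_idx_from_bmap(adap);
 *	if (idx < 0)
 *		return idx;		// -ENOSPC: no free vector
 *	... use adap->msix_info[idx].vec ...
 *	cxgb4_free_msix_idx_in_bmap(adap, idx);
 */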
5908
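/* 2 MSI-X vectors needed for the FW event queue and non-data interrupts */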
5909#define EXTRA_VECS 2
5910
5911static int enable_msix(struct adapter *adap)
5912{
5913 u32 eth_need, uld_need = 0, ethofld_need = 0, mirror_need = 0;
5914 u32 ethqsets = 0, ofldqsets = 0, eoqsets = 0, mirrorqsets = 0;
5915 u8 num_uld = 0, nchan = adap->params.nports;
5916 u32 i, want, need, num_vec;
5917 struct sge *s = &adap->sge;
5918 struct msix_entry *entries;
5919 struct port_info *pi;
5920 int allocated, ret;
5921
5922 want = s->max_ethqsets;
5923#ifdef CONFIG_CHELSIO_T4_DCB
5924
5925
5926
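	/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
	 * each port.
	 */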
5927 need = 8 * nchan;
5928#else
5929 need = nchan;
5930#endif
5931 eth_need = need;
5932 if (is_uld(adap)) {
5933 num_uld = adap->num_ofld_uld + adap->num_uld;
5934 want += num_uld * s->ofldqsets;
5935 uld_need = num_uld * nchan;
5936 need += uld_need;
5937 }
5938
5939 if (is_ethofld(adap)) {
5940 want += s->eoqsets;
5941 ethofld_need = eth_need;
5942 need += ethofld_need;
5943 }
5944
5945 if (s->mirrorqsets) {
5946 want += s->mirrorqsets;
5947 mirror_need = nchan;
5948 need += mirror_need;
5949 }
5950
5951 want += EXTRA_VECS;
5952 need += EXTRA_VECS;
5953
5954 entries = kmalloc_array(want, sizeof(*entries), GFP_KERNEL);
5955 if (!entries)
5956 return -ENOMEM;
5957
5958 for (i = 0; i < want; i++)
5959 entries[i].entry = i;
5960
5961 allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
5962 if (allocated < 0) {
5963
5964
5965
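		/* Disable offload and attempt to get vectors for NIC
		 * only mode.
		 */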
5966 want = s->max_ethqsets + EXTRA_VECS;
5967 need = eth_need + EXTRA_VECS;
5968 allocated = pci_enable_msix_range(adap->pdev, entries,
5969 need, want);
5970 if (allocated < 0) {
5971 dev_info(adap->pdev_dev,
5972 "Disabling MSI-X due to insufficient MSI-X vectors\n");
5973 ret = allocated;
5974 goto out_free;
5975 }
5976
5977 dev_info(adap->pdev_dev,
5978 "Disabling offload due to insufficient MSI-X vectors\n");
5979 adap->params.offload = 0;
5980 adap->params.crypto = 0;
5981 adap->params.ethofld = 0;
5982 s->ofldqsets = 0;
5983 s->eoqsets = 0;
5984 s->mirrorqsets = 0;
5985 uld_need = 0;
5986 ethofld_need = 0;
5987 mirror_need = 0;
5988 }
5989
5990 num_vec = allocated;
5991 if (num_vec < want) {
5992
5993
5994
5995
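		/* Every group gets its minimum requirement first; then hand
		 * out the remaining vectors one queue set per port at a
		 * time, with NIC (and ETHOFLD, which mirrors NIC) taking
		 * priority over ULD and mirror queue sets.
		 */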
5996 ethqsets = eth_need;
5997 if (is_uld(adap))
5998 ofldqsets = nchan;
5999 if (is_ethofld(adap))
6000 eoqsets = ethofld_need;
6001 if (s->mirrorqsets)
6002 mirrorqsets = mirror_need;
6003
6004 num_vec -= need;
6005 while (num_vec) {
6006 if (num_vec < eth_need + ethofld_need ||
6007 ethqsets > s->max_ethqsets)
6008 break;
6009
6010 for_each_port(adap, i) {
6011 pi = adap2pinfo(adap, i);
6012 if (pi->nqsets < 2)
6013 continue;
6014
6015 ethqsets++;
6016 num_vec--;
6017 if (ethofld_need) {
6018 eoqsets++;
6019 num_vec--;
6020 }
6021 }
6022 }
6023
6024 if (is_uld(adap)) {
6025 while (num_vec) {
6026 if (num_vec < uld_need ||
6027 ofldqsets > s->ofldqsets)
6028 break;
6029
6030 ofldqsets++;
6031 num_vec -= uld_need;
6032 }
6033 }
6034
6035 if (s->mirrorqsets) {
6036 while (num_vec) {
6037 if (num_vec < mirror_need ||
6038 mirrorqsets > s->mirrorqsets)
6039 break;
6040
6041 mirrorqsets++;
6042 num_vec -= mirror_need;
6043 }
6044 }
6045 } else {
6046 ethqsets = s->max_ethqsets;
6047 if (is_uld(adap))
6048 ofldqsets = s->ofldqsets;
6049 if (is_ethofld(adap))
6050 eoqsets = s->eoqsets;
6051 if (s->mirrorqsets)
6052 mirrorqsets = s->mirrorqsets;
6053 }
6054
6055 if (ethqsets < s->max_ethqsets) {
6056 s->max_ethqsets = ethqsets;
6057 reduce_ethqs(adap, ethqsets);
6058 }
6059
6060 if (is_uld(adap)) {
6061 s->ofldqsets = ofldqsets;
6062 s->nqs_per_uld = s->ofldqsets;
6063 }
6064
6065 if (is_ethofld(adap))
6066 s->eoqsets = eoqsets;
6067
6068 if (s->mirrorqsets) {
6069 s->mirrorqsets = mirrorqsets;
6070 for_each_port(adap, i) {
6071 pi = adap2pinfo(adap, i);
6072 pi->nmirrorqsets = s->mirrorqsets / nchan;
6073 mutex_init(&pi->vi_mirror_mutex);
6074 }
6075 }
6076
6077
6078 ret = alloc_msix_info(adap, allocated);
6079 if (ret)
6080 goto out_disable_msix;
6081
6082 for (i = 0; i < allocated; i++) {
6083 adap->msix_info[i].vec = entries[i].vector;
6084 adap->msix_info[i].idx = i;
6085 }
6086
6087 dev_info(adap->pdev_dev,
6088 "%d MSI-X vectors allocated, nic %d eoqsets %d per uld %d mirrorqsets %d\n",
6089 allocated, s->max_ethqsets, s->eoqsets, s->nqs_per_uld,
6090 s->mirrorqsets);
6091
6092 kfree(entries);
6093 return 0;
6094
6095out_disable_msix:
6096 pci_disable_msix(adap->pdev);
6097
6098out_free:
6099 kfree(entries);
6100 return ret;
6101}
6102
6103#undef EXTRA_VECS
6104
6105static int init_rss(struct adapter *adap)
6106{
6107 unsigned int i;
6108 int err;
6109
6110 err = t4_init_rss_mode(adap, adap->mbox);
6111 if (err)
6112 return err;
6113
6114 for_each_port(adap, i) {
6115 struct port_info *pi = adap2pinfo(adap, i);
6116
6117 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
6118 if (!pi->rss)
6119 return -ENOMEM;
6120 }
6121 return 0;
6122}
6123
6124
6125static void print_adapter_info(struct adapter *adapter)
6126{
6127
6128 t4_dump_version_info(adapter);
6129
6130
6131 dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n",
6132 is_offload(adapter) ? "R" : "",
6133 ((adapter->flags & CXGB4_USING_MSIX) ? "MSI-X" :
6134 (adapter->flags & CXGB4_USING_MSI) ? "MSI" : ""),
6135 is_offload(adapter) ? "Offload" : "non-Offload");
6136}
6137
6138static void print_port_info(const struct net_device *dev)
6139{
6140 char buf[80];
6141 char *bufp = buf;
6142 const struct port_info *pi = netdev_priv(dev);
6143 const struct adapter *adap = pi->adapter;
6144
6145 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M)
6146 bufp += sprintf(bufp, "100M/");
6147 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G)
6148 bufp += sprintf(bufp, "1G/");
6149 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G)
6150 bufp += sprintf(bufp, "10G/");
6151 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G)
6152 bufp += sprintf(bufp, "25G/");
6153 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G)
6154 bufp += sprintf(bufp, "40G/");
6155 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G)
6156 bufp += sprintf(bufp, "50G/");
6157 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G)
6158 bufp += sprintf(bufp, "100G/");
6159 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_200G)
6160 bufp += sprintf(bufp, "200G/");
6161 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_400G)
6162 bufp += sprintf(bufp, "400G/");
6163 if (bufp != buf)
6164 --bufp;
6165 sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
6166
6167 netdev_info(dev, "%s: Chelsio %s (%s) %s\n",
6168 dev->name, adap->params.vpd.id, adap->name, buf);
6169}
6170
6171
6172
6173
6174
6175
6176
6177
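/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 */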
6178static void free_some_resources(struct adapter *adapter)
6179{
6180 unsigned int i;
6181
6182 kvfree(adapter->smt);
6183 kvfree(adapter->l2t);
6184 kvfree(adapter->srq);
6185 t4_cleanup_sched(adapter);
6186 kvfree(adapter->tids.tid_tab);
6187 cxgb4_cleanup_tc_matchall(adapter);
6188 cxgb4_cleanup_tc_mqprio(adapter);
6189 cxgb4_cleanup_tc_flower(adapter);
6190 cxgb4_cleanup_tc_u32(adapter);
6191 cxgb4_cleanup_ethtool_filters(adapter);
6192 kfree(adapter->sge.egr_map);
6193 kfree(adapter->sge.ingr_map);
6194 kfree(adapter->sge.starving_fl);
6195 kfree(adapter->sge.txq_maperr);
6196#ifdef CONFIG_DEBUG_FS
6197 kfree(adapter->sge.blocked_fl);
6198#endif
6199 disable_msi(adapter);
6200
6201 for_each_port(adapter, i)
6202 if (adapter->port[i]) {
6203 struct port_info *pi = adap2pinfo(adapter, i);
6204
6205 if (pi->viid != 0)
6206 t4_free_vi(adapter, adapter->mbox, adapter->pf,
6207 0, pi->viid);
6208 kfree(adap2pinfo(adapter, i)->rss);
6209 free_netdev(adapter->port[i]);
6210 }
6211 if (adapter->flags & CXGB4_FW_OK)
6212 t4_fw_bye(adapter, adapter->pf);
6213}
6214
6215#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | \
6216 NETIF_F_GSO_UDP_L4)
6217#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
6218 NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
6219#define SEGMENT_SIZE 128
6220
6221static int t4_get_chip_type(struct adapter *adap, int ver)
6222{
6223 u32 pl_rev = REV_G(t4_read_reg(adap, PL_REV_A));
6224
6225 switch (ver) {
6226 case CHELSIO_T4:
6227 return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
6228 case CHELSIO_T5:
6229 return CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
6230 case CHELSIO_T6:
6231 return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
6232 default:
6233 break;
6234 }
6235 return -EINVAL;
6236}
6237
6238#ifdef CONFIG_PCI_IOV
6239static void cxgb4_mgmt_setup(struct net_device *dev)
6240{
6241 dev->type = ARPHRD_NONE;
6242 dev->mtu = 0;
6243 dev->hard_header_len = 0;
6244 dev->addr_len = 0;
6245 dev->tx_queue_len = 0;
6246 dev->flags |= IFF_NOARP;
6247 dev->priv_flags |= IFF_NO_QUEUE;
6248
6249
6250 dev->netdev_ops = &cxgb4_mgmt_netdev_ops;
6251 dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops;
6252}
6253
6254static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
6255{
6256 struct adapter *adap = pci_get_drvdata(pdev);
6257 int err = 0;
6258 int current_vfs = pci_num_vf(pdev);
6259 u32 pcie_fw;
6260
6261 pcie_fw = readl(adap->regs + PCIE_FW_A);
6262
6263 if (!(pcie_fw & PCIE_FW_INIT_F)) {
6264 dev_warn(&pdev->dev, "Device not initialized\n");
6265 return -EOPNOTSUPP;
6266 }
6267
6268
6269
6270
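	/* If any of the VF's is already assigned to Guest OS, then
	 * SRIOV for the same cannot be modified
	 */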
6271 if (current_vfs && pci_vfs_assigned(pdev)) {
6272 dev_err(&pdev->dev,
6273 "Cannot modify SR-IOV while VFs are assigned\n");
6274 return current_vfs;
6275 }
6276
6277
6278
6279
6280 if (num_vfs != 0 && current_vfs != 0)
6281 return -EBUSY;
6282
6283
6284 if (num_vfs == current_vfs)
6285 return num_vfs;
6286
6287
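	/* Disable SRIOV and free the VF Management Interface and VF
	 * resources when zero VFs are requested.
	 */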
6288 if (!num_vfs) {
6289 pci_disable_sriov(pdev);
6290
6291 unregister_netdev(adap->port[0]);
6292 free_netdev(adap->port[0]);
6293 adap->port[0] = NULL;
6294
6295
6296 adap->num_vfs = 0;
6297 kfree(adap->vfinfo);
6298 adap->vfinfo = NULL;
6299 return 0;
6300 }
6301
6302 if (!current_vfs) {
6303 struct fw_pfvf_cmd port_cmd, port_rpl;
6304 struct net_device *netdev;
6305 unsigned int pmask, port;
6306 struct pci_dev *pbridge;
6307 struct port_info *pi;
6308 char name[IFNAMSIZ];
6309 u32 devcap2;
6310 u16 flags;
6311
6312
6313
6314
6315
6316
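		/* If we want to instantiate Virtual Functions, then our
		 * parent bridge's PCI-E needs to support Alternative Routing
		 * ID (ARI) because our VFs will show up at function offset 8
		 * and above.
		 */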
6317 pbridge = pdev->bus->self;
6318 pcie_capability_read_word(pbridge, PCI_EXP_FLAGS, &flags);
6319 pcie_capability_read_dword(pbridge, PCI_EXP_DEVCAP2, &devcap2);
6320
6321 if ((flags & PCI_EXP_FLAGS_VERS) < 2 ||
6322 !(devcap2 & PCI_EXP_DEVCAP2_ARI)) {
6323
6324
6325
6326
6327 dev_warn(&pdev->dev, "Parent bridge %02x:%02x.%x doesn't support ARI; can't instantiate Virtual Functions\n",
6328 pbridge->bus->number, PCI_SLOT(pbridge->devfn),
6329 PCI_FUNC(pbridge->devfn));
6330 return -ENOTSUPP;
6331 }
6332 memset(&port_cmd, 0, sizeof(port_cmd));
6333 port_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
6334 FW_CMD_REQUEST_F |
6335 FW_CMD_READ_F |
6336 FW_PFVF_CMD_PFN_V(adap->pf) |
6337 FW_PFVF_CMD_VFN_V(0));
6338 port_cmd.retval_len16 = cpu_to_be32(FW_LEN16(port_cmd));
6339 err = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd),
6340 &port_rpl);
6341 if (err)
6342 return err;
6343 pmask = FW_PFVF_CMD_PMASK_G(be32_to_cpu(port_rpl.type_to_neq));
6344 port = ffs(pmask) - 1;
6345
6346 snprintf(name, IFNAMSIZ, "mgmtpf%d,%d", adap->adap_idx,
6347 adap->pf);
6348 netdev = alloc_netdev(sizeof(struct port_info),
6349 name, NET_NAME_UNKNOWN, cxgb4_mgmt_setup);
6350 if (!netdev)
6351 return -ENOMEM;
6352
6353 pi = netdev_priv(netdev);
6354 pi->adapter = adap;
6355 pi->lport = port;
6356 pi->tx_chan = port;
6357 SET_NETDEV_DEV(netdev, &pdev->dev);
6358
6359 adap->port[0] = netdev;
6360 pi->port_id = 0;
6361
6362 err = register_netdev(adap->port[0]);
6363 if (err) {
6364 pr_info("Unable to register VF mgmt netdev %s\n", name);
6365 free_netdev(adap->port[0]);
6366 adap->port[0] = NULL;
6367 return err;
6368 }
6369
6370 adap->vfinfo = kcalloc(pci_sriov_get_totalvfs(pdev),
6371 sizeof(struct vf_info), GFP_KERNEL);
6372 if (!adap->vfinfo) {
6373 unregister_netdev(adap->port[0]);
6374 free_netdev(adap->port[0]);
6375 adap->port[0] = NULL;
6376 return -ENOMEM;
6377 }
6378 cxgb4_mgmt_fill_vf_station_mac_addr(adap);
6379 }
6380
6381 err = pci_enable_sriov(pdev, num_vfs);
6382 if (err) {
6383 pr_info("Unable to instantiate %d VFs\n", num_vfs);
6384 if (!current_vfs) {
6385 unregister_netdev(adap->port[0]);
6386 free_netdev(adap->port[0]);
6387 adap->port[0] = NULL;
6388 kfree(adap->vfinfo);
6389 adap->vfinfo = NULL;
6390 }
6391 return err;
6392 }
6393
6394 adap->num_vfs = num_vfs;
6395 return num_vfs;
6396}
6397#endif
6398
6399#if defined(CONFIG_CHELSIO_TLS_DEVICE)
6400
6401static int cxgb4_ktls_dev_add(struct net_device *netdev, struct sock *sk,
6402 enum tls_offload_ctx_dir direction,
6403 struct tls_crypto_info *crypto_info,
6404 u32 tcp_sn)
6405{
6406 struct adapter *adap = netdev2adap(netdev);
6407 int ret = 0;
6408
6409 mutex_lock(&uld_mutex);
6410 if (!adap->uld[CXGB4_ULD_CRYPTO].handle) {
6411 dev_err(adap->pdev_dev, "chcr driver is not loaded\n");
6412 ret = -EOPNOTSUPP;
6413 goto out_unlock;
6414 }
6415
6416 if (!adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops) {
6417 dev_err(adap->pdev_dev,
6418 "chcr driver has no registered tlsdev_ops()\n");
6419 ret = -EOPNOTSUPP;
6420 goto out_unlock;
6421 }
6422
6423 ret = cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_ENABLE);
6424 if (ret)
6425 goto out_unlock;
6426
6427 ret = adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops->tls_dev_add(netdev, sk,
6428 direction,
6429 crypto_info,
6430 tcp_sn);
6431
6432 if (ret)
6433 cxgb4_set_ktls_feature(adap,
6434 FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
6435out_unlock:
6436 mutex_unlock(&uld_mutex);
6437 return ret;
6438}
6439
6440static void cxgb4_ktls_dev_del(struct net_device *netdev,
6441 struct tls_context *tls_ctx,
6442 enum tls_offload_ctx_dir direction)
6443{
6444 struct adapter *adap = netdev2adap(netdev);
6445
6446 mutex_lock(&uld_mutex);
6447 if (!adap->uld[CXGB4_ULD_CRYPTO].handle) {
6448 dev_err(adap->pdev_dev, "chcr driver is not loaded\n");
6449 goto out_unlock;
6450 }
6451
6452 if (!adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops) {
6453 dev_err(adap->pdev_dev,
6454 "chcr driver has no registered tlsdev_ops\n");
6455 goto out_unlock;
6456 }
6457
6458 adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops->tls_dev_del(netdev, tls_ctx,
6459 direction);
6460 cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
6461
6462out_unlock:
6463 mutex_unlock(&uld_mutex);
6464}
6465
6466static const struct tlsdev_ops cxgb4_ktls_ops = {
6467 .tls_dev_add = cxgb4_ktls_dev_add,
6468 .tls_dev_del = cxgb4_ktls_dev_del,
6469};
6470#endif
6471
6472static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6473{
6474 struct net_device *netdev;
6475 struct adapter *adapter;
6476 static int adap_idx = 1;
6477 int s_qpp, qpp, num_seg;
6478 struct port_info *pi;
6479 bool highdma = false;
6480 enum chip_type chip;
6481 void __iomem *regs;
6482 int func, chip_ver;
6483 u16 device_id;
6484 int i, err;
6485 u32 whoami;
6486
6487 err = pci_request_regions(pdev, KBUILD_MODNAME);
6488 if (err) {
6489
6490 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
6491 return err;
6492 }
6493
6494 err = pci_enable_device(pdev);
6495 if (err) {
6496 dev_err(&pdev->dev, "cannot enable PCI device\n");
6497 goto out_release_regions;
6498 }
6499
6500 regs = pci_ioremap_bar(pdev, 0);
6501 if (!regs) {
6502 dev_err(&pdev->dev, "cannot map device registers\n");
6503 err = -ENOMEM;
6504 goto out_disable_device;
6505 }
6506
6507 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
6508 if (!adapter) {
6509 err = -ENOMEM;
6510 goto out_unmap_bar0;
6511 }
6512
6513 adapter->regs = regs;
6514 err = t4_wait_dev_ready(regs);
6515 if (err < 0)
6516 goto out_free_adapter;
6517
6518
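	/* We control everything through one PF */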
6519 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
6520 pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
6521 chip = t4_get_chip_type(adapter, CHELSIO_PCI_ID_VER(device_id));
6522 if ((int)chip < 0) {
6523 dev_err(&pdev->dev, "Device %d is not supported\n", device_id);
6524 err = chip;
6525 goto out_free_adapter;
6526 }
6527 chip_ver = CHELSIO_CHIP_VERSION(chip);
6528 func = chip_ver <= CHELSIO_T5 ?
6529 SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
6530
6531 adapter->pdev = pdev;
6532 adapter->pdev_dev = &pdev->dev;
6533 adapter->name = pci_name(pdev);
6534 adapter->mbox = func;
6535 adapter->pf = func;
6536 adapter->params.chip = chip;
6537 adapter->adap_idx = adap_idx;
6538 adapter->msg_enable = DFLT_MSG_ENABLE;
6539 adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
6540 (sizeof(struct mbox_cmd) *
6541 T4_OS_LOG_MBOX_CMDS),
6542 GFP_KERNEL);
6543 if (!adapter->mbox_log) {
6544 err = -ENOMEM;
6545 goto out_free_adapter;
6546 }
6547 spin_lock_init(&adapter->mbox_lock);
6548 INIT_LIST_HEAD(&adapter->mlist.list);
6549 adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
6550 pci_set_drvdata(pdev, adapter);
6551
6552 if (func != ent->driver_data) {
6553 pci_disable_device(pdev);
6554 pci_save_state(pdev);
6555 return 0;
6556 }
6557
6558 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
6559 highdma = true;
6560 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
6561 if (err) {
6562 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
6563 "coherent allocations\n");
6564 goto out_free_adapter;
6565 }
6566 } else {
6567 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6568 if (err) {
6569 dev_err(&pdev->dev, "no usable DMA configuration\n");
6570 goto out_free_adapter;
6571 }
6572 }
6573
6574 pci_enable_pcie_error_reporting(pdev);
6575 pci_set_master(pdev);
6576 pci_save_state(pdev);
6577 adap_idx++;
6578 adapter->workq = create_singlethread_workqueue("cxgb4");
6579 if (!adapter->workq) {
6580 err = -ENOMEM;
6581 goto out_free_adapter;
6582 }
6583
6584
6585 adapter->flags |= CXGB4_DEV_ENABLED;
6586 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
6587
6588
6589
6590
6591
6592
6593
6594
6595
6596
6597
6598
6599
6600
6601
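	/* If possible, we use PCIe Relaxed Ordering Attribute to deliver
	 * Ingress Packet Data to Free List Buffers in order to allow for
	 * chipset performance optimizations between the Root Complex and
	 * Memory Controllers.  (Messages to the associated Ingress Queue
	 * notifying new Packet Placement in the Free List Buffers will be
	 * sent without the Relaxed Ordering Attribute, thus guaranteeing
	 * that all preceding PCIe Transaction Layer Packets will be
	 * processed first.)  But some Root Complexes have various issues
	 * with Upstream Transaction Layer Packets with the Relaxed Ordering
	 * Attribute set, so we check our PCIe configuration space to see if
	 * it's flagged with advice against using Relaxed Ordering.
	 */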
6602 if (!pcie_relaxed_ordering_enabled(pdev))
6603 adapter->flags |= CXGB4_ROOT_NO_RELAXED_ORDERING;
6604
6605 spin_lock_init(&adapter->stats_lock);
6606 spin_lock_init(&adapter->tid_release_lock);
6607 spin_lock_init(&adapter->win0_lock);
6608
6609 INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
6610 INIT_WORK(&adapter->db_full_task, process_db_full);
6611 INIT_WORK(&adapter->db_drop_task, process_db_drop);
6612 INIT_WORK(&adapter->fatal_err_notify_task, notify_fatal_err);
6613
6614 err = t4_prep_adapter(adapter);
6615 if (err)
6616 goto out_free_adapter;
6617
6618 if (is_kdump_kernel()) {
6619
6620 err = cxgb4_cudbg_vmcore_add_dump(adapter);
6621 if (err) {
6622 dev_warn(adapter->pdev_dev,
6623 "Fail collecting vmcore device dump, err: %d. Continuing\n",
6624 err);
6625 err = 0;
6626 }
6627 }
6628
6629 if (!is_t4(adapter->params.chip)) {
6630 s_qpp = (QUEUESPERPAGEPF0_S +
6631 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
6632 adapter->pf);
6633 qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
6634 SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
6635 num_seg = PAGE_SIZE / SEGMENT_SIZE;
6636
6637
6638
6639
6640
6641
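		/* Each segment is 128B.  Write combining is usable only when
		 * the number of egress queues per page for this PF doesn't
		 * exceed the number of 128B segments that fit in a page.
		 */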
6642 if (qpp > num_seg) {
6643 dev_err(&pdev->dev,
6644 "Incorrect number of egress queues per page\n");
6645 err = -EINVAL;
6646 goto out_free_adapter;
6647 }
6648 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
6649 pci_resource_len(pdev, 2));
6650 if (!adapter->bar2) {
6651 dev_err(&pdev->dev, "cannot map device bar2 region\n");
6652 err = -ENOMEM;
6653 goto out_free_adapter;
6654 }
6655 }
6656
6657 setup_memwin(adapter);
6658 err = adap_init0(adapter, 0);
6659#ifdef CONFIG_DEBUG_FS
6660 bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
6661#endif
6662 setup_memwin_rdma(adapter);
6663 if (err)
6664 goto out_unmap_bar;
6665
6666
6667 if (!is_t4(adapter->params.chip))
6668 t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |
6669 (is_t5(adapter->params.chip) ? STATMODE_V(0) :
6670 T6_STATMODE_V(0)));
6671
6672
6673 INIT_LIST_HEAD(&adapter->mac_hlist);
6674
6675 for_each_port(adapter, i) {
6676
6677
6678
6679
6680
6681
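		/* For supporting MQPRIO Offload, need some extra
		 * queues for each ETHOFLD TIDs.  Keep it equal to
		 * MAX_ATIDS for now.  Once we connect to firmware
		 * later and query the EOTID params, we'll come to
		 * know the actual # of EOTIDs supported.
		 */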
6682 netdev = alloc_etherdev_mq(sizeof(struct port_info),
6683 MAX_ETH_QSETS + MAX_ATIDS);
6684 if (!netdev) {
6685 err = -ENOMEM;
6686 goto out_free_dev;
6687 }
6688
6689 SET_NETDEV_DEV(netdev, &pdev->dev);
6690
6691 adapter->port[i] = netdev;
6692 pi = netdev_priv(netdev);
6693 pi->adapter = adapter;
6694 pi->xact_addr_filt = -1;
6695 pi->port_id = i;
6696 netdev->irq = pdev->irq;
6697
6698 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
6699 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
6700 NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_GRO |
6701 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
6702 NETIF_F_HW_TC | NETIF_F_NTUPLE;
6703
6704 if (chip_ver > CHELSIO_T5) {
6705 netdev->hw_enc_features |= NETIF_F_IP_CSUM |
6706 NETIF_F_IPV6_CSUM |
6707 NETIF_F_RXCSUM |
6708 NETIF_F_GSO_UDP_TUNNEL |
6709 NETIF_F_GSO_UDP_TUNNEL_CSUM |
6710 NETIF_F_TSO | NETIF_F_TSO6;
6711
6712 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
6713 NETIF_F_GSO_UDP_TUNNEL_CSUM |
6714 NETIF_F_HW_TLS_RECORD;
6715
6716 if (adapter->rawf_cnt)
6717 netdev->udp_tunnel_nic_info = &cxgb_udp_tunnels;
6718 }
6719
6720 if (highdma)
6721 netdev->hw_features |= NETIF_F_HIGHDMA;
6722 netdev->features |= netdev->hw_features;
6723 netdev->vlan_features = netdev->features & VLAN_FEAT;
6724#if defined(CONFIG_CHELSIO_TLS_DEVICE)
6725 if (pi->adapter->params.crypto & FW_CAPS_CONFIG_TLS_HW) {
6726 netdev->hw_features |= NETIF_F_HW_TLS_TX;
6727 netdev->tlsdev_ops = &cxgb4_ktls_ops;
6728
6729 refcount_set(&pi->adapter->chcr_ktls.ktls_refcount, 0);
6730 }
6731#endif
6732 netdev->priv_flags |= IFF_UNICAST_FLT;
6733
6734
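		/* MTU range: 81 - 9600 */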
6735 netdev->min_mtu = 81;
6736 netdev->max_mtu = MAX_MTU;
6737
6738 netdev->netdev_ops = &cxgb4_netdev_ops;
6739#ifdef CONFIG_CHELSIO_T4_DCB
6740 netdev->dcbnl_ops = &cxgb4_dcb_ops;
6741 cxgb4_dcb_state_init(netdev);
6742 cxgb4_dcb_version_init(netdev);
6743#endif
6744 cxgb4_set_ethtool_ops(netdev);
6745 }
6746
6747 cxgb4_init_ethtool_dump(adapter);
6748
6749 pci_set_drvdata(pdev, adapter);
6750
6751 if (adapter->flags & CXGB4_FW_OK) {
6752 err = t4_port_init(adapter, func, func, 0);
6753 if (err)
6754 goto out_free_dev;
6755 } else if (adapter->params.nports == 1) {
6756
6757
6758
6759
6760
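		/* If we don't have a connection to the firmware -- possibly
		 * because of an error -- grab the raw VPD parameters so we
		 * can set the proper MAC Address on the debug network
		 * interface that we've created.
		 */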
6761 u8 hw_addr[ETH_ALEN];
6762 u8 *na = adapter->params.vpd.na;
6763
6764 err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
6765 if (!err) {
6766 for (i = 0; i < ETH_ALEN; i++)
6767 hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
6768 hex2val(na[2 * i + 1]));
6769 t4_set_hw_addr(adapter, 0, hw_addr);
6770 }
6771 }
6772
6773 if (!(adapter->flags & CXGB4_FW_OK))
6774 goto fw_attach_fail;
6775
6776
6777
6778
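	/* Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */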
6779 err = cfg_queues(adapter);
6780 if (err)
6781 goto out_free_dev;
6782
6783 adapter->smt = t4_init_smt();
6784 if (!adapter->smt) {
6785
6786 dev_warn(&pdev->dev, "could not allocate SMT, continuing\n");
6787 }
6788
6789 adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
6790 if (!adapter->l2t) {
6791
6792 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
6793 adapter->params.offload = 0;
6794 }
6795
6796#if IS_ENABLED(CONFIG_IPV6)
6797 if (chip_ver <= CHELSIO_T5 &&
6798 (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) {
6799
6800
6801
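		/* CLIP functionality is not present in hardware,
		 * hence disable all offload features
		 */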
6802 dev_warn(&pdev->dev,
6803 "CLIP not enabled in hardware, continuing\n");
6804 adapter->params.offload = 0;
6805 } else {
6806 adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
6807 adapter->clipt_end);
6808 if (!adapter->clipt) {
6809
6810
6811
6812 dev_warn(&pdev->dev,
6813 "could not allocate Clip table, continuing\n");
6814 adapter->params.offload = 0;
6815 }
6816 }
6817#endif
6818
6819 for_each_port(adapter, i) {
6820 pi = adap2pinfo(adapter, i);
6821 pi->sched_tbl = t4_init_sched(adapter->params.nsched_cls);
6822 if (!pi->sched_tbl)
6823 dev_warn(&pdev->dev,
6824 "could not activate scheduling on port %d\n",
6825 i);
6826 }
6827
6828 if (is_offload(adapter) || is_hashfilter(adapter)) {
6829 if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
6830 u32 v;
6831
6832 v = t4_read_reg(adapter, LE_DB_HASH_CONFIG_A);
6833 if (chip_ver <= CHELSIO_T5) {
6834 adapter->tids.nhash = 1 << HASHTIDSIZE_G(v);
6835 v = t4_read_reg(adapter, LE_DB_TID_HASHBASE_A);
6836 adapter->tids.hash_base = v / 4;
6837 } else {
6838 adapter->tids.nhash = HASHTBLSIZE_G(v) << 3;
6839 v = t4_read_reg(adapter,
6840 T6_LE_DB_HASH_TID_BASE_A);
6841 adapter->tids.hash_base = v;
6842 }
6843 }
6844 }

	if (tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, continuing\n");
		adapter->params.offload = 0;
	} else {
		adapter->tc_u32 = cxgb4_init_tc_u32(adapter);
		if (!adapter->tc_u32)
			dev_warn(&pdev->dev,
				 "could not offload tc u32, continuing\n");

		if (cxgb4_init_tc_flower(adapter))
			dev_warn(&pdev->dev,
				 "could not offload tc flower, continuing\n");

		if (cxgb4_init_tc_mqprio(adapter))
			dev_warn(&pdev->dev,
				 "could not offload tc mqprio, continuing\n");

		if (cxgb4_init_tc_matchall(adapter))
			dev_warn(&pdev->dev,
				 "could not offload tc matchall, continuing\n");

		if (cxgb4_init_ethtool_filters(adapter))
			dev_warn(&pdev->dev,
				 "could not initialize ethtool filters, continuing\n");
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= CXGB4_USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0) {
		adapter->flags |= CXGB4_USING_MSI;
		if (msi > 1)
			free_msix_info(adapter);
	}

	/* check for PCI Express bandwidth capabilities */
	pcie_print_link_status(pdev);

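	/* Set up reference counting for MPS TCAM entries so that shared
	 * MAC entries are only freed when their last user goes away.
	 */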
	cxgb4_init_mps_ref_entries(adapter);

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	err = setup_non_data_intr(adapter);
	if (err) {
		dev_err(adapter->pdev_dev,
			"Non Data interrupt allocation failed, err: %d\n", err);
		goto out_free_dev;
	}

	err = setup_fw_sge_queues(adapter);
	if (err) {
		dev_err(adapter->pdev_dev,
			"FW sge queue allocation failed, err %d\n", err);
		goto out_free_dev;
	}

fw_attach_fail:
	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		adapter->port[i]->dev_port = pi->lport;
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		netif_carrier_off(adapter->port[i]);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_uld(adapter))
		cxgb4_uld_enable(adapter);

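	/* The PTP hardware clock is only present on T5 and later chips */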
	if (!is_t4(adapter->params.chip))
		cxgb4_ptp_init(adapter);

	if (IS_REACHABLE(CONFIG_THERMAL) &&
	    !is_t4(adapter->params.chip) && (adapter->flags & CXGB4_FW_OK))
		cxgb4_thermal_init(adapter);

	print_adapter_info(adapter);
	return 0;

 out_free_dev:
	t4_free_sge_resources(adapter);
	free_some_resources(adapter);
	if (adapter->flags & CXGB4_USING_MSIX)
		free_msix_info(adapter);
	if (adapter->num_uld || adapter->num_ofld_uld)
		t4_uld_mem_free(adapter);
 out_unmap_bar:
	if (!is_t4(adapter->params.chip))
		iounmap(adapter->bar2);
 out_free_adapter:
	if (adapter->workq)
		destroy_workqueue(adapter->workq);

	kfree(adapter->mbox_log);
	kfree(adapter);
 out_unmap_bar0:
	iounmap(regs);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	return err;
}

static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);
	struct hash_mac_addr *entry, *tmp;

	if (!adapter) {
		pci_release_regions(pdev);
		return;
	}

	/* If we allocated filters, free up state associated with any
	 * valid filters ...
	 */
	clear_all_filters(adapter);

	adapter->flags |= CXGB4_SHUTTING_DOWN;

	if (adapter->pf == 4) {
		int i;

		/* Tear down per-adapter Work Queue first since it can contain
		 * references to our adapter data structure.
		 */
		destroy_workqueue(adapter->workq);

		if (is_uld(adapter)) {
			detach_ulds(adapter);
			t4_uld_clean_up(adapter);
		}

		adap_free_hma_mem(adapter);

		disable_interrupts(adapter);

		cxgb4_free_mps_ref_entries(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		debugfs_remove_recursive(adapter->debugfs_root);

		if (!is_t4(adapter->params.chip))
			cxgb4_ptp_stop(adapter);
		if (IS_REACHABLE(CONFIG_THERMAL))
			cxgb4_thermal_remove(adapter);

		if (adapter->flags & CXGB4_FULL_INIT_DONE)
			cxgb_down(adapter);

		if (adapter->flags & CXGB4_USING_MSIX)
			free_msix_info(adapter);
		if (adapter->num_uld || adapter->num_ofld_uld)
			t4_uld_mem_free(adapter);
		free_some_resources(adapter);
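		/* Release any MAC addresses still tracked on the adapter's
		 * hash-MAC list.
		 */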
		list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist,
					 list) {
			list_del(&entry->list);
			kfree(entry);
		}

#if IS_ENABLED(CONFIG_IPV6)
		t4_cleanup_clip_tbl(adapter);
#endif
		if (!is_t4(adapter->params.chip))
			iounmap(adapter->bar2);
	}
#ifdef CONFIG_PCI_IOV
	else {
		cxgb4_iov_configure(adapter->pdev, 0);
	}
#endif
	iounmap(adapter->regs);
	pci_disable_pcie_error_reporting(pdev);
	if (adapter->flags & CXGB4_DEV_ENABLED) {
		pci_disable_device(pdev);
		adapter->flags &= ~CXGB4_DEV_ENABLED;
	}
	pci_release_regions(pdev);
	kfree(adapter->mbox_log);
	synchronize_rcu();
	kfree(adapter);
}

/* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt
 * delivery.  This is essentially a stripped down version of the PCI remove()
 * function where we do the minimal amount of work necessary to shutdown any
 * active devices.
 */
static void shutdown_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	/* As with remove_one() above (see extended comment), we only want
	 * to do cleanup on PCI devices which went all the way through
	 * init_one() and therefore have a valid adapter associated with
	 * them.
	 */
	if (!adapter) {
		pci_release_regions(pdev);
		return;
	}

	adapter->flags |= CXGB4_SHUTTING_DOWN;

	if (adapter->pf == 4) {
		int i;

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				cxgb_close(adapter->port[i]);

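		/* Stop any mqprio offload state; this must run under RTNL */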
		rtnl_lock();
		cxgb4_mqprio_stop_offload(adapter);
		rtnl_unlock();

		if (is_uld(adapter)) {
			detach_ulds(adapter);
			t4_uld_clean_up(adapter);
		}

		disable_interrupts(adapter);
		disable_msi(adapter);

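		/* Quiesce the SGE and, if the firmware connection is up,
		 * send a final FW_BYE so the firmware knows we're gone.
		 */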
		t4_sge_stop(adapter);
		if (adapter->flags & CXGB4_FW_OK)
			t4_fw_bye(adapter, adapter->mbox);
	}
}

static struct pci_driver cxgb4_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe    = init_one,
	.remove   = remove_one,
	.shutdown = shutdown_one,
#ifdef CONFIG_PCI_IOV
	.sriov_configure = cxgb4_iov_configure,
#endif
	.err_handler = &cxgb4_eeh,
};

static int __init cxgb4_init_module(void)
{
	int ret;

	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);

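	/* Debugfs is optional: a failure to create the directory is not
	 * treated as fatal, the driver simply loads without debugfs.
	 */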
	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		goto err_pci;

#if IS_ENABLED(CONFIG_IPV6)
	if (!inet6addr_registered) {
		ret = register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		if (ret)
			pci_unregister_driver(&cxgb4_driver);
		else
			inet6addr_registered = true;
	}
#endif

	if (ret == 0)
		return ret;

err_pci:
	debugfs_remove(cxgb4_debugfs_root);

	return ret;
}

static void __exit cxgb4_cleanup_module(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (inet6addr_registered) {
		unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		inet6addr_registered = false;
	}
#endif
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);