#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* num_vfs module param is obsolete.
 * Do not use this parameter. Use the sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
56
57static const char * const ue_status_low_desc[] = {
58 "CEV",
59 "CTX",
60 "DBUF",
61 "ERX",
62 "Host",
63 "MPU",
64 "NDMA",
65 "PTC ",
66 "RDMA ",
67 "RXF ",
68 "RXIPS ",
69 "RXULP0 ",
70 "RXULP1 ",
71 "RXULP2 ",
72 "TIM ",
73 "TPOST ",
74 "TPRE ",
75 "TXIPS ",
76 "TXULP0 ",
77 "TXULP1 ",
78 "UC ",
79 "WDMA ",
80 "TXULP2 ",
81 "HOST1 ",
82 "P0_OB_LINK ",
83 "P1_OB_LINK ",
84 "HOST_GPIO ",
85 "MBOX ",
86 "ERX2 ",
87 "SPARE ",
88 "JTAG ",
89 "MPU_INTPEND "
90};
91
92
93static const char * const ue_status_hi_desc[] = {
94 "LPCMEMHOST",
95 "MGMT_MAC",
96 "PCS0ONLINE",
97 "MPU_IRAM",
98 "PCS1ONLINE",
99 "PCTL0",
100 "PCTL1",
101 "PMEM",
102 "RR",
103 "TXPB",
104 "RXPP",
105 "XAUI",
106 "TXP",
107 "ARM",
108 "IPC",
109 "HOST2",
110 "HOST3",
111 "HOST4",
112 "HOST5",
113 "HOST6",
114 "HOST7",
115 "ECRC",
116 "Poison TLP",
117 "NETC",
118 "PERIPH",
119 "LLTXULP",
120 "D2P",
121 "RCON",
122 "LDMA",
123 "LLTXP",
124 "LLTXPB",
125 "Unknown"
126};
127
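/* Frees the DMA-coherent ring memory backing a queue, if it was allocated */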
128static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
129{
130 struct be_dma_mem *mem = &q->dma_mem;
131
132 if (mem->va) {
133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
135 mem->va = NULL;
136 }
137}
138
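/* Allocates zeroed DMA-coherent memory for a ring of 'len' entries of
 * 'entry_size' bytes each and initializes the queue bookkeeping.
 */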
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
140 u16 len, u16 entry_size)
141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
148 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149 GFP_KERNEL);
150 if (!mem->va)
151 return -ENOMEM;
152 return 0;
153}
154
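/* Enables/disables host interrupts by toggling the HOSTINTR bit of the
 * MEMBAR interrupt-control register through PCI config space.
 */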
155static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
156{
157 u32 reg, enabled;
158
159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
163 if (!enabled && enable)
164 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
165 else if (enabled && !enable)
166 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
167 else
168 return;
169
170 pci_write_config_dword(adapter->pdev,
171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
172}
173
174static void be_intr_set(struct be_adapter *adapter, bool enable)
175{
176 int status = 0;

	/* Interrupt enable/disable is not done this way on Lancer chips */
179 if (lancer_chip(adapter))
180 return;
181
182 if (be_check_error(adapter, BE_ERROR_EEH))
183 return;
184
185 status = be_cmd_intr_set(adapter, enable);
186 if (status)
187 be_reg_intr_set(adapter, enable);
188}
189
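/* Rings the RX doorbell with the number of receive buffers just posted */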
190static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
191{
192 u32 val = 0;
193
194 if (be_check_error(adapter, BE_ERROR_HW))
195 return;
196
197 val |= qid & DB_RQ_RING_ID_MASK;
198 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
199
200 wmb();
201 iowrite32(val, adapter->db + DB_RQ_OFFSET);
202}
203
204static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
205 u16 posted)
206{
207 u32 val = 0;
208
209 if (be_check_error(adapter, BE_ERROR_HW))
210 return;
211
212 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
213 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
214
215 wmb();
216 iowrite32(val, adapter->db + txo->db_offset);
217}
218
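/* Rings the EQ doorbell: acks 'num_popped' events, optionally re-arms the EQ
 * and clears the interrupt, and programs the delay-multiplier encoding.
 */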
219static void be_eq_notify(struct be_adapter *adapter, u16 qid,
220 bool arm, bool clear_int, u16 num_popped,
221 u32 eq_delay_mult_enc)
222{
223 u32 val = 0;
224
225 val |= qid & DB_EQ_RING_ID_MASK;
226 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
227
228 if (be_check_error(adapter, BE_ERROR_HW))
229 return;
230
231 if (arm)
232 val |= 1 << DB_EQ_REARM_SHIFT;
233 if (clear_int)
234 val |= 1 << DB_EQ_CLR_SHIFT;
235 val |= 1 << DB_EQ_EVNT_SHIFT;
236 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
237 val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
238 iowrite32(val, adapter->db + DB_EQ_OFFSET);
239}
240
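/* Rings the CQ doorbell: acks 'num_popped' completions and optionally
 * re-arms the CQ.
 */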
241void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
242{
243 u32 val = 0;
244
245 val |= qid & DB_CQ_RING_ID_MASK;
246 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
247 DB_CQ_RING_ID_EXT_MASK_SHIFT);
248
249 if (be_check_error(adapter, BE_ERROR_HW))
250 return;
251
252 if (arm)
253 val |= 1 << DB_CQ_REARM_SHIFT;
254 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
255 iowrite32(val, adapter->db + DB_CQ_OFFSET);
256}
257
258static int be_mac_addr_set(struct net_device *netdev, void *p)
259{
260 struct be_adapter *adapter = netdev_priv(netdev);
261 struct device *dev = &adapter->pdev->dev;
262 struct sockaddr *addr = p;
263 int status;
264 u8 mac[ETH_ALEN];
265 u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
266
267 if (!is_valid_ether_addr(addr->sa_data))
268 return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC differs from the
	 * currently active MAC.
	 */
273 if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
274 return 0;

	/* If the device is not running, just copy the MAC to netdev->dev_addr */
277 if (!netif_running(netdev))
278 goto done;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if the PF did not pre-provision the new MAC address.
	 * That failure is OK only if the PF has programmed the MAC for the
	 * VF; whether the new MAC actually took effect is decided below by
	 * querying the FW for the active MAC.
	 */
286 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
287 adapter->if_handle, &adapter->pmac_id[0], 0);
288 if (!status) {
289 curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old MAC that was programmed earlier, now that
		 * the new MAC has been successfully added.
		 */
294 if (adapter->pmac_id[0] != old_pmac_id)
295 be_cmd_pmac_del(adapter, adapter->if_handle,
296 old_pmac_id, 0);
297 }

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW for the currently active MAC.
	 */
302 status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
303 adapter->if_handle, true, 0);
304 if (status)
305 goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or because the PF didn't pre-provision it.
	 */
310 if (!ether_addr_equal(addr->sa_data, mac)) {
311 status = -EPERM;
312 goto err;
313 }
314done:
315 ether_addr_copy(netdev->dev_addr, addr->sa_data);
316 dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
317 return 0;
318err:
319 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
320 return status;
321}
322
323
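/* The GET_STATS response layout differs by chip generation (v0 on BE2,
 * v1 on BE3, v2 otherwise); return a pointer to the hw_stats block in the
 * layout that applies to this adapter.
 */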
324static void *hw_stats_from_cmd(struct be_adapter *adapter)
325{
326 if (BE2_chip(adapter)) {
327 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
328
329 return &cmd->hw_stats;
330 } else if (BE3_chip(adapter)) {
331 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
332
333 return &cmd->hw_stats;
334 } else {
335 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
336
337 return &cmd->hw_stats;
338 }
339}
340
341
342static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
343{
344 if (BE2_chip(adapter)) {
345 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
346
347 return &hw_stats->erx;
348 } else if (BE3_chip(adapter)) {
349 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
350
351 return &hw_stats->erx;
352 } else {
353 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
354
355 return &hw_stats->erx;
356 }
357}
358
359static void populate_be_v0_stats(struct be_adapter *adapter)
360{
361 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
362 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
363 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
364 struct be_port_rxf_stats_v0 *port_stats =
365 &rxf_stats->port[adapter->port_num];
366 struct be_drv_stats *drvs = &adapter->drv_stats;
367
368 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
369 drvs->rx_pause_frames = port_stats->rx_pause_frames;
370 drvs->rx_crc_errors = port_stats->rx_crc_errors;
371 drvs->rx_control_frames = port_stats->rx_control_frames;
372 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
373 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
374 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
375 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
376 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
377 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
378 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
379 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
380 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
381 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
382 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
383 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
384 drvs->rx_dropped_header_too_small =
385 port_stats->rx_dropped_header_too_small;
386 drvs->rx_address_filtered =
387 port_stats->rx_address_filtered +
388 port_stats->rx_vlan_filtered;
389 drvs->rx_alignment_symbol_errors =
390 port_stats->rx_alignment_symbol_errors;
391
392 drvs->tx_pauseframes = port_stats->tx_pauseframes;
393 drvs->tx_controlframes = port_stats->tx_controlframes;
394
395 if (adapter->port_num)
396 drvs->jabber_events = rxf_stats->port1_jabber_events;
397 else
398 drvs->jabber_events = rxf_stats->port0_jabber_events;
399 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
400 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
401 drvs->forwarded_packets = rxf_stats->forwarded_packets;
402 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
403 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
404 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
405 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
406}
407
408static void populate_be_v1_stats(struct be_adapter *adapter)
409{
410 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
411 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
412 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
413 struct be_port_rxf_stats_v1 *port_stats =
414 &rxf_stats->port[adapter->port_num];
415 struct be_drv_stats *drvs = &adapter->drv_stats;
416
417 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
418 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
419 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
420 drvs->rx_pause_frames = port_stats->rx_pause_frames;
421 drvs->rx_crc_errors = port_stats->rx_crc_errors;
422 drvs->rx_control_frames = port_stats->rx_control_frames;
423 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
424 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
425 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
426 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
427 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
428 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
429 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
430 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
431 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
432 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
433 drvs->rx_dropped_header_too_small =
434 port_stats->rx_dropped_header_too_small;
435 drvs->rx_input_fifo_overflow_drop =
436 port_stats->rx_input_fifo_overflow_drop;
437 drvs->rx_address_filtered = port_stats->rx_address_filtered;
438 drvs->rx_alignment_symbol_errors =
439 port_stats->rx_alignment_symbol_errors;
440 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
441 drvs->tx_pauseframes = port_stats->tx_pauseframes;
442 drvs->tx_controlframes = port_stats->tx_controlframes;
443 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
444 drvs->jabber_events = port_stats->jabber_events;
445 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
446 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
447 drvs->forwarded_packets = rxf_stats->forwarded_packets;
448 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
449 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
450 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
451 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
452}
453
454static void populate_be_v2_stats(struct be_adapter *adapter)
455{
456 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
457 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
458 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
459 struct be_port_rxf_stats_v2 *port_stats =
460 &rxf_stats->port[adapter->port_num];
461 struct be_drv_stats *drvs = &adapter->drv_stats;
462
463 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
464 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
465 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
466 drvs->rx_pause_frames = port_stats->rx_pause_frames;
467 drvs->rx_crc_errors = port_stats->rx_crc_errors;
468 drvs->rx_control_frames = port_stats->rx_control_frames;
469 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
470 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
471 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
472 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
473 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
474 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
475 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
476 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
477 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
478 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
479 drvs->rx_dropped_header_too_small =
480 port_stats->rx_dropped_header_too_small;
481 drvs->rx_input_fifo_overflow_drop =
482 port_stats->rx_input_fifo_overflow_drop;
483 drvs->rx_address_filtered = port_stats->rx_address_filtered;
484 drvs->rx_alignment_symbol_errors =
485 port_stats->rx_alignment_symbol_errors;
486 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
487 drvs->tx_pauseframes = port_stats->tx_pauseframes;
488 drvs->tx_controlframes = port_stats->tx_controlframes;
489 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
490 drvs->jabber_events = port_stats->jabber_events;
491 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
492 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
493 drvs->forwarded_packets = rxf_stats->forwarded_packets;
494 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
495 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
496 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
497 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
498 if (be_roce_supported(adapter)) {
499 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
500 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
501 drvs->rx_roce_frames = port_stats->roce_frames_received;
502 drvs->roce_drops_crc = port_stats->roce_drops_crc;
503 drvs->roce_drops_payload_len =
504 port_stats->roce_drops_payload_len;
505 }
506}
507
508static void populate_lancer_stats(struct be_adapter *adapter)
509{
510 struct be_drv_stats *drvs = &adapter->drv_stats;
511 struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
512
513 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
514 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
515 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
516 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
517 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
518 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
519 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
520 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
521 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
522 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
523 drvs->rx_dropped_tcp_length =
524 pport_stats->rx_dropped_invalid_tcp_length;
525 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
526 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
527 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
528 drvs->rx_dropped_header_too_small =
529 pport_stats->rx_dropped_header_too_small;
530 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
531 drvs->rx_address_filtered =
532 pport_stats->rx_address_filtered +
533 pport_stats->rx_vlan_filtered;
534 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
535 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
536 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
537 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
538 drvs->jabber_events = pport_stats->rx_jabbers;
539 drvs->forwarded_packets = pport_stats->num_forwards_lo;
540 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
541 drvs->rx_drops_too_many_frags =
542 pport_stats->rx_drops_too_many_frags_lo;
543}
544
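/* Accumulates a 16-bit HW counter (which wraps at 65536) into a 32-bit
 * driver counter: the low 16 bits mirror the HW value and the high 16 bits
 * count the number of wrap-arounds observed.
 */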
545static void accumulate_16bit_val(u32 *acc, u16 val)
546{
547#define lo(x) (x & 0xFFFF)
548#define hi(x) (x & 0xFFFF0000)
549 bool wrapped = val < lo(*acc);
550 u32 newacc = hi(*acc) + val;
551
552 if (wrapped)
553 newacc += 65536;
554 ACCESS_ONCE(*acc) = newacc;
555}
556
557static void populate_erx_stats(struct be_adapter *adapter,
558 struct be_rx_obj *rxo, u32 erx_stat)
559{
560 if (!BEx_chip(adapter))
561 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
562 else
		/* This erx HW counter wraps around after 65535; the driver
		 * accumulates it into a 32-bit value.
		 */
566 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
567 (u16)erx_stat);
568}
569
570void be_parse_stats(struct be_adapter *adapter)
571{
572 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
573 struct be_rx_obj *rxo;
574 int i;
575 u32 erx_stat;
576
577 if (lancer_chip(adapter)) {
578 populate_lancer_stats(adapter);
579 } else {
580 if (BE2_chip(adapter))
581 populate_be_v0_stats(adapter);
582 else if (BE3_chip(adapter))
			/* BE3 uses the v1 stats layout */
584 populate_be_v1_stats(adapter);
585 else
586 populate_be_v2_stats(adapter);
587
588
589 for_all_rx_queues(adapter, rxo, i) {
590 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
591 populate_erx_stats(adapter, rxo, erx_stat);
592 }
593 }
594}
595
596static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
597 struct rtnl_link_stats64 *stats)
598{
599 struct be_adapter *adapter = netdev_priv(netdev);
600 struct be_drv_stats *drvs = &adapter->drv_stats;
601 struct be_rx_obj *rxo;
602 struct be_tx_obj *txo;
603 u64 pkts, bytes;
604 unsigned int start;
605 int i;
606
607 for_all_rx_queues(adapter, rxo, i) {
608 const struct be_rx_stats *rx_stats = rx_stats(rxo);
609
610 do {
611 start = u64_stats_fetch_begin_irq(&rx_stats->sync);
612 pkts = rx_stats(rxo)->rx_pkts;
613 bytes = rx_stats(rxo)->rx_bytes;
614 } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
615 stats->rx_packets += pkts;
616 stats->rx_bytes += bytes;
617 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
618 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
619 rx_stats(rxo)->rx_drops_no_frags;
620 }
621
622 for_all_tx_queues(adapter, txo, i) {
623 const struct be_tx_stats *tx_stats = tx_stats(txo);
624
625 do {
626 start = u64_stats_fetch_begin_irq(&tx_stats->sync);
627 pkts = tx_stats(txo)->tx_pkts;
628 bytes = tx_stats(txo)->tx_bytes;
629 } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
630 stats->tx_packets += pkts;
631 stats->tx_bytes += bytes;
632 }
633
634
635 stats->rx_errors = drvs->rx_crc_errors +
636 drvs->rx_alignment_symbol_errors +
637 drvs->rx_in_range_errors +
638 drvs->rx_out_range_errors +
639 drvs->rx_frame_too_long +
640 drvs->rx_dropped_too_small +
641 drvs->rx_dropped_too_short +
642 drvs->rx_dropped_header_too_small +
643 drvs->rx_dropped_tcp_length +
644 drvs->rx_dropped_runt;
645
646
647 stats->rx_length_errors = drvs->rx_in_range_errors +
648 drvs->rx_out_range_errors +
649 drvs->rx_frame_too_long;
650
651 stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
654 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not a per-interface counter; it is per adapter */
658 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
659 drvs->rx_input_fifo_overflow_drop +
660 drvs->rx_drops_no_pbuf;
661 return stats;
662}
663
664void be_link_status_update(struct be_adapter *adapter, u8 link_status)
665{
666 struct net_device *netdev = adapter->netdev;
667
668 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
669 netif_carrier_off(netdev);
670 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
671 }
672
673 if (link_status)
674 netif_carrier_on(netdev);
675 else
676 netif_carrier_off(netdev);
677
678 netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
679}
680
681static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
682{
683 struct be_tx_stats *stats = tx_stats(txo);
684
685 u64_stats_update_begin(&stats->sync);
686 stats->tx_reqs++;
687 stats->tx_bytes += skb->len;
688 stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
689 u64_stats_update_end(&stats->sync);
690}

/* Returns the number of WRBs needed for the given skb */
693static u32 skb_wrb_cnt(struct sk_buff *skb)
694{
	/* +1 for the header WRB */
696 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
697}
698
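/* Fills a fragment WRB with the 64-bit DMA address (split into hi/lo words)
 * and the fragment length, in the little-endian format expected by the HW.
 */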
699static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
700{
701 wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
702 wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
703 wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
704 wrb->rsvd0 = 0;
705}

/* A dummy WRB is all zeros; a separate helper avoids the byte-swap and
 * shift/mask operations done in wrb_fill().
 */
710static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
711{
712 wrb->frag_pa_hi = 0;
713 wrb->frag_pa_lo = 0;
714 wrb->frag_len = 0;
715 wrb->rsvd0 = 0;
716}
717
718static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
719 struct sk_buff *skb)
720{
721 u8 vlan_prio;
722 u16 vlan_tag;
723
724 vlan_tag = skb_vlan_tag_get(skb);
725 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
726
727 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
728 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
729 adapter->recommended_prio;
730
731 return vlan_tag;
732}
733
734
735static u16 skb_inner_ip_proto(struct sk_buff *skb)
736{
737 return (inner_ip_hdr(skb)->version == 4) ?
738 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
739}
740
741static u16 skb_ip_proto(struct sk_buff *skb)
742{
743 return (ip_hdr(skb)->version == 4) ?
744 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
745}
746
747static inline bool be_is_txq_full(struct be_tx_obj *txo)
748{
749 return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
750}
751
752static inline bool be_can_txq_wake(struct be_tx_obj *txo)
753{
754 return atomic_read(&txo->q.used) < txo->q.len / 2;
755}
756
757static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
758{
759 return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
760}
761
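/* Translates the skb's offload requests (GSO, checksum offload, VLAN tag)
 * into the feature flags that are later encoded into the TX WRB header.
 */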
762static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
763 struct sk_buff *skb,
764 struct be_wrb_params *wrb_params)
765{
766 u16 proto;
767
768 if (skb_is_gso(skb)) {
769 BE_WRB_F_SET(wrb_params->features, LSO, 1);
770 wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
771 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
772 BE_WRB_F_SET(wrb_params->features, LSO6, 1);
773 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
774 if (skb->encapsulation) {
775 BE_WRB_F_SET(wrb_params->features, IPCS, 1);
776 proto = skb_inner_ip_proto(skb);
777 } else {
778 proto = skb_ip_proto(skb);
779 }
780 if (proto == IPPROTO_TCP)
781 BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
782 else if (proto == IPPROTO_UDP)
783 BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
784 }
785
786 if (skb_vlan_tag_present(skb)) {
787 BE_WRB_F_SET(wrb_params->features, VLAN, 1);
788 wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
789 }
790
791 BE_WRB_F_SET(wrb_params->features, CRC, 1);
792}
793
794static void wrb_fill_hdr(struct be_adapter *adapter,
795 struct be_eth_hdr_wrb *hdr,
796 struct be_wrb_params *wrb_params,
797 struct sk_buff *skb)
798{
799 memset(hdr, 0, sizeof(*hdr));
800
801 SET_TX_WRB_HDR_BITS(crc, hdr,
802 BE_WRB_F_GET(wrb_params->features, CRC));
803 SET_TX_WRB_HDR_BITS(ipcs, hdr,
804 BE_WRB_F_GET(wrb_params->features, IPCS));
805 SET_TX_WRB_HDR_BITS(tcpcs, hdr,
806 BE_WRB_F_GET(wrb_params->features, TCPCS));
807 SET_TX_WRB_HDR_BITS(udpcs, hdr,
808 BE_WRB_F_GET(wrb_params->features, UDPCS));
809
810 SET_TX_WRB_HDR_BITS(lso, hdr,
811 BE_WRB_F_GET(wrb_params->features, LSO));
812 SET_TX_WRB_HDR_BITS(lso6, hdr,
813 BE_WRB_F_GET(wrb_params->features, LSO6));
814 SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* The hack to skip HW VLAN tagging needs event = 1, compl = 0. When
	 * this hack is not needed, the event bit is set while ringing the DB.
	 */
819 SET_TX_WRB_HDR_BITS(event, hdr,
820 BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
821 SET_TX_WRB_HDR_BITS(vlan, hdr,
822 BE_WRB_F_GET(wrb_params->features, VLAN));
823 SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);
824
825 SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
826 SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
827 SET_TX_WRB_HDR_BITS(mgmt, hdr,
828 BE_WRB_F_GET(wrb_params->features, OS2BMC));
829}
830
831static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
832 bool unmap_single)
833{
834 dma_addr_t dma;
835 u32 frag_len = le32_to_cpu(wrb->frag_len);
836
837
838 dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
839 (u64)le32_to_cpu(wrb->frag_pa_lo);
840 if (frag_len) {
841 if (unmap_single)
842 dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
843 else
844 dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
845 }
846}

/* Grab a WRB header for xmit */
849static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
850{
851 u16 head = txo->q.head;
852
853 queue_head_inc(&txo->q);
854 return head;
855}

/* Set up the WRB header for xmit */
858static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
859 struct be_tx_obj *txo,
860 struct be_wrb_params *wrb_params,
861 struct sk_buff *skb, u16 head)
862{
863 u32 num_frags = skb_wrb_cnt(skb);
864 struct be_queue_info *txq = &txo->q;
865 struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);
866
867 wrb_fill_hdr(adapter, hdr, wrb_params, skb);
868 be_dws_cpu_to_le(hdr, sizeof(*hdr));
869
870 BUG_ON(txo->sent_skb_list[head]);
871 txo->sent_skb_list[head] = skb;
872 txo->last_req_hdr = head;
873 atomic_add(num_frags, &txq->used);
874 txo->last_req_wrb_cnt = num_frags;
875 txo->pend_wrb_cnt += num_frags;
876}

/* Set up a fragment WRB at the queue head for one DMA-mapped buffer */
879static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
880 int len)
881{
882 struct be_eth_wrb *wrb;
883 struct be_queue_info *txq = &txo->q;
884
885 wrb = queue_head_node(txq);
886 wrb_fill(wrb, busaddr, len);
887 queue_head_inc(txq);
888}

/* Undo a partially set up transmit: unmap whatever fragments were already
 * DMA-mapped and rewind the TX queue head to where it was before
 * be_xmit_enqueue() started.
 */
894static void be_xmit_restore(struct be_adapter *adapter,
895 struct be_tx_obj *txo, u16 head, bool map_single,
896 u32 copied)
897{
898 struct device *dev;
899 struct be_eth_wrb *wrb;
900 struct be_queue_info *txq = &txo->q;
901
902 dev = &adapter->pdev->dev;
903 txq->head = head;

	/* skip the already-consumed header WRB; it is not DMA-mapped */
906 queue_head_inc(txq);
907 while (copied) {
908 wrb = queue_head_node(txq);
909 unmap_tx_frag(dev, wrb, map_single);
910 map_single = false;
911 copied -= le32_to_cpu(wrb->frag_len);
912 queue_head_inc(txq);
913 }
914
915 txq->head = head;
916}
917

/* Maps the skb (linear part and page frags) for DMA, fills one fragment WRB
 * per mapped buffer plus the header WRB, and returns the number of WRBs used
 * (0 on a DMA mapping error, in which case the queue state is restored).
 */
922static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
923 struct sk_buff *skb,
924 struct be_wrb_params *wrb_params)
925{
926 u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
927 struct device *dev = &adapter->pdev->dev;
928 struct be_queue_info *txq = &txo->q;
929 bool map_single = false;
930 u16 head = txq->head;
931 dma_addr_t busaddr;
932 int len;
933
934 head = be_tx_get_wrb_hdr(txo);
935
936 if (skb->len > skb->data_len) {
937 len = skb_headlen(skb);
938
939 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
940 if (dma_mapping_error(dev, busaddr))
941 goto dma_err;
942 map_single = true;
943 be_tx_setup_wrb_frag(txo, busaddr, len);
944 copied += len;
945 }
946
947 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
948 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
949 len = skb_frag_size(frag);
950
951 busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
952 if (dma_mapping_error(dev, busaddr))
953 goto dma_err;
954 be_tx_setup_wrb_frag(txo, busaddr, len);
955 copied += len;
956 }
957
958 be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);
959
960 be_tx_stats_update(txo, skb);
961 return wrb_cnt;
962
963dma_err:
964 adapter->drv_stats.dma_map_errors++;
965 be_xmit_restore(adapter, txo, head, map_single, copied);
966 return 0;
967}
968
969static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
970{
971 return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
972}
973
974static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
975 struct sk_buff *skb,
976 struct be_wrb_params
977 *wrb_params)
978{
979 u16 vlan_tag = 0;
980
981 skb = skb_share_check(skb, GFP_ATOMIC);
982 if (unlikely(!skb))
983 return skb;
984
985 if (skb_vlan_tag_present(skb))
986 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
987
988 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
989 if (!vlan_tag)
990 vlan_tag = adapter->pvid;
991
992
993
994 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
995 }
996
997 if (vlan_tag) {
998 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
999 vlan_tag);
1000 if (unlikely(!skb))
1001 return skb;
1002 skb->vlan_tci = 0;
1003 }
1004
1005
1006 if (adapter->qnq_vid) {
1007 vlan_tag = adapter->qnq_vid;
1008 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
1009 vlan_tag);
1010 if (unlikely(!skb))
1011 return skb;
1012 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
1013 }
1014
1015 return skb;
1016}
1017
1018static bool be_ipv6_exthdr_check(struct sk_buff *skb)
1019{
1020 struct ethhdr *eh = (struct ethhdr *)skb->data;
1021 u16 offset = ETH_HLEN;
1022
1023 if (eh->h_proto == htons(ETH_P_IPV6)) {
1024 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
1025
1026 offset += sizeof(struct ipv6hdr);
1027 if (ip6h->nexthdr != NEXTHDR_TCP &&
1028 ip6h->nexthdr != NEXTHDR_UDP) {
1029 struct ipv6_opt_hdr *ehdr =
1030 (struct ipv6_opt_hdr *)(skb->data + offset);
1031
1032
1033 if (ehdr->hdrlen == 0xff)
1034 return true;
1035 }
1036 }
1037 return false;
1038}
1039
1040static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
1041{
1042 return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
1043}
1044
1045static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
1046{
1047 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
1048}
1049
1050static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
1051 struct sk_buff *skb,
1052 struct be_wrb_params
1053 *wrb_params)
1054{
1055 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
1056 unsigned int eth_hdr_len;
1057 struct iphdr *ip;

	/* For padded packets, the HW mangles the IP-header tot_len field when
	 * it inserts a VLAN tag, and Lancer computes an incorrect checksum;
	 * trim such short IPv4 packets back to the length in the IP header.
	 */
1063 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
1064 VLAN_ETH_HLEN : ETH_HLEN;
1065 if (skb->len <= 60 &&
1066 (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
1067 is_ipv4_pkt(skb)) {
1068 ip = (struct iphdr *)ip_hdr(skb);
1069 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
1070 }

	/* If the VLAN tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode.
	 */
1075 if (be_pvid_tagging_enabled(adapter) &&
1076 veh->h_vlan_proto == htons(ETH_P_8021Q))
1077 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* The HW has a bug that makes it compute a checksum for VLAN packets
	 * even when checksum offload is not requested; insert the VLAN tag
	 * into the packet manually for such skbs.
	 */
1083 if (skb->ip_summed != CHECKSUM_PARTIAL &&
1084 skb_vlan_tag_present(skb)) {
1085 skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
1086 if (unlikely(!skb))
1087 goto err;
1088 }

	/* The HW may lock up when VLAN HW tagging is requested on certain
	 * ipv6 packets. Drop such packets if the FW workaround to skip
	 * HW tagging is not enabled.
	 */
1094 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
1095 (adapter->pvid || adapter->qnq_vid) &&
1096 !qnq_async_evt_rcvd(adapter)))
1097 goto tx_drop;

	/* The ASIC can lock up when it inserts a VLAN tag into certain ipv6
	 * packets. For such packets insert the VLAN tag in the driver
	 * instead, and set the event/completion/vlan bits accordingly in
	 * the TX WRB.
	 */
1105 if (be_ipv6_tx_stall_chk(adapter, skb) &&
1106 be_vlan_tag_tx_chk(adapter, skb)) {
1107 skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
1108 if (unlikely(!skb))
1109 goto err;
1110 }
1111
1112 return skb;
1113tx_drop:
1114 dev_kfree_skb_any(skb);
1115err:
1116 return NULL;
1117}
1118
1119static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1120 struct sk_buff *skb,
1121 struct be_wrb_params *wrb_params)
1122{

	/* Lancer and Skyhawk ASICs may stall the TX port on packets that are
	 * 32 bytes or smaller; pad such packets to a minimum of 36 bytes.
	 */
1127 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
1128 if (skb_put_padto(skb, 36))
1129 return NULL;
1130 }
1131
1132 if (BEx_chip(adapter) || lancer_chip(adapter)) {
1133 skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
1134 if (!skb)
1135 return NULL;
1136 }
1137
1138 return skb;
1139}
1140
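/* Closes out a batch of posted WRBs: requests an event/completion on the
 * last header WRB, pads with a dummy WRB when the chip requires an even
 * WRB count, and rings the TX doorbell for all pending WRBs.
 */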
1141static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
1142{
1143 struct be_queue_info *txq = &txo->q;
1144 struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);
1145
1146
1147 if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
1148 hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);
1149
1150
1151 if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
1152 wrb_fill_dummy(queue_head_node(txq));
1153 queue_head_inc(txq);
1154 atomic_inc(&txq->used);
1155 txo->pend_wrb_cnt++;
1156 hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
1157 TX_HDR_WRB_NUM_SHIFT);
1158 hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
1159 TX_HDR_WRB_NUM_SHIFT);
1160 }
1161 be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
1162 txo->pend_wrb_cnt = 0;
1163}

/* OS2BMC (OS-to-BMC traffic) related definitions */

1167#define DHCP_CLIENT_PORT 68
1168#define DHCP_SERVER_PORT 67
1169#define NET_BIOS_PORT1 137
1170#define NET_BIOS_PORT2 138
1171#define DHCPV6_RAS_PORT 547
1172
1173#define is_mc_allowed_on_bmc(adapter, eh) \
1174 (!is_multicast_filt_enabled(adapter) && \
1175 is_multicast_ether_addr(eh->h_dest) && \
1176 !is_broadcast_ether_addr(eh->h_dest))
1177
1178#define is_bc_allowed_on_bmc(adapter, eh) \
1179 (!is_broadcast_filt_enabled(adapter) && \
1180 is_broadcast_ether_addr(eh->h_dest))
1181
1182#define is_arp_allowed_on_bmc(adapter, skb) \
1183 (is_arp(skb) && is_arp_filt_enabled(adapter))
1184
1185#define is_broadcast_packet(eh, adapter) \
1186 (is_multicast_ether_addr(eh->h_dest) && \
1187 !compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))
1188
1189#define is_arp(skb) (skb->protocol == htons(ETH_P_ARP))
1190
1191#define is_arp_filt_enabled(adapter) \
1192 (adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))
1193
1194#define is_dhcp_client_filt_enabled(adapter) \
1195 (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)
1196
1197#define is_dhcp_srvr_filt_enabled(adapter) \
1198 (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)
1199
1200#define is_nbios_filt_enabled(adapter) \
1201 (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)
1202
1203#define is_ipv6_na_filt_enabled(adapter) \
1204 (adapter->bmc_filt_mask & \
1205 BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)
1206
1207#define is_ipv6_ra_filt_enabled(adapter) \
1208 (adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)
1209
1210#define is_ipv6_ras_filt_enabled(adapter) \
1211 (adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)
1212
1213#define is_broadcast_filt_enabled(adapter) \
1214 (adapter->bmc_filt_mask & BMC_FILT_BROADCAST)
1215
1216#define is_multicast_filt_enabled(adapter) \
1217 (adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
1218
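/* Decides whether a copy of this TX packet should also be sent to the BMC
 * (OS2BMC), based on the adapter's bmc_filt_mask. When it should, the VLAN
 * tag (if any) is re-inserted inline in the packet before the 2nd enqueue.
 */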
1219static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
1220 struct sk_buff **skb)
1221{
1222 struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
1223 bool os2bmc = false;
1224
1225 if (!be_is_os2bmc_enabled(adapter))
1226 goto done;
1227
1228 if (!is_multicast_ether_addr(eh->h_dest))
1229 goto done;
1230
1231 if (is_mc_allowed_on_bmc(adapter, eh) ||
1232 is_bc_allowed_on_bmc(adapter, eh) ||
1233 is_arp_allowed_on_bmc(adapter, (*skb))) {
1234 os2bmc = true;
1235 goto done;
1236 }
1237
1238 if ((*skb)->protocol == htons(ETH_P_IPV6)) {
1239 struct ipv6hdr *hdr = ipv6_hdr((*skb));
1240 u8 nexthdr = hdr->nexthdr;
1241
1242 if (nexthdr == IPPROTO_ICMPV6) {
1243 struct icmp6hdr *icmp6 = icmp6_hdr((*skb));
1244
1245 switch (icmp6->icmp6_type) {
1246 case NDISC_ROUTER_ADVERTISEMENT:
1247 os2bmc = is_ipv6_ra_filt_enabled(adapter);
1248 goto done;
1249 case NDISC_NEIGHBOUR_ADVERTISEMENT:
1250 os2bmc = is_ipv6_na_filt_enabled(adapter);
1251 goto done;
1252 default:
1253 break;
1254 }
1255 }
1256 }
1257
1258 if (is_udp_pkt((*skb))) {
1259 struct udphdr *udp = udp_hdr((*skb));
1260
		switch (ntohs(udp->dest)) {
1262 case DHCP_CLIENT_PORT:
1263 os2bmc = is_dhcp_client_filt_enabled(adapter);
1264 goto done;
1265 case DHCP_SERVER_PORT:
1266 os2bmc = is_dhcp_srvr_filt_enabled(adapter);
1267 goto done;
1268 case NET_BIOS_PORT1:
1269 case NET_BIOS_PORT2:
1270 os2bmc = is_nbios_filt_enabled(adapter);
1271 goto done;
1272 case DHCPV6_RAS_PORT:
1273 os2bmc = is_ipv6_ras_filt_enabled(adapter);
1274 goto done;
1275 default:
1276 break;
1277 }
1278 }
1279done:
	/* For packets over a VLAN that are destined to the BMC, the ASIC
	 * expects the VLAN tag to be inline in the packet.
	 */
1283 if (os2bmc)
1284 *skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);
1285
1286 return os2bmc;
1287}
1288
1289static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
1290{
1291 struct be_adapter *adapter = netdev_priv(netdev);
1292 u16 q_idx = skb_get_queue_mapping(skb);
1293 struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
1294 struct be_wrb_params wrb_params = { 0 };
1295 bool flush = !skb->xmit_more;
1296 u16 wrb_cnt;
1297
1298 skb = be_xmit_workarounds(adapter, skb, &wrb_params);
1299 if (unlikely(!skb))
1300 goto drop;
1301
1302 be_get_wrb_params_from_skb(adapter, skb, &wrb_params);
1303
1304 wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
1305 if (unlikely(!wrb_cnt)) {
1306 dev_kfree_skb_any(skb);
1307 goto drop;
1308 }

	/* If os2bmc is enabled and the packet is destined to the BMC,
	 * enqueue the packet a second time with the mgmt bit set.
	 */
1313 if (be_send_pkt_to_bmc(adapter, &skb)) {
1314 BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
1315 wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
1316 if (unlikely(!wrb_cnt))
1317 goto drop;
1318 else
1319 skb_get(skb);
1320 }
1321
1322 if (be_is_txq_full(txo)) {
1323 netif_stop_subqueue(netdev, q_idx);
1324 tx_stats(txo)->tx_stops++;
1325 }
1326
1327 if (flush || __netif_subqueue_stopped(netdev, q_idx))
1328 be_xmit_flush(adapter, txo);
1329
1330 return NETDEV_TX_OK;
1331drop:
1332 tx_stats(txo)->tx_drv_drops++;
1333
1334 if (flush && txo->pend_wrb_cnt)
1335 be_xmit_flush(adapter, txo);
1336
1337 return NETDEV_TX_OK;
1338}
1339
1340static int be_change_mtu(struct net_device *netdev, int new_mtu)
1341{
1342 struct be_adapter *adapter = netdev_priv(netdev);
1343 struct device *dev = &adapter->pdev->dev;
1344
1345 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1346 dev_info(dev, "MTU must be between %d and %d bytes\n",
1347 BE_MIN_MTU, BE_MAX_MTU);
1348 return -EINVAL;
1349 }
1350
1351 dev_info(dev, "MTU changed from %d to %d bytes\n",
1352 netdev->mtu, new_mtu);
1353 netdev->mtu = new_mtu;
1354 return 0;
1355}
1356
1357static inline bool be_in_all_promisc(struct be_adapter *adapter)
1358{
1359 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1360 BE_IF_FLAGS_ALL_PROMISCUOUS;
1361}
1362
1363static int be_set_vlan_promisc(struct be_adapter *adapter)
1364{
1365 struct device *dev = &adapter->pdev->dev;
1366 int status;
1367
1368 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1369 return 0;
1370
1371 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1372 if (!status) {
1373 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1374 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1375 } else {
1376 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1377 }
1378 return status;
1379}
1380
1381static int be_clear_vlan_promisc(struct be_adapter *adapter)
1382{
1383 struct device *dev = &adapter->pdev->dev;
1384 int status;
1385
1386 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1387 if (!status) {
1388 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1389 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1390 }
1391 return status;
1392}

/* A max of BE_NUM_VLANS_SUPPORTED vlans can be configured in the HW filter.
 * If the user configures more VLANs than that, the interface is placed in
 * VLAN promiscuous mode instead.
 */
1398static int be_vid_config(struct be_adapter *adapter)
1399{
1400 struct device *dev = &adapter->pdev->dev;
1401 u16 vids[BE_NUM_VLANS_SUPPORTED];
1402 u16 num = 0, i = 0;
1403 int status = 0;

	/* No need to further configure vids if in promiscuous mode */
1406 if (be_in_all_promisc(adapter))
1407 return 0;
1408
1409 if (adapter->vlans_added > be_max_vlans(adapter))
1410 return be_set_vlan_promisc(adapter);

	/* Construct the VLAN table to give to the HW */
1413 for_each_set_bit(i, adapter->vids, VLAN_N_VID)
1414 vids[num++] = cpu_to_le16(i);
1415
1416 status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
1417 if (status) {
1418 dev_err(dev, "Setting HW VLAN filtering failed\n");
1419
1420 if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
1421 addl_status(status) ==
1422 MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
1423 return be_set_vlan_promisc(adapter);
1424 } else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
1425 status = be_clear_vlan_promisc(adapter);
1426 }
1427 return status;
1428}
1429
1430static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
1431{
1432 struct be_adapter *adapter = netdev_priv(netdev);
1433 int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
1436 if (lancer_chip(adapter) && vid == 0)
1437 return status;
1438
1439 if (test_bit(vid, adapter->vids))
1440 return status;
1441
1442 set_bit(vid, adapter->vids);
1443 adapter->vlans_added++;
1444
1445 status = be_vid_config(adapter);
1446 if (status) {
1447 adapter->vlans_added--;
1448 clear_bit(vid, adapter->vids);
1449 }
1450
1451 return status;
1452}
1453
1454static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
1455{
1456 struct be_adapter *adapter = netdev_priv(netdev);
1457
1458
1459 if (lancer_chip(adapter) && vid == 0)
1460 return 0;
1461
1462 clear_bit(vid, adapter->vids);
1463 adapter->vlans_added--;
1464
1465 return be_vid_config(adapter);
1466}
1467
1468static void be_clear_all_promisc(struct be_adapter *adapter)
1469{
1470 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
1471 adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
1472}
1473
1474static void be_set_all_promisc(struct be_adapter *adapter)
1475{
1476 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
1477 adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
1478}
1479
1480static void be_set_mc_promisc(struct be_adapter *adapter)
1481{
1482 int status;
1483
1484 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1485 return;
1486
1487 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1488 if (!status)
1489 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1490}
1491
1492static void be_set_mc_list(struct be_adapter *adapter)
1493{
1494 int status;
1495
1496 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1497 if (!status)
1498 adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1499 else
1500 be_set_mc_promisc(adapter);
1501}
1502
1503static void be_set_uc_list(struct be_adapter *adapter)
1504{
1505 struct netdev_hw_addr *ha;
1506 int i = 1;
1507
1508 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
1509 be_cmd_pmac_del(adapter, adapter->if_handle,
1510 adapter->pmac_id[i], 0);
1511
1512 if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
1513 be_set_all_promisc(adapter);
1514 return;
1515 }
1516
1517 netdev_for_each_uc_addr(ha, adapter->netdev) {
1518 adapter->uc_macs++;
1519 be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
1520 &adapter->pmac_id[adapter->uc_macs], 0);
1521 }
1522}
1523
1524static void be_clear_uc_list(struct be_adapter *adapter)
1525{
1526 int i;
1527
1528 for (i = 1; i < (adapter->uc_macs + 1); i++)
1529 be_cmd_pmac_del(adapter, adapter->if_handle,
1530 adapter->pmac_id[i], 0);
1531 adapter->uc_macs = 0;
1532}
1533
1534static void be_set_rx_mode(struct net_device *netdev)
1535{
1536 struct be_adapter *adapter = netdev_priv(netdev);
1537
1538 if (netdev->flags & IFF_PROMISC) {
1539 be_set_all_promisc(adapter);
1540 return;
1541 }

	/* Interface was previously in promiscuous mode; turn it off */
1544 if (be_in_all_promisc(adapter)) {
1545 be_clear_all_promisc(adapter);
1546 if (adapter->vlans_added)
1547 be_vid_config(adapter);
1548 }

	/* Use multicast promiscuous mode if the MC list exceeds what the HW supports */
1551 if (netdev->flags & IFF_ALLMULTI ||
1552 netdev_mc_count(netdev) > be_max_mc(adapter)) {
1553 be_set_mc_promisc(adapter);
1554 return;
1555 }
1556
1557 if (netdev_uc_count(netdev) != adapter->uc_macs)
1558 be_set_uc_list(adapter);
1559
1560 be_set_mc_list(adapter);
1561}
1562
1563static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1564{
1565 struct be_adapter *adapter = netdev_priv(netdev);
1566 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1567 int status;
1568
1569 if (!sriov_enabled(adapter))
1570 return -EPERM;
1571
1572 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
1573 return -EINVAL;
1574

	/* Proceed further only if the user-provided MAC differs from the
	 * MAC currently programmed for the VF.
	 */
1578 if (ether_addr_equal(mac, vf_cfg->mac_addr))
1579 return 0;
1580
1581 if (BEx_chip(adapter)) {
1582 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1583 vf + 1);
1584
1585 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1586 &vf_cfg->pmac_id, vf + 1);
1587 } else {
1588 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1589 vf + 1);
1590 }
1591
1592 if (status) {
1593 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
1594 mac, vf, status);
1595 return be_cmd_status(status);
1596 }
1597
1598 ether_addr_copy(vf_cfg->mac_addr, mac);
1599
1600 return 0;
1601}
1602
1603static int be_get_vf_config(struct net_device *netdev, int vf,
1604 struct ifla_vf_info *vi)
1605{
1606 struct be_adapter *adapter = netdev_priv(netdev);
1607 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1608
1609 if (!sriov_enabled(adapter))
1610 return -EPERM;
1611
1612 if (vf >= adapter->num_vfs)
1613 return -EINVAL;
1614
1615 vi->vf = vf;
1616 vi->max_tx_rate = vf_cfg->tx_rate;
1617 vi->min_tx_rate = 0;
1618 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1619 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
1620 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1621 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
1622 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
1623
1624 return 0;
1625}
1626
1627static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
1628{
1629 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1630 u16 vids[BE_NUM_VLANS_SUPPORTED];
1631 int vf_if_id = vf_cfg->if_handle;
1632 int status;

	/* Enable Transparent VLAN Tagging */
1635 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
1636 if (status)
1637 return status;

	/* Clear pre-programmed VLAN filters on the VF, if any, now that TVT is enabled */
1640 vids[0] = 0;
1641 status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
1642 if (!status)
1643 dev_info(&adapter->pdev->dev,
1644 "Cleared guest VLANs on VF%d", vf);

	/* After TVT is enabled, disallow VFs to program VLAN filters */
1647 if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
1648 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
1649 ~BE_PRIV_FILTMGMT, vf + 1);
1650 if (!status)
1651 vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
1652 }
1653 return 0;
1654}
1655
1656static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
1657{
1658 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1659 struct device *dev = &adapter->pdev->dev;
1660 int status;

	/* Reset Transparent VLAN Tagging */
1663 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
1664 vf_cfg->if_handle, 0, 0);
1665 if (status)
1666 return status;

	/* Allow the VF to program VLAN filtering again */
1669 if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
1670 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
1671 BE_PRIV_FILTMGMT, vf + 1);
1672 if (!status) {
1673 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
1674 dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
1675 }
1676 }
1677
1678 dev_info(dev,
1679 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
1680 return 0;
1681}
1682
1683static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
1684{
1685 struct be_adapter *adapter = netdev_priv(netdev);
1686 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1687 int status;
1688
1689 if (!sriov_enabled(adapter))
1690 return -EPERM;
1691
1692 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
1693 return -EINVAL;
1694
1695 if (vlan || qos) {
1696 vlan |= qos << VLAN_PRIO_SHIFT;
1697 status = be_set_vf_tvt(adapter, vf, vlan);
1698 } else {
1699 status = be_clear_vf_tvt(adapter, vf);
1700 }
1701
1702 if (status) {
1703 dev_err(&adapter->pdev->dev,
1704 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
1705 status);
1706 return be_cmd_status(status);
1707 }
1708
1709 vf_cfg->vlan_tag = vlan;
1710 return 0;
1711}
1712
1713static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
1714 int min_tx_rate, int max_tx_rate)
1715{
1716 struct be_adapter *adapter = netdev_priv(netdev);
1717 struct device *dev = &adapter->pdev->dev;
1718 int percent_rate, status = 0;
1719 u16 link_speed = 0;
1720 u8 link_status;
1721
1722 if (!sriov_enabled(adapter))
1723 return -EPERM;
1724
1725 if (vf >= adapter->num_vfs)
1726 return -EINVAL;
1727
1728 if (min_tx_rate)
1729 return -EINVAL;
1730
1731 if (!max_tx_rate)
1732 goto config_qos;
1733
1734 status = be_cmd_link_status_query(adapter, &link_speed,
1735 &link_status, 0);
1736 if (status)
1737 goto err;
1738
1739 if (!link_status) {
1740 dev_err(dev, "TX-rate setting not allowed when link is down\n");
1741 status = -ENETDOWN;
1742 goto err;
1743 }
1744
1745 if (max_tx_rate < 100 || max_tx_rate > link_speed) {
1746 dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
1747 link_speed);
1748 status = -EINVAL;
1749 goto err;
1750 }

	/* On Skyhawk the QOS setting must be done only as a % of the link speed */
1753 percent_rate = link_speed / 100;
1754 if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
1755 dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
1756 percent_rate);
1757 status = -EINVAL;
1758 goto err;
1759 }
1760
1761config_qos:
1762 status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
1763 if (status)
1764 goto err;
1765
1766 adapter->vf_cfg[vf].tx_rate = max_tx_rate;
1767 return 0;
1768
1769err:
1770 dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
1771 max_tx_rate, vf);
1772 return be_cmd_status(status);
1773}
1774
1775static int be_set_vf_link_state(struct net_device *netdev, int vf,
1776 int link_state)
1777{
1778 struct be_adapter *adapter = netdev_priv(netdev);
1779 int status;
1780
1781 if (!sriov_enabled(adapter))
1782 return -EPERM;
1783
1784 if (vf >= adapter->num_vfs)
1785 return -EINVAL;
1786
1787 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
1788 if (status) {
1789 dev_err(&adapter->pdev->dev,
1790 "Link state change on VF %d failed: %#x\n", vf, status);
1791 return be_cmd_status(status);
1792 }
1793
1794 adapter->vf_cfg[vf].plink_tracking = link_state;
1795
1796 return 0;
1797}
1798
1799static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
1800{
1801 struct be_adapter *adapter = netdev_priv(netdev);
1802 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1803 u8 spoofchk;
1804 int status;
1805
1806 if (!sriov_enabled(adapter))
1807 return -EPERM;
1808
1809 if (vf >= adapter->num_vfs)
1810 return -EINVAL;
1811
1812 if (BEx_chip(adapter))
1813 return -EOPNOTSUPP;
1814
1815 if (enable == vf_cfg->spoofchk)
1816 return 0;
1817
1818 spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
1819
1820 status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
1821 0, spoofchk);
1822 if (status) {
1823 dev_err(&adapter->pdev->dev,
1824 "Spoofchk change on VF %d failed: %#x\n", vf, status);
1825 return be_cmd_status(status);
1826 }
1827
1828 vf_cfg->spoofchk = enable;
1829 return 0;
1830}
1831
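/* Adaptive interrupt coalescing: snapshots the per-EQ RX/TX packet counts
 * and the timestamp used to compute the next EQ delay.
 */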
1832static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1833 ulong now)
1834{
1835 aic->rx_pkts_prev = rx_pkts;
1836 aic->tx_reqs_prev = tx_pkts;
1837 aic->jiffies = now;
1838}
1839
1840static int be_get_new_eqd(struct be_eq_obj *eqo)
1841{
1842 struct be_adapter *adapter = eqo->adapter;
1843 int eqd, start;
1844 struct be_aic_obj *aic;
1845 struct be_rx_obj *rxo;
1846 struct be_tx_obj *txo;
1847 u64 rx_pkts = 0, tx_pkts = 0;
1848 ulong now;
1849 u32 pps, delta;
1850 int i;
1851
1852 aic = &adapter->aic_obj[eqo->idx];
1853 if (!aic->enable) {
1854 if (aic->jiffies)
1855 aic->jiffies = 0;
1856 eqd = aic->et_eqd;
1857 return eqd;
1858 }
1859
1860 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
1861 do {
1862 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
1863 rx_pkts += rxo->stats.rx_pkts;
1864 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
1865 }
1866
1867 for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
1868 do {
1869 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
1870 tx_pkts += txo->stats.tx_reqs;
1871 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
1872 }

	/* Skip, if wrapped around or first calculation */
1875 now = jiffies;
1876 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1877 rx_pkts < aic->rx_pkts_prev ||
1878 tx_pkts < aic->tx_reqs_prev) {
1879 be_aic_update(aic, rx_pkts, tx_pkts, now);
1880 return aic->prev_eqd;
1881 }
1882
1883 delta = jiffies_to_msecs(now - aic->jiffies);
1884 if (delta == 0)
1885 return aic->prev_eqd;
1886
1887 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1888 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1889 eqd = (pps / 15000) << 2;
1890
1891 if (eqd < 8)
1892 eqd = 0;
1893 eqd = min_t(u32, eqd, aic->max_eqd);
1894 eqd = max_t(u32, eqd, aic->min_eqd);
1895
1896 be_aic_update(aic, rx_pkts, tx_pkts, now);
1897
1898 return eqd;
1899}

/* Returns the delay-multiplier encoding to be programmed via the EQ doorbell */
1902static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
1903{
1904 struct be_adapter *adapter = eqo->adapter;
1905 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
1906 ulong now = jiffies;
1907 int eqd;
1908 u32 mult_enc;
1909
1910 if (!aic->enable)
1911 return 0;
1912
1913 if (time_before_eq(now, aic->jiffies) ||
1914 jiffies_to_msecs(now - aic->jiffies) < 1)
1915 eqd = aic->prev_eqd;
1916 else
1917 eqd = be_get_new_eqd(eqo);
1918
1919 if (eqd > 100)
1920 mult_enc = R2I_DLY_ENC_1;
1921 else if (eqd > 60)
1922 mult_enc = R2I_DLY_ENC_2;
1923 else if (eqd > 20)
1924 mult_enc = R2I_DLY_ENC_3;
1925 else
1926 mult_enc = R2I_DLY_ENC_0;
1927
1928 aic->prev_eqd = eqd;
1929
1930 return mult_enc;
1931}
1932
1933void be_eqd_update(struct be_adapter *adapter, bool force_update)
1934{
1935 struct be_set_eqd set_eqd[MAX_EVT_QS];
1936 struct be_aic_obj *aic;
1937 struct be_eq_obj *eqo;
1938 int i, num = 0, eqd;
1939
1940 for_all_evt_queues(adapter, eqo, i) {
1941 aic = &adapter->aic_obj[eqo->idx];
1942 eqd = be_get_new_eqd(eqo);
1943 if (force_update || eqd != aic->prev_eqd) {
1944 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1945 set_eqd[num].eq_id = eqo->q.id;
1946 aic->prev_eqd = eqd;
1947 num++;
1948 }
1949 }
1950
1951 if (num)
1952 be_cmd_modify_eqd(adapter, set_eqd, num);
1953}
1954
1955static void be_rx_stats_update(struct be_rx_obj *rxo,
1956 struct be_rx_compl_info *rxcp)
1957{
1958 struct be_rx_stats *stats = rx_stats(rxo);
1959
1960 u64_stats_update_begin(&stats->sync);
1961 stats->rx_compl++;
1962 stats->rx_bytes += rxcp->pkt_size;
1963 stats->rx_pkts++;
1964 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1965 stats->rx_mcast_pkts++;
1966 if (rxcp->err)
1967 stats->rx_compl_err++;
1968 u64_stats_update_end(&stats->sync);
1969}
1970
1971static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1972{
	/* The L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore the ipcksm bit for ipv6 packets.
	 */
1976 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1977 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
1978}
1979
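/* Pops the RX page-info entry at the queue tail: unmaps the page when this
 * is its last fragment, otherwise just syncs the fragment for CPU access.
 */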
1980static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
1981{
1982 struct be_adapter *adapter = rxo->adapter;
1983 struct be_rx_page_info *rx_page_info;
1984 struct be_queue_info *rxq = &rxo->q;
1985 u16 frag_idx = rxq->tail;
1986
1987 rx_page_info = &rxo->page_info_tbl[frag_idx];
1988 BUG_ON(!rx_page_info->page);
1989
1990 if (rx_page_info->last_frag) {
1991 dma_unmap_page(&adapter->pdev->dev,
1992 dma_unmap_addr(rx_page_info, bus),
1993 adapter->big_page_size, DMA_FROM_DEVICE);
1994 rx_page_info->last_frag = false;
1995 } else {
1996 dma_sync_single_for_cpu(&adapter->pdev->dev,
1997 dma_unmap_addr(rx_page_info, bus),
1998 rx_frag_size, DMA_FROM_DEVICE);
1999 }
2000
2001 queue_tail_inc(rxq);
2002 atomic_dec(&rxq->used);
2003 return rx_page_info;
2004}

/* Throw away the data in this RX completion */
2007static void be_rx_compl_discard(struct be_rx_obj *rxo,
2008 struct be_rx_compl_info *rxcp)
2009{
2010 struct be_rx_page_info *page_info;
2011 u16 i, num_rcvd = rxcp->num_rcvd;
2012
2013 for (i = 0; i < num_rcvd; i++) {
2014 page_info = get_rx_page_info(rxo);
2015 put_page(page_info->page);
2016 memset(page_info, 0, sizeof(*page_info));
2017 }
2018}

/* skb_fill_rx_data forms a complete skb for the ether frame indicated by
 * rxcp: the header is copied into the skb's linear area and the remaining
 * RX fragments are attached as page frags.
 */
2024static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
2025 struct be_rx_compl_info *rxcp)
2026{
2027 struct be_rx_page_info *page_info;
2028 u16 i, j;
2029 u16 hdr_len, curr_frag_len, remaining;
2030 u8 *start;
2031
2032 page_info = get_rx_page_info(rxo);
2033 start = page_address(page_info->page) + page_info->page_offset;
2034 prefetch(start);
2035
2036 /* Copy data from the first fragment of this completion */
2037 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
2038
2039 skb->len = curr_frag_len;
2040 if (curr_frag_len <= BE_HDR_LEN) {
2041 memcpy(skb->data, start, curr_frag_len);
2042 /* Complete packet has been copied to the skb; release the page */
2043 put_page(page_info->page);
2044 skb->data_len = 0;
2045 skb->tail += curr_frag_len;
2046 } else {
2047 hdr_len = ETH_HLEN;
2048 memcpy(skb->data, start, hdr_len);
2049 skb_shinfo(skb)->nr_frags = 1;
2050 skb_frag_set_page(skb, 0, page_info->page);
2051 skb_shinfo(skb)->frags[0].page_offset =
2052 page_info->page_offset + hdr_len;
2053 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
2054 curr_frag_len - hdr_len);
2055 skb->data_len = curr_frag_len - hdr_len;
2056 skb->truesize += rx_frag_size;
2057 skb->tail += hdr_len;
2058 }
2059 page_info->page = NULL;
2060
2061 if (rxcp->pkt_size <= rx_frag_size) {
2062 BUG_ON(rxcp->num_rcvd != 1);
2063 return;
2064 }
2065
2066 /* More frags present for this completion */
2067 remaining = rxcp->pkt_size - curr_frag_len;
2068 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
2069 page_info = get_rx_page_info(rxo);
2070 curr_frag_len = min(remaining, rx_frag_size);
2071
2072 /* Coalesce all frags from the same physical page in one slot */
2073 if (page_info->page_offset == 0) {
2074 /* Fresh page */
2075 j++;
2076 skb_frag_set_page(skb, j, page_info->page);
2077 skb_shinfo(skb)->frags[j].page_offset =
2078 page_info->page_offset;
2079 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
2080 skb_shinfo(skb)->nr_frags++;
2081 } else {
2082 put_page(page_info->page);
2083 }
2084
2085 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
2086 skb->len += curr_frag_len;
2087 skb->data_len += curr_frag_len;
2088 skb->truesize += rx_frag_size;
2089 remaining -= curr_frag_len;
2090 page_info->page = NULL;
2091 }
2092 BUG_ON(j > MAX_SKB_FRAGS);
2093}
2094
2095 /* Process the RX completion indicated by rxcp when GRO is disabled */
2096static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
2097 struct be_rx_compl_info *rxcp)
2098{
2099 struct be_adapter *adapter = rxo->adapter;
2100 struct net_device *netdev = adapter->netdev;
2101 struct sk_buff *skb;
2102
2103 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
2104 if (unlikely(!skb)) {
2105 rx_stats(rxo)->rx_drops_no_skbs++;
2106 be_rx_compl_discard(rxo, rxcp);
2107 return;
2108 }
2109
2110 skb_fill_rx_data(rxo, skb, rxcp);
2111
2112 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
2113 skb->ip_summed = CHECKSUM_UNNECESSARY;
2114 else
2115 skb_checksum_none_assert(skb);
2116
2117 skb->protocol = eth_type_trans(skb, netdev);
2118 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
2119 if (netdev->features & NETIF_F_RXHASH)
2120 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
2121
2122 skb->csum_level = rxcp->tunneled;
2123 skb_mark_napi_id(skb, napi);
2124
2125 if (rxcp->vlanf)
2126 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
2127
2128 netif_receive_skb(skb);
2129}
2130
2131 /* Process the RX completion indicated by rxcp when GRO is enabled */
2132static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
2133 struct napi_struct *napi,
2134 struct be_rx_compl_info *rxcp)
2135{
2136 struct be_adapter *adapter = rxo->adapter;
2137 struct be_rx_page_info *page_info;
2138 struct sk_buff *skb = NULL;
2139 u16 remaining, curr_frag_len;
2140 u16 i, j;
2141
2142 skb = napi_get_frags(napi);
2143 if (!skb) {
2144 be_rx_compl_discard(rxo, rxcp);
2145 return;
2146 }
2147
2148 remaining = rxcp->pkt_size;
2149 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
2150 page_info = get_rx_page_info(rxo);
2151
2152 curr_frag_len = min(remaining, rx_frag_size);
2153
2154 /* Coalesce all frags from the same physical page in one slot */
2155 if (i == 0 || page_info->page_offset == 0) {
2156 /* First frag or fresh page */
2157 j++;
2158 skb_frag_set_page(skb, j, page_info->page);
2159 skb_shinfo(skb)->frags[j].page_offset =
2160 page_info->page_offset;
2161 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
2162 } else {
2163 put_page(page_info->page);
2164 }
2165 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
2166 skb->truesize += rx_frag_size;
2167 remaining -= curr_frag_len;
2168 memset(page_info, 0, sizeof(*page_info));
2169 }
2170 BUG_ON(j > MAX_SKB_FRAGS);
2171
2172 skb_shinfo(skb)->nr_frags = j + 1;
2173 skb->len = rxcp->pkt_size;
2174 skb->data_len = rxcp->pkt_size;
2175 skb->ip_summed = CHECKSUM_UNNECESSARY;
2176 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
2177 if (adapter->netdev->features & NETIF_F_RXHASH)
2178 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
2179
2180 skb->csum_level = rxcp->tunneled;
2181 skb_mark_napi_id(skb, napi);
2182
2183 if (rxcp->vlanf)
2184 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
2185
2186 napi_gro_frags(napi);
2187}
2188
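/* Parse a v1 (BE3-native) RX completion into the driver's rxcp structure */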
2189static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
2190 struct be_rx_compl_info *rxcp)
2191{
2192 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
2193 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
2194 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
2195 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
2196 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
2197 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
2198 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
2199 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
2200 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
2201 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
2202 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
2203 if (rxcp->vlanf) {
2204 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
2205 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
2206 }
2207 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
2208 rxcp->tunneled =
2209 GET_RX_COMPL_V1_BITS(tunneled, compl);
2210}
2211
2212static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
2213 struct be_rx_compl_info *rxcp)
2214{
2215 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
2216 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
2217 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
2218 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
2219 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
2220 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
2221 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
2222 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
2223 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
2224 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
2225 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
2226 if (rxcp->vlanf) {
2227 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
2228 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
2229 }
2230 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
2231 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
2232}
2233
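/* Return the next valid RX completion from the RX CQ, converted to CPU
 * endianness and parsed into rxo->rxcp, or NULL if the CQ is empty.
 */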
2234static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
2235{
2236 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
2237 struct be_rx_compl_info *rxcp = &rxo->rxcp;
2238 struct be_adapter *adapter = rxo->adapter;
2239
2240 /* The valid bit is at the same position in the v0 and v1 compl
2241  * layouts, so checking it via the v1 layout works for both. */
2242 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
2243 return NULL;
2244
2245 rmb();
2246 be_dws_le_to_cpu(compl, sizeof(*compl));
2247
2248 if (adapter->be3_native)
2249 be_parse_rx_compl_v1(compl, rxcp);
2250 else
2251 be_parse_rx_compl_v0(compl, rxcp);
2252
2253 if (rxcp->ip_frag)
2254 rxcp->l4_csum = 0;
2255
2256 if (rxcp->vlanf) {
2257 /* In QnQ modes, if the qnq bit is not set, the frame was tagged
2258  * only with the transparent outer vlan-tag and must not be
2259  * treated as a vlan packet by the host.
2260  */
2261 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
2262 rxcp->vlanf = 0;
2263
2264 if (!lancer_chip(adapter))
2265 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
2266
2267 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
2268 !test_bit(rxcp->vlan_tag, adapter->vids))
2269 rxcp->vlanf = 0;
2270 }
2271
2272 /* The compl has been parsed; clear the valid bit so it is not processed again */
2273 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
2274
2275 queue_tail_inc(&rxo->cq);
2276 return rxcp;
2277}
2278
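/* Allocate a (possibly compound) page large enough to hold 'size' bytes */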
2279static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
2280{
2281 u32 order = get_order(size);
2282
2283 if (order > 0)
2284 gfp |= __GFP_COMP;
2285 return alloc_pages(gfp, order);
2286}
2287
2288 /* Allocate a page, split it into fragments of size rx_frag_size
2289  * and post them as receive buffers to the adapter.
2290  * Refills the RXQ with up to frags_needed entries.
2291  */
2292static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
2293{
2294 struct be_adapter *adapter = rxo->adapter;
2295 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
2296 struct be_queue_info *rxq = &rxo->q;
2297 struct page *pagep = NULL;
2298 struct device *dev = &adapter->pdev->dev;
2299 struct be_eth_rx_d *rxd;
2300 u64 page_dmaaddr = 0, frag_dmaaddr;
2301 u32 posted, page_offset = 0, notify = 0;
2302
2303 page_info = &rxo->page_info_tbl[rxq->head];
2304 for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
2305 if (!pagep) {
2306 pagep = be_alloc_pages(adapter->big_page_size, gfp);
2307 if (unlikely(!pagep)) {
2308 rx_stats(rxo)->rx_post_fail++;
2309 break;
2310 }
2311 page_dmaaddr = dma_map_page(dev, pagep, 0,
2312 adapter->big_page_size,
2313 DMA_FROM_DEVICE);
2314 if (dma_mapping_error(dev, page_dmaaddr)) {
2315 put_page(pagep);
2316 pagep = NULL;
2317 adapter->drv_stats.dma_map_errors++;
2318 break;
2319 }
2320 page_offset = 0;
2321 } else {
2322 get_page(pagep);
2323 page_offset += rx_frag_size;
2324 }
2325 page_info->page_offset = page_offset;
2326 page_info->page = pagep;
2327
2328 rxd = queue_head_node(rxq);
2329 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
2330 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
2331 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
2332
2333 /* Any space left in the current large page for another frag? */
2334 if ((page_offset + rx_frag_size + rx_frag_size) >
2335 adapter->big_page_size) {
2336 pagep = NULL;
2337 page_info->last_frag = true;
2338 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
2339 } else {
2340 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
2341 }
2342
2343 prev_page_info = page_info;
2344 queue_head_inc(rxq);
2345 page_info = &rxo->page_info_tbl[rxq->head];
2346 }
2347
2348 /* Mark the last frag of a page when we break out of the above loop
2349  * with no more slots available in the RXQ.
2350  */
2351 if (pagep) {
2352 prev_page_info->last_frag = true;
2353 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
2354 }
2355
2356 if (posted) {
2357 atomic_add(posted, &rxq->used);
2358 if (rxo->rx_post_starved)
2359 rxo->rx_post_starved = false;
2360 do {
2361 notify = min(MAX_NUM_POST_ERX_DB, posted);
2362 be_rxq_notify(adapter, rxq->id, notify);
2363 posted -= notify;
2364 } while (posted);
2365 } else if (atomic_read(&rxq->used) == 0) {
2366 /* Let be_worker replenish when memory is available */
2367 rxo->rx_post_starved = true;
2368 }
2369}
2370
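/* Return the next valid TX completion from the TX CQ, or NULL if none is pending */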
2371static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
2372{
2373 struct be_queue_info *tx_cq = &txo->cq;
2374 struct be_tx_compl_info *txcp = &txo->txcp;
2375 struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);
2376
2377 if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
2378 return NULL;
2379
2380 /* Read the rest of the compl only after the valid bit has been seen */
2381 rmb();
2382 be_dws_le_to_cpu(compl, sizeof(*compl));
2383
2384 txcp->status = GET_TX_COMPL_BITS(status, compl);
2385 txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);
2386
2387 compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
2388 queue_tail_inc(tx_cq);
2389 return txcp;
2390}
2391
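/* Unmap the wrbs of the completed request(s) up to last_index and free the
 * corresponding skbs; returns the number of wrbs processed.
 */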
2392static u16 be_tx_compl_process(struct be_adapter *adapter,
2393 struct be_tx_obj *txo, u16 last_index)
2394{
2395 struct sk_buff **sent_skbs = txo->sent_skb_list;
2396 struct be_queue_info *txq = &txo->q;
2397 u16 frag_index, num_wrbs = 0;
2398 struct sk_buff *skb = NULL;
2399 bool unmap_skb_hdr = false;
2400 struct be_eth_wrb *wrb;
2401
2402 do {
2403 if (sent_skbs[txq->tail]) {
2404 /* Free the skb from the previous request */
2405 if (skb)
2406 dev_consume_skb_any(skb);
2407 skb = sent_skbs[txq->tail];
2408 sent_skbs[txq->tail] = NULL;
2409 queue_tail_inc(txq);
2410 num_wrbs++;
2411 unmap_skb_hdr = true;
2412 }
2413 wrb = queue_tail_node(txq);
2414 frag_index = txq->tail;
2415 unmap_tx_frag(&adapter->pdev->dev, wrb,
2416 (unmap_skb_hdr && skb_headlen(skb)));
2417 unmap_skb_hdr = false;
2418 queue_tail_inc(txq);
2419 num_wrbs++;
2420 } while (frag_index != last_index);
2421 dev_consume_skb_any(skb);
2422
2423 return num_wrbs;
2424}
2425
2426 /* Count and consume all pending entries in the eqo's event queue */
2427static inline int events_get(struct be_eq_obj *eqo)
2428{
2429 struct be_eq_entry *eqe;
2430 int num = 0;
2431
2432 do {
2433 eqe = queue_tail_node(&eqo->q);
2434 if (eqe->evt == 0)
2435 break;
2436
2437 rmb();
2438 eqe->evt = 0;
2439 num++;
2440 queue_tail_inc(&eqo->q);
2441 } while (true);
2442
2443 return num;
2444}
2445
2446 /* Drain pending events and leave the EQ in an unarmed state */
2447static void be_eq_clean(struct be_eq_obj *eqo)
2448{
2449 int num = events_get(eqo);
2450
2451 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
2452}
2453
2454 /* Free posted RX buffers that were never used */
2455static void be_rxq_clean(struct be_rx_obj *rxo)
2456{
2457 struct be_queue_info *rxq = &rxo->q;
2458 struct be_rx_page_info *page_info;
2459
2460 while (atomic_read(&rxq->used) > 0) {
2461 page_info = get_rx_page_info(rxo);
2462 put_page(page_info->page);
2463 memset(page_info, 0, sizeof(*page_info));
2464 }
2465 BUG_ON(atomic_read(&rxq->used));
2466 rxq->tail = 0;
2467 rxq->head = 0;
2468}
2469
2470static void be_rx_cq_clean(struct be_rx_obj *rxo)
2471{
2472 struct be_queue_info *rx_cq = &rxo->cq;
2473 struct be_rx_compl_info *rxcp;
2474 struct be_adapter *adapter = rxo->adapter;
2475 int flush_wait = 0;
2476
2477 /* Consume pending rx completions.
2478  * Wait for the flush completion (identified by zero num_rcvd)
2479  * to arrive. Notify the CQ even when there are no more CQ entries
2480  * for HW to flush partially coalesced CQ entries.
2481  * On Lancer there is no need to wait for a flush completion.
2482  */
2483 for (;;) {
2484 rxcp = be_rx_compl_get(rxo);
2485 if (!rxcp) {
2486 if (lancer_chip(adapter))
2487 break;
2488
2489 if (flush_wait++ > 50 ||
2490 be_check_error(adapter,
2491 BE_ERROR_HW)) {
2492 dev_warn(&adapter->pdev->dev,
2493 "did not receive flush compl\n");
2494 break;
2495 }
2496 be_cq_notify(adapter, rx_cq->id, true, 0);
2497 mdelay(1);
2498 } else {
2499 be_rx_compl_discard(rxo, rxcp);
2500 be_cq_notify(adapter, rx_cq->id, false, 1);
2501 if (rxcp->num_rcvd == 0)
2502 break;
2503 }
2504 }
2505
2506
2507 be_cq_notify(adapter, rx_cq->id, false, 0);
2508}
2509
2510static void be_tx_compl_clean(struct be_adapter *adapter)
2511{
2512 u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
2513 struct device *dev = &adapter->pdev->dev;
2514 struct be_tx_compl_info *txcp;
2515 struct be_queue_info *txq;
2516 struct be_tx_obj *txo;
2517 int i, pending_txqs;
2518
2519
2520 do {
2521 pending_txqs = adapter->num_tx_qs;
2522
2523 for_all_tx_queues(adapter, txo, i) {
2524 cmpl = 0;
2525 num_wrbs = 0;
2526 txq = &txo->q;
2527 while ((txcp = be_tx_compl_get(txo))) {
2528 num_wrbs +=
2529 be_tx_compl_process(adapter, txo,
2530 txcp->end_index);
2531 cmpl++;
2532 }
2533 if (cmpl) {
2534 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2535 atomic_sub(num_wrbs, &txq->used);
2536 timeo = 0;
2537 }
2538 if (!be_is_tx_compl_pending(txo))
2539 pending_txqs--;
2540 }
2541
2542 if (pending_txqs == 0 || ++timeo > 10 ||
2543 be_check_error(adapter, BE_ERROR_HW))
2544 break;
2545
2546 mdelay(1);
2547 } while (true);
2548
2549 /* Free enqueued TX that was never notified to HW */
2550 for_all_tx_queues(adapter, txo, i) {
2551 txq = &txo->q;
2552
2553 if (atomic_read(&txq->used)) {
2554 dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
2555 i, atomic_read(&txq->used));
2556 notified_idx = txq->tail;
2557 end_idx = txq->tail;
2558 index_adv(&end_idx, atomic_read(&txq->used) - 1,
2559 txq->len);
2560
2561 /* Use the tx-compl process logic to handle requests
2562  * that were never sent to the HW. */
2563 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2564 atomic_sub(num_wrbs, &txq->used);
2565 BUG_ON(atomic_read(&txq->used));
2566 txo->pend_wrb_cnt = 0;
2567
2568 /* Since HW was never notified of these requests,
2569  * reset the TXQ indices. */
2570 txq->head = notified_idx;
2571 txq->tail = notified_idx;
2572 }
2573 }
2574}
2575
2576static void be_evt_queues_destroy(struct be_adapter *adapter)
2577{
2578 struct be_eq_obj *eqo;
2579 int i;
2580
2581 for_all_evt_queues(adapter, eqo, i) {
2582 if (eqo->q.created) {
2583 be_eq_clean(eqo);
2584 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
2585 napi_hash_del(&eqo->napi);
2586 netif_napi_del(&eqo->napi);
2587 free_cpumask_var(eqo->affinity_mask);
2588 }
2589 be_queue_free(adapter, &eqo->q);
2590 }
2591}
2592
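/* Create one event queue per interrupt vector (capped by cfg_num_qs), along
 * with its NAPI context and CPU affinity mask.
 */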
2593static int be_evt_queues_create(struct be_adapter *adapter)
2594{
2595 struct be_queue_info *eq;
2596 struct be_eq_obj *eqo;
2597 struct be_aic_obj *aic;
2598 int i, rc;
2599
2600 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2601 adapter->cfg_num_qs);
2602
2603 for_all_evt_queues(adapter, eqo, i) {
2604 int numa_node = dev_to_node(&adapter->pdev->dev);
2605
2606 aic = &adapter->aic_obj[i];
2607 eqo->adapter = adapter;
2608 eqo->idx = i;
2609 aic->max_eqd = BE_MAX_EQD;
2610 aic->enable = true;
2611
2612 eq = &eqo->q;
2613 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2614 sizeof(struct be_eq_entry));
2615 if (rc)
2616 return rc;
2617
2618 rc = be_cmd_eq_create(adapter, eqo);
2619 if (rc)
2620 return rc;
2621
2622 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
2623 return -ENOMEM;
2624 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
2625 eqo->affinity_mask);
2626 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2627 BE_NAPI_WEIGHT);
2628 napi_hash_add(&eqo->napi);
2629 }
2630 return 0;
2631}
2632
2633static void be_mcc_queues_destroy(struct be_adapter *adapter)
2634{
2635 struct be_queue_info *q;
2636
2637 q = &adapter->mcc_obj.q;
2638 if (q->created)
2639 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
2640 be_queue_free(adapter, q);
2641
2642 q = &adapter->mcc_obj.cq;
2643 if (q->created)
2644 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2645 be_queue_free(adapter, q);
2646}
2647
2648
2649static int be_mcc_queues_create(struct be_adapter *adapter)
2650{
2651 struct be_queue_info *q, *cq;
2652
2653 cq = &adapter->mcc_obj.cq;
2654 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
2655 sizeof(struct be_mcc_compl)))
2656 goto err;
2657
2658
2659 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
2660 goto mcc_cq_free;
2661
2662 q = &adapter->mcc_obj.q;
2663 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2664 goto mcc_cq_destroy;
2665
2666 if (be_cmd_mccq_create(adapter, q, cq))
2667 goto mcc_q_free;
2668
2669 return 0;
2670
2671mcc_q_free:
2672 be_queue_free(adapter, q);
2673mcc_cq_destroy:
2674 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
2675mcc_cq_free:
2676 be_queue_free(adapter, cq);
2677err:
2678 return -1;
2679}
2680
2681static void be_tx_queues_destroy(struct be_adapter *adapter)
2682{
2683 struct be_queue_info *q;
2684 struct be_tx_obj *txo;
2685 u8 i;
2686
2687 for_all_tx_queues(adapter, txo, i) {
2688 q = &txo->q;
2689 if (q->created)
2690 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2691 be_queue_free(adapter, q);
2692
2693 q = &txo->cq;
2694 if (q->created)
2695 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2696 be_queue_free(adapter, q);
2697 }
2698}
2699
2700static int be_tx_qs_create(struct be_adapter *adapter)
2701{
2702 struct be_queue_info *cq;
2703 struct be_tx_obj *txo;
2704 struct be_eq_obj *eqo;
2705 int status, i;
2706
2707 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
2708
2709 for_all_tx_queues(adapter, txo, i) {
2710 cq = &txo->cq;
2711 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2712 sizeof(struct be_eth_tx_compl));
2713 if (status)
2714 return status;
2715
2716 u64_stats_init(&txo->stats.sync);
2717 u64_stats_init(&txo->stats.sync_compl);
2718
2719 /* If num_evt_qs is less than num_tx_qs, more than one TXQ
2720  * will share an EQ.
2721  */
2722 eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
2723 status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
2724 if (status)
2725 return status;
2726
2727 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2728 sizeof(struct be_eth_wrb));
2729 if (status)
2730 return status;
2731
2732 status = be_cmd_txq_create(adapter, txo);
2733 if (status)
2734 return status;
2735
2736 netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
2737 eqo->idx);
2738 }
2739
2740 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2741 adapter->num_tx_qs);
2742 return 0;
2743}
2744
2745static void be_rx_cqs_destroy(struct be_adapter *adapter)
2746{
2747 struct be_queue_info *q;
2748 struct be_rx_obj *rxo;
2749 int i;
2750
2751 for_all_rx_queues(adapter, rxo, i) {
2752 q = &rxo->cq;
2753 if (q->created)
2754 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2755 be_queue_free(adapter, q);
2756 }
2757}
2758
2759static int be_rx_cqs_create(struct be_adapter *adapter)
2760{
2761 struct be_queue_info *eq, *cq;
2762 struct be_rx_obj *rxo;
2763 int rc, i;
2764
2765
2766 adapter->num_rss_qs = adapter->num_evt_qs;
2767
2768
2769 if (adapter->num_rss_qs <= 1)
2770 adapter->num_rss_qs = 0;
2771
2772 adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;
2773
2774 /* Even when the interface is not RSS-capable (and no default
2775  * RXQ is needed), at least one RXQ must still be created.
2776  */
2777 if (adapter->num_rx_qs == 0)
2778 adapter->num_rx_qs = 1;
2779
2780 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2781 for_all_rx_queues(adapter, rxo, i) {
2782 rxo->adapter = adapter;
2783 cq = &rxo->cq;
2784 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2785 sizeof(struct be_eth_rx_compl));
2786 if (rc)
2787 return rc;
2788
2789 u64_stats_init(&rxo->stats.sync);
2790 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2791 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2792 if (rc)
2793 return rc;
2794 }
2795
2796 dev_info(&adapter->pdev->dev,
2797 "created %d RX queue(s)\n", adapter->num_rx_qs);
2798 return 0;
2799}
2800
2801static irqreturn_t be_intx(int irq, void *dev)
2802{
2803 struct be_eq_obj *eqo = dev;
2804 struct be_adapter *adapter = eqo->adapter;
2805 int num_evts = 0;
2806
2807 /* An interrupt is not expected when NAPI is already scheduled,
2808  * as the EQ will not be armed.
2809  * But this can happen on Lancer INTx, where it takes a while to
2810  * de-assert INTx, or on BE2 where an interrupt may occasionally
2811  * be raised even when the EQ is unarmed.
2812  * If NAPI is already scheduled, counting and notifying events
2813  * here would orphan them.
2814  */
2815 if (napi_schedule_prep(&eqo->napi)) {
2816 num_evts = events_get(eqo);
2817 __napi_schedule(&eqo->napi);
2818 if (num_evts)
2819 eqo->spurious_intr = 0;
2820 }
2821 be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
2822
2823 /* Return IRQ_HANDLED only for the first spurious intr
2824  * after a valid intr, to stop the kernel from branding
2825  * this irq as a bad one!
2826  */
2827 if (num_evts || eqo->spurious_intr++ == 0)
2828 return IRQ_HANDLED;
2829 else
2830 return IRQ_NONE;
2831}
2832
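/* MSI-X handler: notify the EQ doorbell and defer all processing to NAPI */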
2833static irqreturn_t be_msix(int irq, void *dev)
2834{
2835 struct be_eq_obj *eqo = dev;
2836
2837 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
2838 napi_schedule(&eqo->napi);
2839 return IRQ_HANDLED;
2840}
2841
2842static inline bool do_gro(struct be_rx_compl_info *rxcp)
2843{
2844 return rxcp->tcpf && !rxcp->err && rxcp->l4_csum;
2845}
2846
2847static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2848 int budget, int polling)
2849{
2850 struct be_adapter *adapter = rxo->adapter;
2851 struct be_queue_info *rx_cq = &rxo->cq;
2852 struct be_rx_compl_info *rxcp;
2853 u32 work_done;
2854 u32 frags_consumed = 0;
2855
2856 for (work_done = 0; work_done < budget; work_done++) {
2857 rxcp = be_rx_compl_get(rxo);
2858 if (!rxcp)
2859 break;
2860
2861
2862 if (unlikely(rxcp->num_rcvd == 0))
2863 goto loop_continue;
2864
2865
2866 if (unlikely(!rxcp->pkt_size)) {
2867 be_rx_compl_discard(rxo, rxcp);
2868 goto loop_continue;
2869 }
2870
2871 /* On BE, drop packets that arrive due to imperfect filtering in
2872  * promiscuous mode on some skews.
2873  */
2874 if (unlikely(rxcp->port != adapter->port_num &&
2875 !lancer_chip(adapter))) {
2876 be_rx_compl_discard(rxo, rxcp);
2877 goto loop_continue;
2878 }
2879
2880
2881 if (do_gro(rxcp) && polling != BUSY_POLLING)
2882 be_rx_compl_process_gro(rxo, napi, rxcp);
2883 else
2884 be_rx_compl_process(rxo, napi, rxcp);
2885
2886loop_continue:
2887 frags_consumed += rxcp->num_rcvd;
2888 be_rx_stats_update(rxo, rxcp);
2889 }
2890
2891 if (work_done) {
2892 be_cq_notify(adapter, rx_cq->id, true, work_done);
2893
2894 /* When an rx-obj is in the post_starved state, leave the
2895  * re-posting of buffers to be_worker.
2896  */
2897 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2898 !rxo->rx_post_starved)
2899 be_post_rx_frags(rxo, GFP_ATOMIC,
2900 max_t(u32, MAX_RX_POST,
2901 frags_consumed));
2902 }
2903
2904 return work_done;
2905}
2906
2907static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
2908{
2909 switch (status) {
2910 case BE_TX_COMP_HDR_PARSE_ERR:
2911 tx_stats(txo)->tx_hdr_parse_err++;
2912 break;
2913 case BE_TX_COMP_NDMA_ERR:
2914 tx_stats(txo)->tx_dma_err++;
2915 break;
2916 case BE_TX_COMP_ACL_ERR:
2917 tx_stats(txo)->tx_spoof_check_err++;
2918 break;
2919 }
2920}
2921
2922static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
2923{
2924 switch (status) {
2925 case LANCER_TX_COMP_LSO_ERR:
2926 tx_stats(txo)->tx_tso_err++;
2927 break;
2928 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2929 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2930 tx_stats(txo)->tx_spoof_check_err++;
2931 break;
2932 case LANCER_TX_COMP_QINQ_ERR:
2933 tx_stats(txo)->tx_qinq_err++;
2934 break;
2935 case LANCER_TX_COMP_PARITY_ERR:
2936 tx_stats(txo)->tx_internal_parity_err++;
2937 break;
2938 case LANCER_TX_COMP_DMA_ERR:
2939 tx_stats(txo)->tx_dma_err++;
2940 break;
2941 }
2942}
2943
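/* Reap TX completions on this TX queue: free wrbs/skbs, record error stats
 * and wake the netdev sub-queue if it was stopped for lack of wrbs.
 */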
2944static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2945 int idx)
2946{
2947 int num_wrbs = 0, work_done = 0;
2948 struct be_tx_compl_info *txcp;
2949
2950 while ((txcp = be_tx_compl_get(txo))) {
2951 num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
2952 work_done++;
2953
2954 if (txcp->status) {
2955 if (lancer_chip(adapter))
2956 lancer_update_tx_err(txo, txcp->status);
2957 else
2958 be_update_tx_err(txo, txcp->status);
2959 }
2960 }
2961
2962 if (work_done) {
2963 be_cq_notify(adapter, txo->cq.id, true, work_done);
2964 atomic_sub(num_wrbs, &txo->q.used);
2965
2966 /* As TX wrbs have been freed up, wake the netdev queue if it
2967  * was stopped due to a lack of TX wrbs. */
2968 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2969 be_can_txq_wake(txo)) {
2970 netif_wake_subqueue(adapter->netdev, idx);
2971 }
2972
2973 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2974 tx_stats(txo)->tx_compl += work_done;
2975 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2976 }
2977}
2978
2979#ifdef CONFIG_NET_RX_BUSY_POLL
2980static inline bool be_lock_napi(struct be_eq_obj *eqo)
2981{
2982 bool status = true;
2983
2984 spin_lock(&eqo->lock);
2985 if (eqo->state & BE_EQ_LOCKED) {
2986 WARN_ON(eqo->state & BE_EQ_NAPI);
2987 eqo->state |= BE_EQ_NAPI_YIELD;
2988 status = false;
2989 } else {
2990 eqo->state = BE_EQ_NAPI;
2991 }
2992 spin_unlock(&eqo->lock);
2993 return status;
2994}
2995
2996static inline void be_unlock_napi(struct be_eq_obj *eqo)
2997{
2998 spin_lock(&eqo->lock);
2999
3000 WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
3001 eqo->state = BE_EQ_IDLE;
3002
3003 spin_unlock(&eqo->lock);
3004}
3005
3006static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
3007{
3008 bool status = true;
3009
3010 spin_lock_bh(&eqo->lock);
3011 if (eqo->state & BE_EQ_LOCKED) {
3012 eqo->state |= BE_EQ_POLL_YIELD;
3013 status = false;
3014 } else {
3015 eqo->state |= BE_EQ_POLL;
3016 }
3017 spin_unlock_bh(&eqo->lock);
3018 return status;
3019}
3020
3021static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
3022{
3023 spin_lock_bh(&eqo->lock);
3024
3025 WARN_ON(eqo->state & (BE_EQ_NAPI));
3026 eqo->state = BE_EQ_IDLE;
3027
3028 spin_unlock_bh(&eqo->lock);
3029}
3030
3031static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
3032{
3033 spin_lock_init(&eqo->lock);
3034 eqo->state = BE_EQ_IDLE;
3035}
3036
3037static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
3038{
3039 local_bh_disable();
3040
3041 /* Acquiring the napi lock on the eqo is enough to stop
3042  * be_busy_poll() from processing any of its queues.
3043  */
3044 while (!be_lock_napi(eqo))
3045 mdelay(1);
3046
3047 local_bh_enable();
3048}
3049
3050#else
3051
3052static inline bool be_lock_napi(struct be_eq_obj *eqo)
3053{
3054 return true;
3055}
3056
3057static inline void be_unlock_napi(struct be_eq_obj *eqo)
3058{
3059}
3060
3061static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
3062{
3063 return false;
3064}
3065
3066static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
3067{
3068}
3069
3070static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
3071{
3072}
3073
3074static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
3075{
3076}
3077#endif
3078
3079int be_poll(struct napi_struct *napi, int budget)
3080{
3081 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3082 struct be_adapter *adapter = eqo->adapter;
3083 int max_work = 0, work, i, num_evts;
3084 struct be_rx_obj *rxo;
3085 struct be_tx_obj *txo;
3086 u32 mult_enc = 0;
3087
3088 num_evts = events_get(eqo);
3089
3090 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
3091 be_process_tx(adapter, txo, i);
3092
3093 if (be_lock_napi(eqo)) {
3094 /* This loop iterates twice for EQ0, on which completions of
3095  * the last (default) RXQ are also processed.
3096  */
3097
3098 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3099 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
3100 max_work = max(work, max_work);
3101 }
3102 be_unlock_napi(eqo);
3103 } else {
3104 max_work = budget;
3105 }
3106
3107 if (is_mcc_eqo(eqo))
3108 be_process_mcc(adapter);
3109
3110 if (max_work < budget) {
3111 napi_complete(napi);
3112
3113 /* Skyhawk EQ_DB has a provision to set the re-arm-to-interrupt
3114  * delay via a delay-multiplier encoding value.
3115  */
3116 if (skyhawk_chip(adapter))
3117 mult_enc = be_get_eq_delay_mult_enc(eqo);
3118
3119 be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
3120 mult_enc);
3121 } else {
3122
3123 be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
3124 }
3125 return max_work;
3126}
3127
3128#ifdef CONFIG_NET_RX_BUSY_POLL
3129static int be_busy_poll(struct napi_struct *napi)
3130{
3131 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3132 struct be_adapter *adapter = eqo->adapter;
3133 struct be_rx_obj *rxo;
3134 int i, work = 0;
3135
3136 if (!be_lock_busy_poll(eqo))
3137 return LL_FLUSH_BUSY;
3138
3139 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3140 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
3141 if (work)
3142 break;
3143 }
3144
3145 be_unlock_busy_poll(eqo);
3146 return work;
3147}
3148#endif
3149
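/* Check the adapter for unrecoverable errors: the SLIPORT status registers
 * on Lancer, or the UE status registers on BE/Skyhawk, and log the findings.
 */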
3150void be_detect_error(struct be_adapter *adapter)
3151{
3152 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
3153 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
3154 u32 i;
3155 struct device *dev = &adapter->pdev->dev;
3156
3157 if (be_check_error(adapter, BE_ERROR_HW))
3158 return;
3159
3160 if (lancer_chip(adapter)) {
3161 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3162 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
3163 be_set_error(adapter, BE_ERROR_UE);
3164 sliport_err1 = ioread32(adapter->db +
3165 SLIPORT_ERROR1_OFFSET);
3166 sliport_err2 = ioread32(adapter->db +
3167 SLIPORT_ERROR2_OFFSET);
3168
3169 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
3170 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
3171 dev_info(dev, "Firmware update in progress\n");
3172 } else {
3173 dev_err(dev, "Error detected in the card\n");
3174 dev_err(dev, "ERR: sliport status 0x%x\n",
3175 sliport_status);
3176 dev_err(dev, "ERR: sliport error1 0x%x\n",
3177 sliport_err1);
3178 dev_err(dev, "ERR: sliport error2 0x%x\n",
3179 sliport_err2);
3180 }
3181 }
3182 } else {
3183 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
3184 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
3185 ue_lo_mask = ioread32(adapter->pcicfg +
3186 PCICFG_UE_STATUS_LOW_MASK);
3187 ue_hi_mask = ioread32(adapter->pcicfg +
3188 PCICFG_UE_STATUS_HI_MASK);
3189
3190 ue_lo = (ue_lo & ~ue_lo_mask);
3191 ue_hi = (ue_hi & ~ue_hi_mask);
3192
3193 /* Log any unmasked UE bits. The BE_ERROR_UE state is latched only
3194  * on Skyhawk; on older chips UEs can occasionally be reported
3195  * spuriously, so the adapter is not marked as failed here.
3196  */
3197
3198 if (ue_lo || ue_hi) {
3199 dev_err(dev,
3200 "Unrecoverable Error detected in the adapter");
3201 dev_err(dev, "Please reboot server to recover");
3202 if (skyhawk_chip(adapter))
3203 be_set_error(adapter, BE_ERROR_UE);
3204
3205 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
3206 if (ue_lo & 1)
3207 dev_err(dev, "UE: %s bit set\n",
3208 ue_status_low_desc[i]);
3209 }
3210 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
3211 if (ue_hi & 1)
3212 dev_err(dev, "UE: %s bit set\n",
3213 ue_status_hi_desc[i]);
3214 }
3215 }
3216 }
3217}
3218
3219static void be_msix_disable(struct be_adapter *adapter)
3220{
3221 if (msix_enabled(adapter)) {
3222 pci_disable_msix(adapter->pdev);
3223 adapter->num_msix_vec = 0;
3224 adapter->num_msix_roce_vec = 0;
3225 }
3226}
3227
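/* Enable MSI-X with as many vectors as the configured queues (and RoCE, when
 * supported) require. On failure the PF falls back to INTx; VFs return error.
 */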
3228static int be_msix_enable(struct be_adapter *adapter)
3229{
3230 int i, num_vec;
3231 struct device *dev = &adapter->pdev->dev;
3232
3233 /* If RoCE is supported, ask for enough vectors to cover both NIC
3234  * and RoCE usage (2 * max EQs, capped at 2 * online CPUs);
3235  * otherwise request only the configured number of queues.
3236  */
3237 if (be_roce_supported(adapter))
3238 num_vec = min_t(int, 2 * be_max_eqs(adapter),
3239 2 * num_online_cpus());
3240 else
3241 num_vec = adapter->cfg_num_qs;
3242
3243 for (i = 0; i < num_vec; i++)
3244 adapter->msix_entries[i].entry = i;
3245
3246 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
3247 MIN_MSIX_VECTORS, num_vec);
3248 if (num_vec < 0)
3249 goto fail;
3250
3251 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
3252 adapter->num_msix_roce_vec = num_vec / 2;
3253 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
3254 adapter->num_msix_roce_vec);
3255 }
3256
3257 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
3258
3259 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
3260 adapter->num_msix_vec);
3261 return 0;
3262
3263fail:
3264 dev_warn(dev, "MSIx enable failed\n");
3265
3266
3267 if (be_virtfn(adapter))
3268 return num_vec;
3269 return 0;
3270}
3271
3272static inline int be_msix_vec_get(struct be_adapter *adapter,
3273 struct be_eq_obj *eqo)
3274{
3275 return adapter->msix_entries[eqo->msix_idx].vector;
3276}
3277
3278static int be_msix_register(struct be_adapter *adapter)
3279{
3280 struct net_device *netdev = adapter->netdev;
3281 struct be_eq_obj *eqo;
3282 int status, i, vec;
3283
3284 for_all_evt_queues(adapter, eqo, i) {
3285 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
3286 vec = be_msix_vec_get(adapter, eqo);
3287 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3288 if (status)
3289 goto err_msix;
3290
3291 irq_set_affinity_hint(vec, eqo->affinity_mask);
3292 }
3293
3294 return 0;
3295err_msix:
3296 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
3297 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3298 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
3299 status);
3300 be_msix_disable(adapter);
3301 return status;
3302}
3303
3304static int be_irq_register(struct be_adapter *adapter)
3305{
3306 struct net_device *netdev = adapter->netdev;
3307 int status;
3308
3309 if (msix_enabled(adapter)) {
3310 status = be_msix_register(adapter);
3311 if (status == 0)
3312 goto done;
3313
3314 if (be_virtfn(adapter))
3315 return status;
3316 }
3317
3318
3319 netdev->irq = adapter->pdev->irq;
3320 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
3321 &adapter->eq_obj[0]);
3322 if (status) {
3323 dev_err(&adapter->pdev->dev,
3324 "INTx request IRQ failed - err %d\n", status);
3325 return status;
3326 }
3327done:
3328 adapter->isr_registered = true;
3329 return 0;
3330}
3331
3332static void be_irq_unregister(struct be_adapter *adapter)
3333{
3334 struct net_device *netdev = adapter->netdev;
3335 struct be_eq_obj *eqo;
3336 int i, vec;
3337
3338 if (!adapter->isr_registered)
3339 return;
3340
3341
3342 if (!msix_enabled(adapter)) {
3343 free_irq(netdev->irq, &adapter->eq_obj[0]);
3344 goto done;
3345 }
3346
3347
3348 for_all_evt_queues(adapter, eqo, i) {
3349 vec = be_msix_vec_get(adapter, eqo);
3350 irq_set_affinity_hint(vec, NULL);
3351 free_irq(vec, eqo);
3352 }
3353
3354done:
3355 adapter->isr_registered = false;
3356}
3357
3358static void be_rx_qs_destroy(struct be_adapter *adapter)
3359{
3360 struct be_queue_info *q;
3361 struct be_rx_obj *rxo;
3362 int i;
3363
3364 for_all_rx_queues(adapter, rxo, i) {
3365 q = &rxo->q;
3366 if (q->created) {
3367 /* If RXQs are destroyed while in an "out of buffer" state,
3368  * there is a possibility of an HW stall on Lancer. So post
3369  * MAX_RX_POST buffers to each queue to relieve the
3370  * "out of buffer" condition.
3371  * Make sure there's space in the RXQ before posting.
3372  */
3373 if (lancer_chip(adapter)) {
3374 be_rx_cq_clean(rxo);
3375 if (atomic_read(&q->used) == 0)
3376 be_post_rx_frags(rxo, GFP_KERNEL,
3377 MAX_RX_POST);
3378 }
3379
3380 be_cmd_rxq_destroy(adapter, q);
3381 be_rx_cq_clean(rxo);
3382 be_rxq_clean(rxo);
3383 }
3384 be_queue_free(adapter, q);
3385 }
3386}
3387
3388static void be_disable_if_filters(struct be_adapter *adapter)
3389{
3390 be_cmd_pmac_del(adapter, adapter->if_handle,
3391 adapter->pmac_id[0], 0);
3392
3393 be_clear_uc_list(adapter);
3394
3395 /* The IFACE flags are enabled in the open path and cleared
3396  * in the close path. When a VF gets detached from the host and
3397  * assigned to a VM the following happens:
3398  *   - the VF's IFACE flags get cleared in the detach path
3399  *   - IFACE create is issued by the VF in the attach path
3400  * Due to a bug in the BE3/Skyhawk-R FW
3401  * (Lancer FW doesn't have the bug), the IFACE capability flags
3402  * specified along with the IFACE create cmd issued by a VF are not
3403  * honoured by FW. As a consequence, if a *new* driver
3404  * (that enables/disables IFACE flags in open/close)
3405  * is loaded in the host and an *old* driver is used by a VM/VF,
3406  * the IFACE gets created *without* the needed flags.
3407  * To avoid this, disable RX-filter flags only for Lancer.
3408  */
3409 if (lancer_chip(adapter)) {
3410 be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
3411 adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
3412 }
3413}
3414
3415static int be_close(struct net_device *netdev)
3416{
3417 struct be_adapter *adapter = netdev_priv(netdev);
3418 struct be_eq_obj *eqo;
3419 int i;
3420
3421 /* This check is needed as be_close() may be called even when
3422  * the adapter is in a cleared state (after an EEH perm failure).
3423  */
3424 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3425 return 0;
3426
3427 be_disable_if_filters(adapter);
3428
3429 be_roce_dev_close(adapter);
3430
3431 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
3432 for_all_evt_queues(adapter, eqo, i) {
3433 napi_disable(&eqo->napi);
3434 be_disable_busy_poll(eqo);
3435 }
3436 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
3437 }
3438
3439 be_async_mcc_disable(adapter);
3440
3441 /* Wait for all pending TX completions to arrive so that all
3442  * TX skbs are freed before the queues are destroyed.
3443  */
3444 netif_tx_disable(netdev);
3445 be_tx_compl_clean(adapter);
3446
3447 be_rx_qs_destroy(adapter);
3448
3449 for_all_evt_queues(adapter, eqo, i) {
3450 if (msix_enabled(adapter))
3451 synchronize_irq(be_msix_vec_get(adapter, eqo));
3452 else
3453 synchronize_irq(netdev->irq);
3454 be_eq_clean(eqo);
3455 }
3456
3457 be_irq_unregister(adapter);
3458
3459 return 0;
3460}
3461
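/* Create the RX rings, program the RSS indirection table and hash key when
 * multiple RSS rings exist, and post the initial receive buffers.
 */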
3462static int be_rx_qs_create(struct be_adapter *adapter)
3463{
3464 struct rss_info *rss = &adapter->rss_info;
3465 u8 rss_key[RSS_HASH_KEY_LEN];
3466 struct be_rx_obj *rxo;
3467 int rc, i, j;
3468
3469 for_all_rx_queues(adapter, rxo, i) {
3470 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3471 sizeof(struct be_eth_rx_d));
3472 if (rc)
3473 return rc;
3474 }
3475
3476 if (adapter->need_def_rxq || !adapter->num_rss_qs) {
3477 rxo = default_rxo(adapter);
3478 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3479 rx_frag_size, adapter->if_handle,
3480 false, &rxo->rss_id);
3481 if (rc)
3482 return rc;
3483 }
3484
3485 for_all_rss_queues(adapter, rxo, i) {
3486 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3487 rx_frag_size, adapter->if_handle,
3488 true, &rxo->rss_id);
3489 if (rc)
3490 return rc;
3491 }
3492
3493 if (be_multi_rxq(adapter)) {
3494 for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
3495 for_all_rss_queues(adapter, rxo, i) {
3496 if ((j + i) >= RSS_INDIR_TABLE_LEN)
3497 break;
3498 rss->rsstable[j + i] = rxo->rss_id;
3499 rss->rss_queue[j + i] = i;
3500 }
3501 }
3502 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3503 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
3504
3505 if (!BEx_chip(adapter))
3506 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3507 RSS_ENABLE_UDP_IPV6;
3508 } else {
3509
3510 rss->rss_flags = RSS_ENABLE_NONE;
3511 }
3512
3513 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
3514 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3515 128, rss_key);
3516 if (rc) {
3517 rss->rss_flags = RSS_ENABLE_NONE;
3518 return rc;
3519 }
3520
3521 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
3522
3523
3524
3525
3526 for_all_rx_queues(adapter, rxo, i)
3527 be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);
3528
3529 return 0;
3530}
3531
3532static int be_enable_if_filters(struct be_adapter *adapter)
3533{
3534 int status;
3535
3536 status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON);
3537 if (status)
3538 return status;
3539
3540
3541 if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
3542 status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
3543 adapter->if_handle,
3544 &adapter->pmac_id[0], 0);
3545 if (status)
3546 return status;
3547 }
3548
3549 if (adapter->vlans_added)
3550 be_vid_config(adapter);
3551
3552 be_set_rx_mode(adapter->netdev);
3553
3554 return 0;
3555}
3556
3557static int be_open(struct net_device *netdev)
3558{
3559 struct be_adapter *adapter = netdev_priv(netdev);
3560 struct be_eq_obj *eqo;
3561 struct be_rx_obj *rxo;
3562 struct be_tx_obj *txo;
3563 u8 link_status;
3564 int status, i;
3565
3566 status = be_rx_qs_create(adapter);
3567 if (status)
3568 goto err;
3569
3570 status = be_enable_if_filters(adapter);
3571 if (status)
3572 goto err;
3573
3574 status = be_irq_register(adapter);
3575 if (status)
3576 goto err;
3577
3578 for_all_rx_queues(adapter, rxo, i)
3579 be_cq_notify(adapter, rxo->cq.id, true, 0);
3580
3581 for_all_tx_queues(adapter, txo, i)
3582 be_cq_notify(adapter, txo->cq.id, true, 0);
3583
3584 be_async_mcc_enable(adapter);
3585
3586 for_all_evt_queues(adapter, eqo, i) {
3587 napi_enable(&eqo->napi);
3588 be_enable_busy_poll(eqo);
3589 be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
3590 }
3591 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
3592
3593 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
3594 if (!status)
3595 be_link_status_update(adapter, link_status);
3596
3597 netif_tx_start_all_queues(netdev);
3598 be_roce_dev_open(adapter);
3599
3600#ifdef CONFIG_BE2NET_VXLAN
3601 if (skyhawk_chip(adapter))
3602 vxlan_get_rx_port(netdev);
3603#endif
3604
3605 return 0;
3606err:
3607 be_close(adapter->netdev);
3608 return -EIO;
3609}
3610
3611static int be_setup_wol(struct be_adapter *adapter, bool enable)
3612{
3613 struct be_dma_mem cmd;
3614 int status = 0;
3615 u8 mac[ETH_ALEN];
3616
3617 eth_zero_addr(mac);
3618
3619 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
3620 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3621 GFP_KERNEL);
3622 if (!cmd.va)
3623 return -ENOMEM;
3624
3625 if (enable) {
3626 status = pci_write_config_dword(adapter->pdev,
3627 PCICFG_PM_CONTROL_OFFSET,
3628 PCICFG_PM_CONTROL_MASK);
3629 if (status) {
3630 dev_err(&adapter->pdev->dev,
3631 "Could not enable Wake-on-lan\n");
3632 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3633 cmd.dma);
3634 return status;
3635 }
3636 status = be_cmd_enable_magic_wol(adapter,
3637 adapter->netdev->dev_addr,
3638 &cmd);
3639 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
3640 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
3641 } else {
3642 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3643 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
3644 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
3645 }
3646
3647 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
3648 return status;
3649}
3650
3651static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3652{
3653 u32 addr;
3654
3655 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3656
3657 mac[5] = (u8)(addr & 0xFF);
3658 mac[4] = (u8)((addr >> 8) & 0xFF);
3659 mac[3] = (u8)((addr >> 16) & 0xFF);
3660
3661 memcpy(mac, adapter->netdev->dev_addr, 3);
3662}
3663
3664 /* Generate a seed MAC address from the PF's MAC address using jhash
3665  * and assign MAC addresses to the VFs incrementally from that seed.
3666  * The addresses are programmed into the adapter by the PF on behalf
3667  * of the VFs.
3668  */
3669
3670static int be_vf_eth_addr_config(struct be_adapter *adapter)
3671{
3672 u32 vf;
3673 int status = 0;
3674 u8 mac[ETH_ALEN];
3675 struct be_vf_cfg *vf_cfg;
3676
3677 be_vf_eth_addr_generate(adapter, mac);
3678
3679 for_all_vfs(adapter, vf_cfg, vf) {
3680 if (BEx_chip(adapter))
3681 status = be_cmd_pmac_add(adapter, mac,
3682 vf_cfg->if_handle,
3683 &vf_cfg->pmac_id, vf + 1);
3684 else
3685 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3686 vf + 1);
3687
3688 if (status)
3689 dev_err(&adapter->pdev->dev,
3690 "Mac address assignment failed for VF %d\n",
3691 vf);
3692 else
3693 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3694
3695 mac[5] += 1;
3696 }
3697 return status;
3698}
3699
3700static int be_vfs_mac_query(struct be_adapter *adapter)
3701{
3702 int status, vf;
3703 u8 mac[ETH_ALEN];
3704 struct be_vf_cfg *vf_cfg;
3705
3706 for_all_vfs(adapter, vf_cfg, vf) {
3707 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3708 mac, vf_cfg->if_handle,
3709 false, vf+1);
3710 if (status)
3711 return status;
3712 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3713 }
3714 return 0;
3715}
3716
3717static void be_vf_clear(struct be_adapter *adapter)
3718{
3719 struct be_vf_cfg *vf_cfg;
3720 u32 vf;
3721
3722 if (pci_vfs_assigned(adapter->pdev)) {
3723 dev_warn(&adapter->pdev->dev,
3724 "VFs are assigned to VMs: not disabling VFs\n");
3725 goto done;
3726 }
3727
3728 pci_disable_sriov(adapter->pdev);
3729
3730 for_all_vfs(adapter, vf_cfg, vf) {
3731 if (BEx_chip(adapter))
3732 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3733 vf_cfg->pmac_id, vf + 1);
3734 else
3735 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3736 vf + 1);
3737
3738 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3739 }
3740done:
3741 kfree(adapter->vf_cfg);
3742 adapter->num_vfs = 0;
3743 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
3744}
3745
3746static void be_clear_queues(struct be_adapter *adapter)
3747{
3748 be_mcc_queues_destroy(adapter);
3749 be_rx_cqs_destroy(adapter);
3750 be_tx_queues_destroy(adapter);
3751 be_evt_queues_destroy(adapter);
3752}
3753
3754static void be_cancel_worker(struct be_adapter *adapter)
3755{
3756 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3757 cancel_delayed_work_sync(&adapter->work);
3758 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3759 }
3760}
3761
3762static void be_cancel_err_detection(struct be_adapter *adapter)
3763{
3764 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3765 cancel_delayed_work_sync(&adapter->be_err_detection_work);
3766 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3767 }
3768}
3769
3770#ifdef CONFIG_BE2NET_VXLAN
3771static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3772{
3773 struct net_device *netdev = adapter->netdev;
3774
3775 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3776 be_cmd_manage_iface(adapter, adapter->if_handle,
3777 OP_CONVERT_TUNNEL_TO_NORMAL);
3778
3779 if (adapter->vxlan_port)
3780 be_cmd_set_vxlan_port(adapter, 0);
3781
3782 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3783 adapter->vxlan_port = 0;
3784
3785 netdev->hw_enc_features = 0;
3786 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
3787 netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
3788}
3789#endif
3790
3791static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
3792{
3793 struct be_resources res = adapter->pool_res;
3794 u16 num_vf_qs = 1;
3795
3796 /* Distribute the queue resources between the PF and its VFs.
3797  * Queue resources are not distributed in multi-channel configs.
3798  */
3799 if (num_vfs && !be_is_mc(adapter)) {
3800
3801
3802
3803
3804 if (num_vfs < (be_max_vfs(adapter) - 8))
3805 num_vf_qs = (res.max_rss_qs - 8) / num_vfs;
3806 else
3807 num_vf_qs = res.max_rss_qs / num_vfs;
3808
3809 /* Skyhawk-R supports only MAX_RSS_IFACES RSS-capable interfaces
3810  * per port, so RSS queues are given to VFs only when the number
3811  * of VFs requested stays below that limit.
3812  */
3813 if (num_vfs >= MAX_RSS_IFACES)
3814 num_vf_qs = 1;
3815 }
3816 return num_vf_qs;
3817}
3818
3819static int be_clear(struct be_adapter *adapter)
3820{
3821 struct pci_dev *pdev = adapter->pdev;
3822 u16 num_vf_qs;
3823
3824 be_cancel_worker(adapter);
3825
3826 if (sriov_enabled(adapter))
3827 be_vf_clear(adapter);
3828
3829 /* Re-configure FW to distribute resources evenly across the
3830  * max-supported number of VFs, but only when no VFs are enabled.
3831  */
3832 if (skyhawk_chip(adapter) && be_physfn(adapter) &&
3833 !pci_vfs_assigned(pdev)) {
3834 num_vf_qs = be_calculate_vf_qs(adapter,
3835 pci_sriov_get_totalvfs(pdev));
3836 be_cmd_set_sriov_config(adapter, adapter->pool_res,
3837 pci_sriov_get_totalvfs(pdev),
3838 num_vf_qs);
3839 }
3840
3841#ifdef CONFIG_BE2NET_VXLAN
3842 be_disable_vxlan_offloads(adapter);
3843#endif
3844 kfree(adapter->pmac_id);
3845 adapter->pmac_id = NULL;
3846
3847 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
3848
3849 be_clear_queues(adapter);
3850
3851 be_msix_disable(adapter);
3852 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
3853 return 0;
3854}
3855
3856static int be_vfs_if_create(struct be_adapter *adapter)
3857{
3858 struct be_resources res = {0};
3859 u32 cap_flags, en_flags, vf;
3860 struct be_vf_cfg *vf_cfg;
3861 int status;
3862
3863
3864 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3865 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3866
3867 for_all_vfs(adapter, vf_cfg, vf) {
3868 if (!BE3_chip(adapter)) {
3869 status = be_cmd_get_profile_config(adapter, &res,
3870 RESOURCE_LIMITS,
3871 vf + 1);
3872 if (!status) {
3873 cap_flags = res.if_cap_flags;
3874
3875
3876
3877 cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
3878 }
3879 }
3880
3881 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
3882 BE_IF_FLAGS_BROADCAST |
3883 BE_IF_FLAGS_MULTICAST |
3884 BE_IF_FLAGS_PASS_L3L4_ERRORS);
3885 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3886 &vf_cfg->if_handle, vf + 1);
3887 if (status)
3888 return status;
3889 }
3890
3891 return 0;
3892}
3893
3894static int be_vf_setup_init(struct be_adapter *adapter)
3895{
3896 struct be_vf_cfg *vf_cfg;
3897 int vf;
3898
3899 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3900 GFP_KERNEL);
3901 if (!adapter->vf_cfg)
3902 return -ENOMEM;
3903
3904 for_all_vfs(adapter, vf_cfg, vf) {
3905 vf_cfg->if_handle = -1;
3906 vf_cfg->pmac_id = -1;
3907 }
3908 return 0;
3909}
3910
3911static int be_vf_setup(struct be_adapter *adapter)
3912{
3913 struct device *dev = &adapter->pdev->dev;
3914 struct be_vf_cfg *vf_cfg;
3915 int status, old_vfs, vf;
3916 bool spoofchk;
3917
3918 old_vfs = pci_num_vf(adapter->pdev);
3919
3920 status = be_vf_setup_init(adapter);
3921 if (status)
3922 goto err;
3923
3924 if (old_vfs) {
3925 for_all_vfs(adapter, vf_cfg, vf) {
3926 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3927 if (status)
3928 goto err;
3929 }
3930
3931 status = be_vfs_mac_query(adapter);
3932 if (status)
3933 goto err;
3934 } else {
3935 status = be_vfs_if_create(adapter);
3936 if (status)
3937 goto err;
3938
3939 status = be_vf_eth_addr_config(adapter);
3940 if (status)
3941 goto err;
3942 }
3943
3944 for_all_vfs(adapter, vf_cfg, vf) {
3945
3946 status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
3947 vf + 1);
3948 if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
3949 status = be_cmd_set_fn_privileges(adapter,
3950 vf_cfg->privileges |
3951 BE_PRIV_FILTMGMT,
3952 vf + 1);
3953 if (!status) {
3954 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
3955 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3956 vf);
3957 }
3958 }
3959
3960
3961 if (!old_vfs)
3962 be_cmd_config_qos(adapter, 0, 0, vf + 1);
3963
3964 status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
3965 vf_cfg->if_handle, NULL,
3966 &spoofchk);
3967 if (!status)
3968 vf_cfg->spoofchk = spoofchk;
3969
3970 if (!old_vfs) {
3971 be_cmd_enable_vf(adapter, vf + 1);
3972 be_cmd_set_logical_link_config(adapter,
3973 IFLA_VF_LINK_STATE_AUTO,
3974 vf+1);
3975 }
3976 }
3977
3978 if (!old_vfs) {
3979 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3980 if (status) {
3981 dev_err(dev, "SRIOV enable failed\n");
3982 adapter->num_vfs = 0;
3983 goto err;
3984 }
3985 }
3986
3987 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
3988 return 0;
3989err:
3990 dev_err(dev, "VF setup failed\n");
3991 be_vf_clear(adapter);
3992 return status;
3993}
3994
3995
3996
3997static u8 be_convert_mc_type(u32 function_mode)
3998{
3999 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
4000 return vNIC1;
4001 else if (function_mode & QNQ_MODE)
4002 return FLEX10;
4003 else if (function_mode & VNIC_MODE)
4004 return vNIC2;
4005 else if (function_mode & UMC_ENABLED)
4006 return UMC;
4007 else
4008 return MC_NONE;
4009}
4010
4011 /* BE2/BE3 FW does not report per-function limits; derive them here */
4012static void BEx_get_resources(struct be_adapter *adapter,
4013 struct be_resources *res)
4014{
4015 bool use_sriov = adapter->num_vfs ? 1 : 0;
4016
4017 if (be_physfn(adapter))
4018 res->max_uc_mac = BE_UC_PMAC_COUNT;
4019 else
4020 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
4021
4022 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
4023
4024 if (be_is_mc(adapter)) {
4025
4026
4027
4028 if (be_is_qnq_mode(adapter))
4029 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
4030 else
4031
4032
4033
4034 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
4035 } else {
4036 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
4037 }
4038
4039 res->max_mcast_mac = BE_MAX_MC;
4040
4041 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs.
4042  * 2) Create multiple TX rings on a BE3-R multi-channel interface
4043  *    only if it is RSS-capable.
4044  */
4045 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
4046 be_virtfn(adapter) ||
4047 (be_is_mc(adapter) &&
4048 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
4049 res->max_tx_qs = 1;
4050 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
4051 struct be_resources super_nic_res = {0};
4052
4053 /* On a SuperNIC profile, the driver needs to use the
4054  * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits.
4055  */
4056 be_cmd_get_profile_config(adapter, &super_nic_res,
4057 RESOURCE_LIMITS, 0);
4058
4059 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
4060 } else {
4061 res->max_tx_qs = BE3_MAX_TX_QS;
4062 }
4063
4064 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
4065 !use_sriov && be_physfn(adapter))
4066 res->max_rss_qs = (adapter->be3_native) ?
4067 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
4068 res->max_rx_qs = res->max_rss_qs + 1;
4069
4070 if (be_physfn(adapter))
4071 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
4072 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
4073 else
4074 res->max_evt_qs = 1;
4075
4076 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
4077 res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
4078 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
4079 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
4080}
4081
4082static void be_setup_init(struct be_adapter *adapter)
4083{
4084 adapter->vlan_prio_bmap = 0xff;
4085 adapter->phy.link_speed = -1;
4086 adapter->if_handle = -1;
4087 adapter->be3_native = false;
4088 adapter->if_flags = 0;
4089 if (be_physfn(adapter))
4090 adapter->cmd_privileges = MAX_PRIVILEGES;
4091 else
4092 adapter->cmd_privileges = MIN_PRIVILEGES;
4093}
4094
4095static int be_get_sriov_config(struct be_adapter *adapter)
4096{
4097 struct be_resources res = {0};
4098 int max_vfs, old_vfs;
4099
4100 be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);
4101
4102
4103 if (BE3_chip(adapter) && !res.max_vfs) {
4104 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
4105 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
4106 }
4107
4108 adapter->pool_res = res;
4109
4110 /* If the VFs were not disabled during a previous driver unload,
4111  * the PF-pool limits cannot be relied on for the TotalVFs value;
4112  * use the TotalVFs value from the pci-dev struct instead.
4113  */
4114 old_vfs = pci_num_vf(adapter->pdev);
4115 if (old_vfs) {
4116 dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
4117 old_vfs);
4118
4119 adapter->pool_res.max_vfs =
4120 pci_sriov_get_totalvfs(adapter->pdev);
4121 adapter->num_vfs = old_vfs;
4122 }
4123
4124 return 0;
4125}
4126
4127static void be_alloc_sriov_res(struct be_adapter *adapter)
4128{
4129 int old_vfs = pci_num_vf(adapter->pdev);
4130 u16 num_vf_qs;
4131 int status;
4132
4133 be_get_sriov_config(adapter);
4134
4135 if (!old_vfs)
4136 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
4137
4138
4139
4140
4141
4142
4143 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
4144 num_vf_qs = be_calculate_vf_qs(adapter, 0);
4145 status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
4146 num_vf_qs);
4147 if (status)
4148 dev_err(&adapter->pdev->dev,
4149 "Failed to optimize SRIOV resources\n");
4150 }
4151}
4152
4153static int be_get_resources(struct be_adapter *adapter)
4154{
4155 struct device *dev = &adapter->pdev->dev;
4156 struct be_resources res = {0};
4157 int status;
4158
4159 if (BEx_chip(adapter)) {
4160 BEx_get_resources(adapter, &res);
4161 adapter->res = res;
4162 }
4163
4164 /* For non-BEx chips (Lancer, Skyhawk), query the per-function
4165  * resource limits from FW instead of deriving them in the driver.
4166  */
4167
4168 if (!BEx_chip(adapter)) {
4169 status = be_cmd_get_func_config(adapter, &res);
4170 if (status)
4171 return status;
4172
4173
4174 if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
4175 !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
4176 res.max_rss_qs -= 1;
4177
4178
4179 if (be_roce_supported(adapter))
4180 res.max_evt_qs /= 2;
4181 adapter->res = res;
4182 }
4183
4184 /* If FW supports an RSS default queue, there is no need to
4185  * create a separate non-RSS default RXQ.
4186  */
4187 adapter->need_def_rxq = (be_if_cap_flags(adapter) &
4188 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;
4189
4190 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
4191 be_max_txqs(adapter), be_max_rxqs(adapter),
4192 be_max_rss(adapter), be_max_eqs(adapter),
4193 be_max_vfs(adapter));
4194 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
4195 be_max_uc(adapter), be_max_mc(adapter),
4196 be_max_vlans(adapter));
4197
4198
4199 adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
4200 be_max_qs(adapter));
4201 return 0;
4202}
4203
4204static int be_get_config(struct be_adapter *adapter)
4205{
4206 int status, level;
4207 u16 profile_id;
4208
4209 status = be_cmd_get_cntl_attributes(adapter);
4210 if (status)
4211 return status;
4212
4213 status = be_cmd_query_fw_cfg(adapter);
4214 if (status)
4215 return status;
4216
4217 if (BEx_chip(adapter)) {
4218 level = be_cmd_get_fw_log_level(adapter);
4219 adapter->msg_enable =
4220 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4221 }
4222
4223 be_cmd_get_acpi_wol_cap(adapter);
4224
4225 be_cmd_query_port_name(adapter);
4226
4227 if (be_physfn(adapter)) {
4228 status = be_cmd_get_active_profile(adapter, &profile_id);
4229 if (!status)
4230 dev_info(&adapter->pdev->dev,
4231 "Using profile 0x%x\n", profile_id);
4232 }
4233
4234 status = be_get_resources(adapter);
4235 if (status)
4236 return status;
4237
4238 adapter->pmac_id = kcalloc(be_max_uc(adapter),
4239 sizeof(*adapter->pmac_id), GFP_KERNEL);
4240 if (!adapter->pmac_id)
4241 return -ENOMEM;
4242
4243 return 0;
4244}
4245
4246static int be_mac_setup(struct be_adapter *adapter)
4247{
4248 u8 mac[ETH_ALEN];
4249 int status;
4250
4251 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4252 status = be_cmd_get_perm_mac(adapter, mac);
4253 if (status)
4254 return status;
4255
4256 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4257 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
4258 }
4259
4260 return 0;
4261}
4262
4263static void be_schedule_worker(struct be_adapter *adapter)
4264{
4265 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4266 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
4267}
4268
4269static void be_schedule_err_detection(struct be_adapter *adapter)
4270{
4271 schedule_delayed_work(&adapter->be_err_detection_work,
4272 msecs_to_jiffies(1000));
4273 adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
4274}
4275
4276static int be_setup_queues(struct be_adapter *adapter)
4277{
4278 struct net_device *netdev = adapter->netdev;
4279 int status;
4280
4281 status = be_evt_queues_create(adapter);
4282 if (status)
4283 goto err;
4284
4285 status = be_tx_qs_create(adapter);
4286 if (status)
4287 goto err;
4288
4289 status = be_rx_cqs_create(adapter);
4290 if (status)
4291 goto err;
4292
4293 status = be_mcc_queues_create(adapter);
4294 if (status)
4295 goto err;
4296
4297 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
4298 if (status)
4299 goto err;
4300
4301 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
4302 if (status)
4303 goto err;
4304
4305 return 0;
4306err:
4307 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
4308 return status;
4309}
4310
4311int be_update_queues(struct be_adapter *adapter)
4312{
4313 struct net_device *netdev = adapter->netdev;
4314 int status;
4315
4316 if (netif_running(netdev))
4317 be_close(netdev);
4318
4319 be_cancel_worker(adapter);
4320
	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSI-x table.
	 */
4324 if (!adapter->num_msix_roce_vec)
4325 be_msix_disable(adapter);
4326
4327 be_clear_queues(adapter);
4328
4329 if (!msix_enabled(adapter)) {
4330 status = be_msix_enable(adapter);
4331 if (status)
4332 return status;
4333 }
4334
4335 status = be_setup_queues(adapter);
4336 if (status)
4337 return status;
4338
4339 be_schedule_worker(adapter);
4340
4341 if (netif_running(netdev))
4342 status = be_open(netdev);
4343
4344 return status;
4345}
4346
4347static inline int fw_major_num(const char *fw_ver)
4348{
4349 int fw_major = 0, i;
4350
4351 i = sscanf(fw_ver, "%d.", &fw_major);
4352 if (i != 1)
4353 return 0;
4354
4355 return fw_major;
4356}
4357
4358
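/* If any VFs are already enabled, don't FLR the PF */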
4359static bool be_reset_required(struct be_adapter *adapter)
4360{
4361 return pci_num_vf(adapter->pdev) ? false : true;
4362}
4363
4364
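/* Wait for the FW to be ready and perform the required initialization */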
4365static int be_func_init(struct be_adapter *adapter)
4366{
4367 int status;
4368
4369 status = be_fw_wait_ready(adapter);
4370 if (status)
4371 return status;
4372
4373 if (be_reset_required(adapter)) {
4374 status = be_cmd_reset_function(adapter);
4375 if (status)
4376 return status;
4377
4378
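		/* Wait for interrupts to quiesce after an FLR */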
4379 msleep(100);
4380
4381
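		/* We can clear all errors when function reset succeeds */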
4382 be_clear_error(adapter, BE_CLEAR_ALL);
4383 }
4384
4385
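	/* Tell FW we're ready to fire cmds */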
4386 status = be_cmd_fw_init(adapter);
4387 if (status)
4388 return status;
4389
4390
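	/* Allow interrupts for other ULPs running on the NIC function */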
4391 be_intr_set(adapter, true);
4392
4393 return 0;
4394}
4395
4396static int be_setup(struct be_adapter *adapter)
4397{
4398 struct device *dev = &adapter->pdev->dev;
4399 u32 en_flags;
4400 int status;
4401
4402 status = be_func_init(adapter);
4403 if (status)
4404 return status;
4405
4406 be_setup_init(adapter);
4407
4408 if (!lancer_chip(adapter))
4409 be_cmd_req_native_mode(adapter);
4410
4411 if (!BE2_chip(adapter) && be_physfn(adapter))
4412 be_alloc_sriov_res(adapter);
4413
4414 status = be_get_config(adapter);
4415 if (status)
4416 goto err;
4417
4418 status = be_msix_enable(adapter);
4419 if (status)
4420 goto err;
4421
4422
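	/* will enable all the needed filter flags in be_open() */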
4423 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
4424 en_flags = en_flags & be_if_cap_flags(adapter);
4425 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
4426 &adapter->if_handle, 0);
4427 if (status)
4428 goto err;
4429
4430
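	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */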
4431 rtnl_lock();
4432 status = be_setup_queues(adapter);
4433 rtnl_unlock();
4434 if (status)
4435 goto err;
4436
4437 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
4438
4439 status = be_mac_setup(adapter);
4440 if (status)
4441 goto err;
4442
4443 be_cmd_get_fw_ver(adapter);
4444 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
4445
4446 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work\n",
			adapter->fw_ver);
4449 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
4450 }
4451
4452 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
4453 adapter->rx_fc);
4454 if (status)
4455 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
4456 &adapter->rx_fc);
4457
4458 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
4459 adapter->tx_fc, adapter->rx_fc);
4460
4461 if (be_physfn(adapter))
4462 be_cmd_set_logical_link_config(adapter,
4463 IFLA_VF_LINK_STATE_AUTO, 0);
4464
4465 if (adapter->num_vfs)
4466 be_vf_setup(adapter);
4467
4468 status = be_cmd_get_phy_info(adapter);
4469 if (!status && be_pause_supported(adapter))
4470 adapter->phy.fc_autoneg = 1;
4471
4472 be_schedule_worker(adapter);
4473 adapter->flags |= BE_FLAGS_SETUP_DONE;
4474 return 0;
4475err:
4476 be_clear(adapter);
4477 return status;
4478}
4479
4480#ifdef CONFIG_NET_POLL_CONTROLLER
4481static void be_netpoll(struct net_device *netdev)
4482{
4483 struct be_adapter *adapter = netdev_priv(netdev);
4484 struct be_eq_obj *eqo;
4485 int i;
4486
4487 for_all_evt_queues(adapter, eqo, i) {
4488 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
4489 napi_schedule(&eqo->napi);
4490 }
4491}
4492#endif
4493
4494static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
4495
4496static bool phy_flashing_required(struct be_adapter *adapter)
4497{
4498 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
4499 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
4500}
4501
4502static bool is_comp_in_ufi(struct be_adapter *adapter,
4503 struct flash_section_info *fsec, int type)
4504{
4505 int i = 0, img_type = 0;
4506 struct flash_section_info_g2 *fsec_g2 = NULL;
4507
4508 if (BE2_chip(adapter))
4509 fsec_g2 = (struct flash_section_info_g2 *)fsec;
4510
4511 for (i = 0; i < MAX_FLASH_COMP; i++) {
4512 if (fsec_g2)
4513 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
4514 else
4515 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4516
4517 if (img_type == type)
4518 return true;
4519 }
4520 return false;
4522}
4523
4524static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
4525 int header_size,
4526 const struct firmware *fw)
4527{
4528 struct flash_section_info *fsec = NULL;
4529 const u8 *p = fw->data;
4530
4531 p += header_size;
4532 while (p < (fw->data + fw->size)) {
4533 fsec = (struct flash_section_info *)p;
4534 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
4535 return fsec;
4536 p += 32;
4537 }
4538 return NULL;
4539}
4540
4541static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
4542 u32 img_offset, u32 img_size, int hdr_size,
4543 u16 img_optype, bool *crc_match)
4544{
4545 u32 crc_offset;
4546 int status;
4547 u8 crc[4];
4548
4549 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
4550 img_size - 4);
4551 if (status)
4552 return status;
4553
4554 crc_offset = hdr_size + img_offset + img_size - 4;
4555
4556
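	/* Compare the CRC reported by FW for the flashed region with the
	 * CRC stored in the last 4 bytes of the image
	 */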
4557 if (!memcmp(crc, p + crc_offset, 4))
4558 *crc_match = true;
4559 else
4560 *crc_match = false;
4561
4562 return status;
4563}
4564
4565static int be_flash(struct be_adapter *adapter, const u8 *img,
4566 struct be_dma_mem *flash_cmd, int optype, int img_size,
4567 u32 img_offset)
4568{
4569 u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
4570 struct be_cmd_write_flashrom *req = flash_cmd->va;
4571 int status;
4572
4573 while (total_bytes) {
4574 num_bytes = min_t(u32, 32*1024, total_bytes);
4575
4576 total_bytes -= num_bytes;
4577
4578 if (!total_bytes) {
4579 if (optype == OPTYPE_PHY_FW)
4580 flash_op = FLASHROM_OPER_PHY_FLASH;
4581 else
4582 flash_op = FLASHROM_OPER_FLASH;
4583 } else {
4584 if (optype == OPTYPE_PHY_FW)
4585 flash_op = FLASHROM_OPER_PHY_SAVE;
4586 else
4587 flash_op = FLASHROM_OPER_SAVE;
4588 }
4589
4590 memcpy(req->data_buf, img, num_bytes);
4591 img += num_bytes;
4592 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
4593 flash_op, img_offset +
4594 bytes_sent, num_bytes);
4595 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
4596 optype == OPTYPE_PHY_FW)
4597 break;
4598 else if (status)
4599 return status;
4600
4601 bytes_sent += num_bytes;
4602 }
4603 return 0;
4604}
4605
4606
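/* Flashing routine for BE2/BE3 (and BE3-R) adapters */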
4607static int be_flash_BEx(struct be_adapter *adapter,
4608 const struct firmware *fw,
4609 struct be_dma_mem *flash_cmd, int num_of_images)
4610{
4611 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
4612 struct device *dev = &adapter->pdev->dev;
4613 struct flash_section_info *fsec = NULL;
4614 int status, i, filehdr_size, num_comp;
4615 const struct flash_comp *pflashcomp;
4616 bool crc_match;
4617 const u8 *p;
4618
4619 struct flash_comp gen3_flash_types[] = {
4620 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
4621 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
4622 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
4623 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
4624 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
4625 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
4626 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
4627 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
4628 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
4629 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
4630 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
4631 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
4632 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
4633 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
4634 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
4635 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
4636 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
4637 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
4638 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
4639 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
4640 };
4641
4642 struct flash_comp gen2_flash_types[] = {
4643 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
4644 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
4645 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
4646 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
4647 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
4648 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
4649 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
4650 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
4651 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
4652 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
4653 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
4654 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
4655 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
4656 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
4657 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
4658 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
4659 };
4660
4661 if (BE3_chip(adapter)) {
4662 pflashcomp = gen3_flash_types;
4663 filehdr_size = sizeof(struct flash_file_hdr_g3);
4664 num_comp = ARRAY_SIZE(gen3_flash_types);
4665 } else {
4666 pflashcomp = gen2_flash_types;
4667 filehdr_size = sizeof(struct flash_file_hdr_g2);
4668 num_comp = ARRAY_SIZE(gen2_flash_types);
4669 img_hdrs_size = 0;
4670 }
4671
4672
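	/* Get flash section info */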
4673 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4674 if (!fsec) {
4675 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
4676 return -1;
4677 }
4678 for (i = 0; i < num_comp; i++) {
4679 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
4680 continue;
4681
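		/* Skip the NCSI image if the FW on the card is older
		 * than 3.102.148.0
		 */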
4682 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
4683 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
4684 continue;
4685
4686 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
4687 !phy_flashing_required(adapter))
4688 continue;
4689
4690 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
4691 status = be_check_flash_crc(adapter, fw->data,
4692 pflashcomp[i].offset,
4693 pflashcomp[i].size,
4694 filehdr_size +
4695 img_hdrs_size,
4696 OPTYPE_REDBOOT, &crc_match);
4697 if (status) {
4698 dev_err(dev,
4699 "Could not get CRC for 0x%x region\n",
4700 pflashcomp[i].optype);
4701 continue;
4702 }
4703
4704 if (crc_match)
4705 continue;
4706 }
4707
4708 p = fw->data + filehdr_size + pflashcomp[i].offset +
4709 img_hdrs_size;
4710 if (p + pflashcomp[i].size > fw->data + fw->size)
4711 return -1;
4712
4713 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
4714 pflashcomp[i].size, 0);
4715 if (status) {
4716 dev_err(dev, "Flashing section type 0x%x failed\n",
4717 pflashcomp[i].img_type);
4718 return status;
4719 }
4720 }
4721 return 0;
4722}
4723
4724static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
4725{
4726 u32 img_type = le32_to_cpu(fsec_entry.type);
4727 u16 img_optype = le16_to_cpu(fsec_entry.optype);
4728
4729 if (img_optype != 0xFFFF)
4730 return img_optype;
4731
4732 switch (img_type) {
4733 case IMAGE_FIRMWARE_iSCSI:
4734 img_optype = OPTYPE_ISCSI_ACTIVE;
4735 break;
4736 case IMAGE_BOOT_CODE:
4737 img_optype = OPTYPE_REDBOOT;
4738 break;
4739 case IMAGE_OPTION_ROM_ISCSI:
4740 img_optype = OPTYPE_BIOS;
4741 break;
4742 case IMAGE_OPTION_ROM_PXE:
4743 img_optype = OPTYPE_PXE_BIOS;
4744 break;
4745 case IMAGE_OPTION_ROM_FCoE:
4746 img_optype = OPTYPE_FCOE_BIOS;
4747 break;
4748 case IMAGE_FIRMWARE_BACKUP_iSCSI:
4749 img_optype = OPTYPE_ISCSI_BACKUP;
4750 break;
4751 case IMAGE_NCSI:
4752 img_optype = OPTYPE_NCSI_FW;
4753 break;
4754 case IMAGE_FLASHISM_JUMPVECTOR:
4755 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
4756 break;
4757 case IMAGE_FIRMWARE_PHY:
4758 img_optype = OPTYPE_SH_PHY_FW;
4759 break;
4760 case IMAGE_REDBOOT_DIR:
4761 img_optype = OPTYPE_REDBOOT_DIR;
4762 break;
4763 case IMAGE_REDBOOT_CONFIG:
4764 img_optype = OPTYPE_REDBOOT_CONFIG;
4765 break;
4766 case IMAGE_UFI_DIR:
4767 img_optype = OPTYPE_UFI_DIR;
4768 break;
4769 default:
4770 break;
4771 }
4772
4773 return img_optype;
4774}
4775
4776static int be_flash_skyhawk(struct be_adapter *adapter,
4777 const struct firmware *fw,
4778 struct be_dma_mem *flash_cmd, int num_of_images)
4779{
4780 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
4781 bool crc_match, old_fw_img, flash_offset_support = true;
4782 struct device *dev = &adapter->pdev->dev;
4783 struct flash_section_info *fsec = NULL;
4784 u32 img_offset, img_size, img_type;
4785 u16 img_optype, flash_optype;
4786 int status, i, filehdr_size;
4787 const u8 *p;
4788
4789 filehdr_size = sizeof(struct flash_file_hdr_g3);
4790 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4791 if (!fsec) {
4792 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
4793 return -EINVAL;
4794 }
4795
4796retry_flash:
4797 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
4798 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
4799 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
4800 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4801 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
4802 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
4803
4804 if (img_optype == 0xFFFF)
4805 continue;
4806
4807 if (flash_offset_support)
4808 flash_optype = OPTYPE_OFFSET_SPECIFIED;
4809 else
4810 flash_optype = img_optype;
4811
		/* Don't bother verifying the CRC when an old-format image
		 * (section-entry optype == 0xFFFF) is being flashed.
		 */
4815 if (old_fw_img)
4816 goto flash;
4817
4818 status = be_check_flash_crc(adapter, fw->data, img_offset,
4819 img_size, filehdr_size +
4820 img_hdrs_size, flash_optype,
4821 &crc_match);
4822 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4823 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
			/* The FW image on the card does not support OFFSET
			 * based flashing. Retry using the older mechanism
			 * of OPTYPE based flashing.
			 */
4828 if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4829 flash_offset_support = false;
4830 goto retry_flash;
4831 }
4832
			/* The FW on the card does not recognize the
			 * programmed flash op_type, so the download is only
			 * partially complete. The server must be rebooted
			 * and the same image downloaded again to finish
			 * flashing (see the messages below).
			 */
4840 dev_err(dev, "Flash incomplete. Reset the server\n");
4841 dev_err(dev, "Download FW image again after reset\n");
4842 return -EAGAIN;
4843 } else if (status) {
4844 dev_err(dev, "Could not get CRC for 0x%x region\n",
4845 img_optype);
4846 return -EFAULT;
4847 }
4848
4849 if (crc_match)
4850 continue;
4851
4852flash:
4853 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
4854 if (p + img_size > fw->data + fw->size)
4855 return -1;
4856
4857 status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
4858 img_offset);
4859
		/* The FW image on the card does not support OFFSET based
		 * flashing. Retry using the older mechanism of OPTYPE based
		 * flashing.
		 */
4864 if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
4865 flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4866 flash_offset_support = false;
4867 goto retry_flash;
4868 }
4869
		/* For old FW images ignore ILLEGAL_FIELD errors and errors
		 * on the UFI_DIR region.
		 */
4873 if (old_fw_img &&
4874 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4875 (img_optype == OPTYPE_UFI_DIR &&
4876 base_status(status) == MCC_STATUS_FAILED))) {
4877 continue;
4878 } else if (status) {
4879 dev_err(dev, "Flashing section type 0x%x failed\n",
4880 img_type);
4881 return -EFAULT;
4882 }
4883 }
4884 return 0;
4885}
4886
4887static int lancer_fw_download(struct be_adapter *adapter,
4888 const struct firmware *fw)
4889{
4890#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
4891#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
4892 struct device *dev = &adapter->pdev->dev;
4893 struct be_dma_mem flash_cmd;
4894 const u8 *data_ptr = NULL;
4895 u8 *dest_image_ptr = NULL;
4896 size_t image_size = 0;
4897 u32 chunk_size = 0;
4898 u32 data_written = 0;
4899 u32 offset = 0;
4900 int status = 0;
4901 u8 add_status = 0;
4902 u8 change_status;
4903
4904 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
4905 dev_err(dev, "FW image size should be multiple of 4\n");
4906 return -EINVAL;
4907 }
4908
4909 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
4910 + LANCER_FW_DOWNLOAD_CHUNK;
4911 flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size,
4912 &flash_cmd.dma, GFP_KERNEL);
4913 if (!flash_cmd.va)
4914 return -ENOMEM;
4915
4916 dest_image_ptr = flash_cmd.va +
4917 sizeof(struct lancer_cmd_req_write_object);
4918 image_size = fw->size;
4919 data_ptr = fw->data;
4920
4921 while (image_size) {
4922 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
4923
4924
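		/* Copy the image chunk content */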
4925 memcpy(dest_image_ptr, data_ptr, chunk_size);
4926
4927 status = lancer_cmd_write_object(adapter, &flash_cmd,
4928 chunk_size, offset,
4929 LANCER_FW_DOWNLOAD_LOCATION,
4930 &data_written, &change_status,
4931 &add_status);
4932 if (status)
4933 break;
4934
4935 offset += data_written;
4936 data_ptr += data_written;
4937 image_size -= data_written;
4938 }
4939
4940 if (!status) {
4941
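		/* A final zero-length write commits the downloaded image */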
4942 status = lancer_cmd_write_object(adapter, &flash_cmd,
4943 0, offset,
4944 LANCER_FW_DOWNLOAD_LOCATION,
4945 &data_written, &change_status,
4946 &add_status);
4947 }
4948
4949 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
4950 if (status) {
4951 dev_err(dev, "Firmware load error\n");
4952 return be_cmd_status(status);
4953 }
4954
4955 dev_info(dev, "Firmware flashed successfully\n");
4956
4957 if (change_status == LANCER_FW_RESET_NEEDED) {
4958 dev_info(dev, "Resetting adapter to activate new FW\n");
4959 status = lancer_physdev_ctrl(adapter,
4960 PHYSDEV_CONTROL_FW_RESET_MASK);
4961 if (status) {
4962 dev_err(dev, "Adapter busy, could not reset FW\n");
4963 dev_err(dev, "Reboot server to activate new FW\n");
4964 }
4965 } else if (change_status != LANCER_NO_RESET_NEEDED) {
4966 dev_info(dev, "Reboot server to activate new FW\n");
4967 }
4968
4969 return 0;
4970}
4971
/* Check if the flash image file is compatible with the adapter that
 * is being flashed.
 */
4975static bool be_check_ufi_compatibility(struct be_adapter *adapter,
4976 struct flash_file_hdr_g3 *fhdr)
4977{
4978 if (!fhdr) {
4979 dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
		return false;
4981 }
4982
	/* The first letter of the build version string identifies the
	 * chip family this UFI image is meant for.
	 */
4986 switch (fhdr->build[0]) {
4987 case BLD_STR_UFI_TYPE_SH:
4988 if (!skyhawk_chip(adapter))
4989 return false;
4990 break;
4991 case BLD_STR_UFI_TYPE_BE3:
4992 if (!BE3_chip(adapter))
4993 return false;
4994 break;
4995 case BLD_STR_UFI_TYPE_BE2:
4996 if (!BE2_chip(adapter))
4997 return false;
4998 break;
4999 default:
5000 return false;
5001 }
5002
5003 return (fhdr->asic_type_rev >= adapter->asic_rev);
5004}
5005
static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
5007{
5008 struct device *dev = &adapter->pdev->dev;
5009 struct flash_file_hdr_g3 *fhdr3;
5010 struct image_hdr *img_hdr_ptr;
5011 int status = 0, i, num_imgs;
5012 struct be_dma_mem flash_cmd;
5013
5014 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
5015 if (!be_check_ufi_compatibility(adapter, fhdr3)) {
5016 dev_err(dev, "Flash image is not compatible with adapter\n");
5017 return -EINVAL;
5018 }
5019
5020 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
5021 flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
5022 GFP_KERNEL);
5023 if (!flash_cmd.va)
5024 return -ENOMEM;
5025
5026 num_imgs = le32_to_cpu(fhdr3->num_imgs);
5027 for (i = 0; i < num_imgs; i++) {
5028 img_hdr_ptr = (struct image_hdr *)(fw->data +
5029 (sizeof(struct flash_file_hdr_g3) +
5030 i * sizeof(struct image_hdr)));
5031 if (!BE2_chip(adapter) &&
5032 le32_to_cpu(img_hdr_ptr->imageid) != 1)
5033 continue;
5034
5035 if (skyhawk_chip(adapter))
5036 status = be_flash_skyhawk(adapter, fw, &flash_cmd,
5037 num_imgs);
5038 else
5039 status = be_flash_BEx(adapter, fw, &flash_cmd,
5040 num_imgs);
5041 }
5042
5043 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
5044 if (!status)
5045 dev_info(dev, "Firmware flashed successfully\n");
5046
5047 return status;
5048}
5049
5050int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
5051{
5052 const struct firmware *fw;
5053 int status;
5054
5055 if (!netif_running(adapter->netdev)) {
5056 dev_err(&adapter->pdev->dev,
5057 "Firmware load not allowed (interface is down)\n");
5058 return -ENETDOWN;
5059 }
5060
5061 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
5062 if (status)
5063 goto fw_exit;
5064
5065 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
5066
5067 if (lancer_chip(adapter))
5068 status = lancer_fw_download(adapter, fw);
5069 else
5070 status = be_fw_download(adapter, fw);
5071
5072 if (!status)
5073 be_cmd_get_fw_ver(adapter);
5074
5075fw_exit:
5076 release_firmware(fw);
5077 return status;
5078}
5079
5080static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
5081 u16 flags)
5082{
5083 struct be_adapter *adapter = netdev_priv(dev);
5084 struct nlattr *attr, *br_spec;
5085 int rem;
5086 int status = 0;
5087 u16 mode = 0;
5088
5089 if (!sriov_enabled(adapter))
5090 return -EOPNOTSUPP;
5091
5092 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
5093 if (!br_spec)
5094 return -EINVAL;
5095
5096 nla_for_each_nested(attr, br_spec, rem) {
5097 if (nla_type(attr) != IFLA_BRIDGE_MODE)
5098 continue;
5099
5100 if (nla_len(attr) < sizeof(mode))
5101 return -EINVAL;
5102
5103 mode = nla_get_u16(attr);
5104 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
5105 return -EINVAL;
5106
5107 status = be_cmd_set_hsw_config(adapter, 0, 0,
5108 adapter->if_handle,
5109 mode == BRIDGE_MODE_VEPA ?
5110 PORT_FWD_TYPE_VEPA :
5111 PORT_FWD_TYPE_VEB, 0);
5112 if (status)
5113 goto err;
5114
5115 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
5116 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
5117
5118 return status;
5119 }
5120err:
5121 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
5122 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
5123
5124 return status;
5125}
5126
5127static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
5128 struct net_device *dev, u32 filter_mask,
5129 int nlflags)
5130{
5131 struct be_adapter *adapter = netdev_priv(dev);
5132 int status = 0;
5133 u8 hsw_mode;
5134
5135 if (!sriov_enabled(adapter))
5136 return 0;
5137
5138
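	/* BE and Lancer chips support VEB mode only */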
5139 if (BEx_chip(adapter) || lancer_chip(adapter)) {
5140 hsw_mode = PORT_FWD_TYPE_VEB;
5141 } else {
5142 status = be_cmd_get_hsw_config(adapter, NULL, 0,
5143 adapter->if_handle, &hsw_mode,
5144 NULL);
5145 if (status)
5146 return 0;
5147 }
5148
5149 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
5150 hsw_mode == PORT_FWD_TYPE_VEPA ?
5151 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
5152 0, 0, nlflags, filter_mask, NULL);
5153}
5154
5155#ifdef CONFIG_BE2NET_VXLAN
/* VxLAN offload notes:
 *
 * The HW supports UDP tunnel offloads for only one VxLAN port at a time, so
 * the offload features (hw_enc_features and NETIF_F_GSO_UDP_TUNNEL) are
 * exported only when the first VxLAN port is added. If a second port is
 * added, the offloads are disabled again (see be_add_vxlan_port() and
 * be_del_vxlan_port() below).
 *
 * be_features_check() additionally strips the checksum and GSO features on
 * a per-skb basis for encapsulated packets that are not VxLAN, so that
 * other tunnel types (e.g. GRE) continue to work, albeit without offloads,
 * while VxLAN offloads are configured.
 *
 * These offloads are not advertised on Lancer, BEx or multi-channel configs.
 */
5170static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
5171 __be16 port)
5172{
5173 struct be_adapter *adapter = netdev_priv(netdev);
5174 struct device *dev = &adapter->pdev->dev;
5175 int status;
5176
5177 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
5178 return;
5179
5180 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
5181 dev_info(dev,
5182 "Only one UDP port supported for VxLAN offloads\n");
5183 dev_info(dev, "Disabling VxLAN offloads\n");
5184 adapter->vxlan_port_count++;
5185 goto err;
5186 }
5187
5188 if (adapter->vxlan_port_count++ >= 1)
5189 return;
5190
5191 status = be_cmd_manage_iface(adapter, adapter->if_handle,
5192 OP_CONVERT_NORMAL_TO_TUNNEL);
5193 if (status) {
5194 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
5195 goto err;
5196 }
5197
5198 status = be_cmd_set_vxlan_port(adapter, port);
5199 if (status) {
5200 dev_warn(dev, "Failed to add VxLAN port\n");
5201 goto err;
5202 }
5203 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
5204 adapter->vxlan_port = port;
5205
5206 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
5207 NETIF_F_TSO | NETIF_F_TSO6 |
5208 NETIF_F_GSO_UDP_TUNNEL;
5209 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
5210 netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
5211
5212 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
5213 be16_to_cpu(port));
5214 return;
5215err:
5216 be_disable_vxlan_offloads(adapter);
5217}
5218
5219static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
5220 __be16 port)
5221{
5222 struct be_adapter *adapter = netdev_priv(netdev);
5223
5224 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
5225 return;
5226
5227 if (adapter->vxlan_port != port)
5228 goto done;
5229
5230 be_disable_vxlan_offloads(adapter);
5231
5232 dev_info(&adapter->pdev->dev,
5233 "Disabled VxLAN offloads for UDP port %d\n",
5234 be16_to_cpu(port));
5235done:
5236 adapter->vxlan_port_count--;
5237}
5238
5239static netdev_features_t be_features_check(struct sk_buff *skb,
5240 struct net_device *dev,
5241 netdev_features_t features)
5242{
5243 struct be_adapter *adapter = netdev_priv(dev);
5244 u8 l4_hdr = 0;
5245
	/* The code below restricts offload features for some tunneled packets.
	 * Offload features for normal (non tunnel) packets are unchanged.
	 */
5249 if (!skb->encapsulation ||
5250 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
5251 return features;
5252
	/* It's an encapsulated packet and VxLAN offloads are enabled. Tunnel
	 * offload features must be disabled if it's not a VxLAN packet,
	 * since tunnel offloads have been enabled only for VxLAN. This is
	 * done so that other tunneled traffic (e.g. GRE) keeps working,
	 * without offloads, while VxLAN offloads are configured.
	 */
5259 switch (vlan_get_protocol(skb)) {
5260 case htons(ETH_P_IP):
5261 l4_hdr = ip_hdr(skb)->protocol;
5262 break;
5263 case htons(ETH_P_IPV6):
5264 l4_hdr = ipv6_hdr(skb)->nexthdr;
5265 break;
5266 default:
5267 return features;
5268 }
5269
5270 if (l4_hdr != IPPROTO_UDP ||
5271 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
5272 skb->inner_protocol != htons(ETH_P_TEB) ||
5273 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
5274 sizeof(struct udphdr) + sizeof(struct vxlanhdr))
5275 return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
5276
5277 return features;
5278}
5279#endif
5280
5281static const struct net_device_ops be_netdev_ops = {
5282 .ndo_open = be_open,
5283 .ndo_stop = be_close,
5284 .ndo_start_xmit = be_xmit,
5285 .ndo_set_rx_mode = be_set_rx_mode,
5286 .ndo_set_mac_address = be_mac_addr_set,
5287 .ndo_change_mtu = be_change_mtu,
5288 .ndo_get_stats64 = be_get_stats64,
5289 .ndo_validate_addr = eth_validate_addr,
5290 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
5291 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
5292 .ndo_set_vf_mac = be_set_vf_mac,
5293 .ndo_set_vf_vlan = be_set_vf_vlan,
5294 .ndo_set_vf_rate = be_set_vf_tx_rate,
5295 .ndo_get_vf_config = be_get_vf_config,
5296 .ndo_set_vf_link_state = be_set_vf_link_state,
5297 .ndo_set_vf_spoofchk = be_set_vf_spoofchk,
5298#ifdef CONFIG_NET_POLL_CONTROLLER
5299 .ndo_poll_controller = be_netpoll,
5300#endif
5301 .ndo_bridge_setlink = be_ndo_bridge_setlink,
5302 .ndo_bridge_getlink = be_ndo_bridge_getlink,
5303#ifdef CONFIG_NET_RX_BUSY_POLL
5304 .ndo_busy_poll = be_busy_poll,
5305#endif
5306#ifdef CONFIG_BE2NET_VXLAN
5307 .ndo_add_vxlan_port = be_add_vxlan_port,
5308 .ndo_del_vxlan_port = be_del_vxlan_port,
5309 .ndo_features_check = be_features_check,
5310#endif
5311};
5312
5313static void be_netdev_init(struct net_device *netdev)
5314{
5315 struct be_adapter *adapter = netdev_priv(netdev);
5316
5317 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
5318 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
5319 NETIF_F_HW_VLAN_CTAG_TX;
5320 if (be_multi_rxq(adapter))
5321 netdev->hw_features |= NETIF_F_RXHASH;
5322
5323 netdev->features |= netdev->hw_features |
5324 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
5325
5326 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
5327 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
5328
5329 netdev->priv_flags |= IFF_UNICAST_FLT;
5330
5331 netdev->flags |= IFF_MULTICAST;
5332
5333 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
5334
5335 netdev->netdev_ops = &be_netdev_ops;
5336
5337 netdev->ethtool_ops = &be_ethtool_ops;
5338}
5339
5340static void be_cleanup(struct be_adapter *adapter)
5341{
5342 struct net_device *netdev = adapter->netdev;
5343
5344 rtnl_lock();
5345 netif_device_detach(netdev);
5346 if (netif_running(netdev))
5347 be_close(netdev);
5348 rtnl_unlock();
5349
5350 be_clear(adapter);
5351}
5352
5353static int be_resume(struct be_adapter *adapter)
5354{
5355 struct net_device *netdev = adapter->netdev;
5356 int status;
5357
5358 status = be_setup(adapter);
5359 if (status)
5360 return status;
5361
5362 if (netif_running(netdev)) {
5363 status = be_open(netdev);
5364 if (status)
5365 return status;
5366 }
5367
5368 netif_device_attach(netdev);
5369
5370 return 0;
5371}
5372
5373static int be_err_recover(struct be_adapter *adapter)
5374{
5375 struct device *dev = &adapter->pdev->dev;
5376 int status;
5377
5378 status = be_resume(adapter);
5379 if (status)
5380 goto err;
5381
5382 dev_info(dev, "Adapter recovery successful\n");
5383 return 0;
5384err:
5385 if (be_physfn(adapter))
5386 dev_err(dev, "Adapter recovery failed\n");
5387 else
5388 dev_err(dev, "Re-trying adapter recovery\n");
5389
5390 return status;
5391}
5392
5393static void be_err_detection_task(struct work_struct *work)
5394{
5395 struct be_adapter *adapter =
5396 container_of(work, struct be_adapter,
5397 be_err_detection_work.work);
5398 int status = 0;
5399
5400 be_detect_error(adapter);
5401
5402 if (be_check_error(adapter, BE_ERROR_HW)) {
5403 be_cleanup(adapter);
5404
5405
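		/* As of now, error recovery support is in Lancer only */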
5406 if (lancer_chip(adapter))
5407 status = be_err_recover(adapter);
5408 }
5409
5410
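	/* Re-arm error detection unless recovery failed on a PF;
	 * VFs always keep polling for errors.
	 */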
5411 if (!status || be_virtfn(adapter))
5412 be_schedule_err_detection(adapter);
5413}
5414
5415static void be_log_sfp_info(struct be_adapter *adapter)
5416{
5417 int status;
5418
5419 status = be_cmd_query_sfp_info(adapter);
5420 if (!status) {
		dev_err(&adapter->pdev->dev,
			"Unqualified SFP+ detected on %c from %s part no: %s\n",
			adapter->port_name, adapter->phy.vendor_name,
			adapter->phy.vendor_pn);
5425 }
5426 adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
5427}
5428
5429static void be_worker(struct work_struct *work)
5430{
5431 struct be_adapter *adapter =
5432 container_of(work, struct be_adapter, work.work);
5433 struct be_rx_obj *rxo;
5434 int i;
5435
	/* When interrupts are not yet enabled, just reap any pending
	 * MCC completions.
	 */
5439 if (!netif_running(adapter->netdev)) {
5440 local_bh_disable();
5441 be_process_mcc(adapter);
5442 local_bh_enable();
5443 goto reschedule;
5444 }
5445
5446 if (!adapter->stats_cmd_sent) {
5447 if (lancer_chip(adapter))
5448 lancer_cmd_get_pport_stats(adapter,
5449 &adapter->stats_cmd);
5450 else
5451 be_cmd_get_stats(adapter, &adapter->stats_cmd);
5452 }
5453
5454 if (be_physfn(adapter) &&
5455 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
5456 be_cmd_get_die_temperature(adapter);
5457
5458 for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
5462 if (rxo->rx_post_starved)
5463 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
5464 }
5465
5466
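	/* EQ-delay update for Skyhawk is done while notifying the EQ */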
5467 if (!skyhawk_chip(adapter))
5468 be_eqd_update(adapter, false);
5469
5470 if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
5471 be_log_sfp_info(adapter);
5472
5473reschedule:
5474 adapter->work_counter++;
5475 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
5476}
5477
5478static void be_unmap_pci_bars(struct be_adapter *adapter)
5479{
5480 if (adapter->csr)
5481 pci_iounmap(adapter->pdev, adapter->csr);
5482 if (adapter->db)
5483 pci_iounmap(adapter->pdev, adapter->db);
5484}
5485
5486static int db_bar(struct be_adapter *adapter)
5487{
5488 if (lancer_chip(adapter) || be_virtfn(adapter))
5489 return 0;
5490 else
5491 return 4;
5492}
5493
5494static int be_roce_map_pci_bars(struct be_adapter *adapter)
5495{
5496 if (skyhawk_chip(adapter)) {
5497 adapter->roce_db.size = 4096;
5498 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
5499 db_bar(adapter));
5500 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
5501 db_bar(adapter));
5502 }
5503 return 0;
5504}
5505
5506static int be_map_pci_bars(struct be_adapter *adapter)
5507{
5508 struct pci_dev *pdev = adapter->pdev;
5509 u8 __iomem *addr;
5510 u32 sli_intf;
5511
5512 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
5513 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
5514 SLI_INTF_FAMILY_SHIFT;
5515 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
5516
5517 if (BEx_chip(adapter) && be_physfn(adapter)) {
5518 adapter->csr = pci_iomap(pdev, 2, 0);
5519 if (!adapter->csr)
5520 return -ENOMEM;
5521 }
5522
5523 addr = pci_iomap(pdev, db_bar(adapter), 0);
5524 if (!addr)
5525 goto pci_map_err;
5526 adapter->db = addr;
5527
5528 if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
5529 if (be_physfn(adapter)) {
5530
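			/* PCICFG is the 2nd BAR in BE2 */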
5531 addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
5532 if (!addr)
5533 goto pci_map_err;
5534 adapter->pcicfg = addr;
5535 } else {
5536 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
5537 }
5538 }
5539
5540 be_roce_map_pci_bars(adapter);
5541 return 0;
5542
5543pci_map_err:
5544 dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
5545 be_unmap_pci_bars(adapter);
5546 return -ENOMEM;
5547}
5548
5549static void be_drv_cleanup(struct be_adapter *adapter)
5550{
5551 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
5552 struct device *dev = &adapter->pdev->dev;
5553
5554 if (mem->va)
5555 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5556
5557 mem = &adapter->rx_filter;
5558 if (mem->va)
5559 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5560
5561 mem = &adapter->stats_cmd;
5562 if (mem->va)
5563 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5564}
5565
5566
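/* Allocate and initialize various fields in be_adapter struct */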
5567static int be_drv_init(struct be_adapter *adapter)
5568{
5569 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
5570 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5571 struct be_dma_mem *rx_filter = &adapter->rx_filter;
5572 struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
5573 struct device *dev = &adapter->pdev->dev;
5574 int status = 0;
5575
5576 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
5577 mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
5578 &mbox_mem_alloc->dma,
5579 GFP_KERNEL);
5580 if (!mbox_mem_alloc->va)
5581 return -ENOMEM;
5582
5583 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
5584 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
5585 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
5586
5587 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
5588 rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
5589 &rx_filter->dma, GFP_KERNEL);
5590 if (!rx_filter->va) {
5591 status = -ENOMEM;
5592 goto free_mbox;
5593 }
5594
5595 if (lancer_chip(adapter))
5596 stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
5597 else if (BE2_chip(adapter))
5598 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
5599 else if (BE3_chip(adapter))
5600 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
5601 else
5602 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
5603 stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
5604 &stats_cmd->dma, GFP_KERNEL);
5605 if (!stats_cmd->va) {
5606 status = -ENOMEM;
5607 goto free_rx_filter;
5608 }
5609
5610 mutex_init(&adapter->mbox_lock);
5611 spin_lock_init(&adapter->mcc_lock);
5612 spin_lock_init(&adapter->mcc_cq_lock);
5613 init_completion(&adapter->et_cmd_compl);
5614
5615 pci_save_state(adapter->pdev);
5616
5617 INIT_DELAYED_WORK(&adapter->work, be_worker);
5618 INIT_DELAYED_WORK(&adapter->be_err_detection_work,
5619 be_err_detection_task);
5620
5621 adapter->rx_fc = true;
5622 adapter->tx_fc = true;
5623
5624
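	/* be_worker() queries the die temperature once every
	 * be_get_temp_freq iterations of the 1-second worker.
	 */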
5625 adapter->be_get_temp_freq = 64;
5626
5627 return 0;
5628
5629free_rx_filter:
5630 dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
5631free_mbox:
5632 dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
5633 mbox_mem_alloc->dma);
5634 return status;
5635}
5636
5637static void be_remove(struct pci_dev *pdev)
5638{
5639 struct be_adapter *adapter = pci_get_drvdata(pdev);
5640
5641 if (!adapter)
5642 return;
5643
5644 be_roce_dev_remove(adapter);
5645 be_intr_set(adapter, false);
5646
5647 be_cancel_err_detection(adapter);
5648
5649 unregister_netdev(adapter->netdev);
5650
5651 be_clear(adapter);
5652
5653
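	/* Tell FW we're done with firing cmds */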
5654 be_cmd_fw_clean(adapter);
5655
5656 be_unmap_pci_bars(adapter);
5657 be_drv_cleanup(adapter);
5658
5659 pci_disable_pcie_error_reporting(pdev);
5660
5661 pci_release_regions(pdev);
5662 pci_disable_device(pdev);
5663
5664 free_netdev(adapter->netdev);
5665}
5666
5667static ssize_t be_hwmon_show_temp(struct device *dev,
5668 struct device_attribute *dev_attr,
5669 char *buf)
5670{
5671 struct be_adapter *adapter = dev_get_drvdata(dev);
5672
5673
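	/* hwmon expects the temperature in millidegree Celsius */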
5674 if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
5675 return -EIO;
5676 else
5677 return sprintf(buf, "%u\n",
5678 adapter->hwmon_info.be_on_die_temp * 1000);
5679}
5680
5681static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
5682 be_hwmon_show_temp, NULL, 1);
5683
5684static struct attribute *be_hwmon_attrs[] = {
5685 &sensor_dev_attr_temp1_input.dev_attr.attr,
5686 NULL
5687};
5688
5689ATTRIBUTE_GROUPS(be_hwmon);
5690
5691static char *mc_name(struct be_adapter *adapter)
5692{
5693 char *str = "";
5694
5695 switch (adapter->mc_type) {
5696 case UMC:
5697 str = "UMC";
5698 break;
5699 case FLEX10:
5700 str = "FLEX10";
5701 break;
5702 case vNIC1:
5703 str = "vNIC-1";
5704 break;
5705 case nPAR:
5706 str = "nPAR";
5707 break;
5708 case UFP:
5709 str = "UFP";
5710 break;
5711 case vNIC2:
5712 str = "vNIC-2";
5713 break;
5714 default:
5715 str = "";
5716 }
5717
5718 return str;
5719}
5720
5721static inline char *func_name(struct be_adapter *adapter)
5722{
5723 return be_physfn(adapter) ? "PF" : "VF";
5724}
5725
5726static inline char *nic_name(struct pci_dev *pdev)
5727{
5728 switch (pdev->device) {
5729 case OC_DEVICE_ID1:
5730 return OC_NAME;
5731 case OC_DEVICE_ID2:
5732 return OC_NAME_BE;
5733 case OC_DEVICE_ID3:
5734 case OC_DEVICE_ID4:
5735 return OC_NAME_LANCER;
5736 case BE_DEVICE_ID2:
5737 return BE3_NAME;
5738 case OC_DEVICE_ID5:
5739 case OC_DEVICE_ID6:
5740 return OC_NAME_SH;
5741 default:
5742 return BE_NAME;
5743 }
5744}
5745
5746static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
5747{
5748 struct be_adapter *adapter;
5749 struct net_device *netdev;
5750 int status = 0;
5751
5752 dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);
5753
5754 status = pci_enable_device(pdev);
5755 if (status)
5756 goto do_none;
5757
5758 status = pci_request_regions(pdev, DRV_NAME);
5759 if (status)
5760 goto disable_dev;
5761 pci_set_master(pdev);
5762
5763 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
5764 if (!netdev) {
5765 status = -ENOMEM;
5766 goto rel_reg;
5767 }
5768 adapter = netdev_priv(netdev);
5769 adapter->pdev = pdev;
5770 pci_set_drvdata(pdev, adapter);
5771 adapter->netdev = netdev;
5772 SET_NETDEV_DEV(netdev, &pdev->dev);
5773
5774 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
5775 if (!status) {
5776 netdev->features |= NETIF_F_HIGHDMA;
5777 } else {
5778 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
5779 if (status) {
5780 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
5781 goto free_netdev;
5782 }
5783 }
5784
5785 status = pci_enable_pcie_error_reporting(pdev);
5786 if (!status)
5787 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
5788
5789 status = be_map_pci_bars(adapter);
5790 if (status)
5791 goto free_netdev;
5792
5793 status = be_drv_init(adapter);
5794 if (status)
5795 goto unmap_bars;
5796
5797 status = be_setup(adapter);
5798 if (status)
5799 goto drv_cleanup;
5800
5801 be_netdev_init(netdev);
5802 status = register_netdev(netdev);
5803 if (status != 0)
5804 goto unsetup;
5805
5806 be_roce_dev_add(adapter);
5807
5808 be_schedule_err_detection(adapter);
5809
5810
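	/* On-die temperature is reported only for the PF */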
5811 if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
5812 adapter->hwmon_info.hwmon_dev =
5813 devm_hwmon_device_register_with_groups(&pdev->dev,
5814 DRV_NAME,
5815 adapter,
5816 be_hwmon_groups);
5817 adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
5818 }
5819
5820 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
5821 func_name(adapter), mc_name(adapter), adapter->port_name);
5822
5823 return 0;
5824
5825unsetup:
5826 be_clear(adapter);
5827drv_cleanup:
5828 be_drv_cleanup(adapter);
5829unmap_bars:
5830 be_unmap_pci_bars(adapter);
5831free_netdev:
5832 free_netdev(netdev);
5833rel_reg:
5834 pci_release_regions(pdev);
5835disable_dev:
5836 pci_disable_device(pdev);
5837do_none:
5838 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
5839 return status;
5840}
5841
5842static int be_suspend(struct pci_dev *pdev, pm_message_t state)
5843{
5844 struct be_adapter *adapter = pci_get_drvdata(pdev);
5845
5846 if (adapter->wol_en)
5847 be_setup_wol(adapter, true);
5848
5849 be_intr_set(adapter, false);
5850 be_cancel_err_detection(adapter);
5851
5852 be_cleanup(adapter);
5853
5854 pci_save_state(pdev);
5855 pci_disable_device(pdev);
5856 pci_set_power_state(pdev, pci_choose_state(pdev, state));
5857 return 0;
5858}
5859
5860static int be_pci_resume(struct pci_dev *pdev)
5861{
5862 struct be_adapter *adapter = pci_get_drvdata(pdev);
5863 int status = 0;
5864
5865 status = pci_enable_device(pdev);
5866 if (status)
5867 return status;
5868
5869 pci_set_power_state(pdev, PCI_D0);
5870 pci_restore_state(pdev);
5871
5872 status = be_resume(adapter);
5873 if (status)
5874 return status;
5875
5876 be_schedule_err_detection(adapter);
5877
5878 if (adapter->wol_en)
5879 be_setup_wol(adapter, false);
5880
5881 return 0;
5882}
5883
/* Quiesce the device before shutdown: cancel scheduled work, detach the
 * netdev and reset the function so the HW stops DMAing.
 */
5887static void be_shutdown(struct pci_dev *pdev)
5888{
5889 struct be_adapter *adapter = pci_get_drvdata(pdev);
5890
5891 if (!adapter)
5892 return;
5893
5894 be_roce_dev_shutdown(adapter);
5895 cancel_delayed_work_sync(&adapter->work);
5896 be_cancel_err_detection(adapter);
5897
5898 netif_device_detach(adapter->netdev);
5899
5900 be_cmd_reset_function(adapter);
5901
5902 pci_disable_device(pdev);
5903}
5904
5905static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
5906 pci_channel_state_t state)
5907{
5908 struct be_adapter *adapter = pci_get_drvdata(pdev);
5909
5910 dev_err(&adapter->pdev->dev, "EEH error detected\n");
5911
5912 if (!be_check_error(adapter, BE_ERROR_EEH)) {
5913 be_set_error(adapter, BE_ERROR_EEH);
5914
5915 be_cancel_err_detection(adapter);
5916
5917 be_cleanup(adapter);
5918 }
5919
5920 if (state == pci_channel_io_perm_failure)
5921 return PCI_ERS_RESULT_DISCONNECT;
5922
5923 pci_disable_device(pdev);
5924
	/* The error could have caused the FW to trigger a flash debug dump.
	 * Resetting the card while the flash dump is in progress can cause
	 * it not to recover, so wait for the dump to finish. The wait is
	 * needed only once per adapter, hence it is done only on the first
	 * function (devfn 0).
	 */
5931 if (pdev->devfn == 0)
5932 ssleep(30);
5933
5934 return PCI_ERS_RESULT_NEED_RESET;
5935}
5936
5937static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
5938{
5939 struct be_adapter *adapter = pci_get_drvdata(pdev);
5940 int status;
5941
5942 dev_info(&adapter->pdev->dev, "EEH reset\n");
5943
5944 status = pci_enable_device(pdev);
5945 if (status)
5946 return PCI_ERS_RESULT_DISCONNECT;
5947
5948 pci_set_master(pdev);
5949 pci_set_power_state(pdev, PCI_D0);
5950 pci_restore_state(pdev);
5951
5952
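	/* Check if the card is OK and the FW is ready */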
5953 dev_info(&adapter->pdev->dev,
5954 "Waiting for FW to be ready after EEH reset\n");
5955 status = be_fw_wait_ready(adapter);
5956 if (status)
5957 return PCI_ERS_RESULT_DISCONNECT;
5958
5959 pci_cleanup_aer_uncorrect_error_status(pdev);
5960 be_clear_error(adapter, BE_CLEAR_ALL);
5961 return PCI_ERS_RESULT_RECOVERED;
5962}
5963
5964static void be_eeh_resume(struct pci_dev *pdev)
5965{
5966 int status = 0;
5967 struct be_adapter *adapter = pci_get_drvdata(pdev);
5968
5969 dev_info(&adapter->pdev->dev, "EEH resume\n");
5970
5971 pci_save_state(pdev);
5972
5973 status = be_resume(adapter);
5974 if (status)
5975 goto err;
5976
5977 be_schedule_err_detection(adapter);
5978 return;
5979err:
5980 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
5981}
5982
5983static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
5984{
5985 struct be_adapter *adapter = pci_get_drvdata(pdev);
5986 u16 num_vf_qs;
5987 int status;
5988
5989 if (!num_vfs)
5990 be_vf_clear(adapter);
5991
5992 adapter->num_vfs = num_vfs;
5993
5994 if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
5995 dev_warn(&pdev->dev,
5996 "Cannot disable VFs while they are assigned\n");
5997 return -EBUSY;
5998 }
5999
	/* When the HW is in an SRIOV-capable configuration, the PF-pool
	 * resources are equally distributed across the max number of VFs.
	 * The user may request only a subset of the max VFs to be enabled.
	 * Based on num_vfs, redistribute the resources across num_vfs so
	 * that each VF gets access to more of them.
	 * This is not available in BE3 FW, and Lancer FW does the
	 * redistribution itself, so it is done here only for Skyhawk.
	 */
6008 if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
6009 num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
6010 status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
6011 adapter->num_vfs, num_vf_qs);
6012 if (status)
6013 dev_err(&pdev->dev,
6014 "Failed to optimize SR-IOV resources\n");
6015 }
6016
6017 status = be_get_resources(adapter);
6018 if (status)
6019 return be_cmd_status(status);
6020
6021
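	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */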
6022 rtnl_lock();
6023 status = be_update_queues(adapter);
6024 rtnl_unlock();
6025 if (status)
6026 return be_cmd_status(status);
6027
6028 if (adapter->num_vfs)
6029 status = be_vf_setup(adapter);
6030
6031 if (!status)
6032 return adapter->num_vfs;
6033
6034 return 0;
6035}
6036
6037static const struct pci_error_handlers be_eeh_handlers = {
6038 .error_detected = be_eeh_err_detected,
6039 .slot_reset = be_eeh_reset,
6040 .resume = be_eeh_resume,
6041};
6042
6043static struct pci_driver be_driver = {
6044 .name = DRV_NAME,
6045 .id_table = be_dev_ids,
6046 .probe = be_probe,
6047 .remove = be_remove,
6048 .suspend = be_suspend,
6049 .resume = be_pci_resume,
6050 .shutdown = be_shutdown,
6051 .sriov_configure = be_pci_sriov_configure,
6052 .err_handler = &be_eeh_handlers
6053};
6054
6055static int __init be_init_module(void)
6056{
6057 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
6058 rx_frag_size != 2048) {
6059 printk(KERN_WARNING DRV_NAME
6060 " : Module param rx_frag_size must be 2048/4096/8192."
6061 " Using 2048\n");
6062 rx_frag_size = 2048;
6063 }
6064
6065 if (num_vfs > 0) {
6066 pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
6067 pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
6068 }
6069
6070 return pci_register_driver(&be_driver);
6071}
6072module_init(be_init_module);
6073
6074static void __exit be_exit_module(void)
6075{
6076 pci_unregister_driver(&be_driver);
6077}
6078module_exit(be_exit_module);
6079