#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }
};

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

static inline bool be_is_mc(struct be_adapter *adapter)
{
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	memset(mem->va, 0, mem->size);
	return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_err)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

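/* EQ and CQ doorbells below follow a common layout: the queue id selects
 * the ring, num_popped returns the number of entries the driver has
 * consumed, and the arm bit (DB_EQ_REARM/DB_CQ_REARM) asks the controller
 * to raise another interrupt when new entries arrive. Writing with
 * arm=false only returns credits, which is what the polling path uses.
 */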
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
		DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
				       MAC_ADDRESS_TYPE_NETWORK, false,
				       adapter->if_handle, 0);
	if (status)
		goto err;

	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
					 adapter->if_handle, &adapter->pmac_id[0], 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}

static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_mismatch_drops =
					port_stats->rx_address_mismatch_drops +
					port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

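/* The ERX drop counter read in be_parse_stats() is only 16 bits wide in
 * hardware. accumulate_16bit_val() folds each raw reading into a 32-bit
 * accumulator: the reading replaces the low half, and a wrap (reading
 * smaller than the previous low half) adds 1 to the high half. Example:
 * acc = 0x0001FFF0 with a new reading of 0x0010 yields 0x00020010.
 */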
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, it is safe to use v1 for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* this erx HW counter is actually only 16 bits wide and can
		 * wrap around; accumulate it into a 32-bit driver counter
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	/* Report carrier as off until the first link-status event is seen */
	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			       u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

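/* A transmitted packet consumes one WRB for the header descriptor, one
 * for the linear part of the skb (if any) and one per page fragment.
 * On non-Lancer chips the total per packet must be even, so a dummy WRB
 * is added when the count would otherwise be odd.
 */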
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			   bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			      hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
						LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
					      tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
					      udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

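/* Builds the WRB chain for a packet: DMA-maps the linear part and each
 * page fragment, fills one WRB per mapping and finally the header WRB
 * reserved at the original queue head. On a mapping error the queue head
 * is rewound and all mappings done so far are unwound.
 */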
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			   struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	/* For vlan tagged pkts, BE
	 * 1) calculates checksum even when CSO is not requested
	 * 2) calculates checksum wrongly for padded pkts less than
	 *    60 bytes long.
	 * As a workaround disable TX vlan offloading in such cases.
	 */
	if (unlikely(vlan_tx_tag_present(skb) &&
		     (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			goto tx_drop;

		skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
		if (unlikely(!skb))
			goto tx_drop;

		skb->vlan_tci = 0;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				   skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (new_mtu < BE_MIN_MTU ||
	    new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			 "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU,
			 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;

	if (vf) {
		vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
		status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
					    1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					    vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					    NULL, 0, 1, 1);
	}

	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter, false, 0);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter, false, 0);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			  int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
					       adapter->vf_cfg[vf].if_handle);
	}

	if (status)
		dev_info(&adapter->pdev->dev,
			 "VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			     int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
			"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}

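/* Adaptive interrupt coalescing: once per second the RX packet rate for
 * this event queue is sampled and mapped to an EQ delay of roughly
 * (pps / 110000) * 8, clamped to [min_eqd, max_eqd]; rates low enough to
 * produce a delay under 10 disable coalescing altogether.
 */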
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats;
	ulong now = jiffies;
	ulong delta;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds at most */
	delta = now - stats->rx_jiffies;
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
		(rxcp->ip_csum || rxcp->ipv6);
}

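/* RX buffers are carved out of compound pages: each page of
 * adapter->big_page_size is split into rx_frag_size chunks posted as
 * individual receive fragments. The page is DMA-unmapped only when its
 * last chunk (marked last_page_user) is retrieved.
 */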
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throwaway the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}

static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

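/* RX completions are consumed in place: an entry is valid only while its
 * 'valid' dword is non-zero. The rmb() orders reading that flag before
 * the rest of the entry, and the flag is cleared after parsing so the
 * slot reads as invalid when the ring wraps around.
 */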
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; so that it can be consumed
	 * by the next completion */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

/* Return the number of events pending in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}

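/* Drains pending events and defers the real work to NAPI. If events were
 * found the EQ is left un-armed (no further interrupt until be_poll()
 * re-arms it); otherwise it is re-armed immediately.
 */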
static int event_handle(struct be_eq_obj *eqo)
{
	bool rearm = false;
	int num = events_get(eqo);

	/* Deal with any spurious interrupts that come without events */
	if (!num)
		rearm = true;

	be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
	if (num)
		napi_schedule(&eqo->napi);

	return num;
}

/* Drain events and leave the EQ in a disarmed state */
static void be_eq_clean(struct be_eq_obj *eqo)
{
	int num = events_get(eqo);

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
}

static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}

static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}

static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_clean(eqo);
		if (eqo->q.created)
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
		be_queue_free(adapter, &eqo->q);
	}
}

static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		eqo->enable_aic = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
		if (rc)
			return rc;
	}
	return 0;
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called after the EQs are created; MCC shares an existing EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

static int be_num_txqs_want(struct be_adapter *adapter)
{
	if (sriov_enabled(adapter) || be_is_mc(adapter) ||
	    lancer_chip(adapter) || !be_physfn(adapter) ||
	    adapter->generation == BE_GEN2)
		return 1;
	else
		return MAX_TX_QS;
}

static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
					     adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, more than one txq
		 * shares the same eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}

static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	int i, status;

	for_all_tx_queues(adapter, txo, i) {
		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
		if (status)
			return status;
	}

	return 0;
}

static void be_rx_cqs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* One RSS ring per irq plus a default non-RSS ring; with a single
	 * irq only the default ring is created
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_info(&adapter->pdev->dev,
			 "Created only %d receive queues\n", adapter->num_rx_qs);

	return 0;
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	int num_evts;

	/* With INTx, only the default EQ (index 0) is used */
	num_evts = event_handle(&adapter->eq_obj[0]);
	if (num_evts)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	event_handle(eqo);
	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return rxcp->tcpf && !rxcp->err;
}

static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}

static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					      wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs.
		 */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget);
}

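/* NAPI poll handler. One EQ may service several TX and RX rings (ring i
 * maps to EQ i % num_evt_qs), so both loops below stride through the
 * rings by num_evt_qs. TX gets its own budget; if any TX ring exhausts
 * it, the full budget is reported consumed so that polling continues.
 */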
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i;
	bool tx_done;

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed.
	 * For other EQs the loop iterates only once.
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
	}
	return max_work;
}

void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);
	}

	if (ue_lo || ue_hi ||
	    sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev,
			"Unrecoverable error in the card\n");
	}

	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
					"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
					"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"sliport error2 0x%x\n", sliport_err2);
	}
}
2084
2085static void be_msix_disable(struct be_adapter *adapter)
2086{
2087 if (msix_enabled(adapter)) {
2088 pci_disable_msix(adapter->pdev);
2089 adapter->num_msix_vec = 0;
2090 }
2091}
2092
2093static uint be_num_rss_want(struct be_adapter *adapter)
2094{
2095 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2096 adapter->num_vfs == 0 && be_physfn(adapter) &&
2097 !be_is_mc(adapter))
2098 return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2099 else
2100 return 0;
2101}
2102
2103static void be_msix_enable(struct be_adapter *adapter)
2104{
2105#define BE_MIN_MSIX_VECTORS 1
2106 int i, status, num_vec;
2107
2108
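	/* Even when RSS is not in use, one vector is still needed for the
	 * default RXQ
	 */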
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				    num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}

static int be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);

#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 dev_vfs;

		pos = pci_find_ext_capability(adapter->pdev,
					      PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
				     pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);

		adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
		if (adapter->num_vfs != num_vfs)
			dev_info(&adapter->pdev->dev,
				 "Device supports %d VFs and not %d\n",
				 adapter->num_vfs, num_vfs);

		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status)
			adapter->num_vfs = 0;

		if (adapter->num_vfs) {
			adapter->vf_cfg = kcalloc(num_vfs,
						  sizeof(struct be_vf_cfg),
						  GFP_KERNEL);
			if (!adapter->vf_cfg)
				return -ENOMEM;
		}
	}
#endif
	return 0;
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (sriov_enabled(adapter)) {
		pci_disable_sriov(adapter->pdev);
		kfree(adapter->vf_cfg);
		adapter->num_vfs = 0;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->idx].vector;
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;

		if (!be_physfn(adapter))
			return status;
	}

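	/* Fall back to INTx */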
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

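	/* MSI-X: one IRQ was registered per event queue */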
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}

static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
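
			/* After rxq destruction, wait 1ms for all dma to end
			 * and the flush compl to arrive
			 */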
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_async_mcc_disable(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

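	/* Wait for all pending TX completions to arrive so that
	 * all TX skbs are freed
	 */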
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	return 0;
}

static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

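	/* Create the default (non-RSS) RXQ first */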
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

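	/* Post the initial batch of receive buffers */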
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	status = be_cmd_link_status_query(adapter, NULL, NULL,
					  &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -ENOMEM;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
				PCICFG_PM_CONTROL_OFFSET,
				PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size,
					  cmd.va, cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
						 adapter->netdev->dev_addr,
						 &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

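/* Generate a seed MAC address and assign the VFs' MAC addresses
 * incrementally from it
 */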
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
}

static int be_clear(struct be_adapter *adapter)
{
	int i = 1;

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

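	/* Tell the FW we're done firing cmds */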
	be_cmd_fw_clean(adapter);

	be_msix_disable(adapter);
	kfree(adapter->pmac_id);
	return 0;
}

static void be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
}

static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	u16 def_vlan, lnk_speed;
	int status;

	be_vf_setup_init(adapter);

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			       BE_IF_FLAGS_MULTICAST;
	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
					  &vf_cfg->if_handle, NULL, vf + 1);
		if (status)
			goto err;
	}

	status = be_vf_eth_addr_config(adapter);
	if (status)
		goto err;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
						  NULL, vf + 1);
		if (status)
			goto err;
		vf_cfg->tx_rate = lnk_speed * 10;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
					       vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;
	}
	return 0;
err:
	return status;
}

static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->promiscuous = false;
	adapter->eq_next_idx = 0;
}

static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
{
	u32 pmac_id;
	int status;
	bool pmac_id_active;

	status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
					  &pmac_id, mac);
	if (status != 0)
		goto do_none;

	if (pmac_id_active) {
		status = be_cmd_mac_addr_query(adapter, mac,
					       MAC_ADDRESS_TYPE_NETWORK,
					       false, adapter->if_handle,
					       pmac_id);

		if (!status)
			adapter->pmac_id[0] = pmac_id;
	} else {
		status = be_cmd_pmac_add(adapter, mac,
					 adapter->if_handle,
					 &adapter->pmac_id[0], 0);
	}
do_none:
	return status;
}

static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];

	be_setup_init(adapter);

	be_cmd_req_native_mode(adapter);

	be_msix_enable(adapter);

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	memset(mac, 0, ETH_ALEN);
	status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
				       true, 0, 0);
	if (status)
		goto err;
	memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
	memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
		    BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
		cap_flags |= BE_IF_FLAGS_RSS;
		en_flags |= BE_IF_FLAGS_RSS;
	}
	status = be_cmd_if_create(adapter, cap_flags, en_flags,
				  netdev->dev_addr, &adapter->if_handle,
				  &adapter->pmac_id[0], 0);
	if (status != 0)
		goto err;

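	/* For VFs, the MAC queried from the card may not be valid;
	 * re-query it: via the MAC list on Lancer, via the interface
	 * handle on BEx
	 */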
	if (!be_physfn(adapter)) {
		if (lancer_chip(adapter))
			status = be_add_mac_from_list(adapter, mac);
		else
			status = be_cmd_mac_addr_query(adapter, mac,
					MAC_ADDRESS_TYPE_NETWORK, false,
					adapter->if_handle, 0);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	status = be_vid_config(adapter, false, 0);
	if (status)
		goto err;

	be_set_rx_mode(adapter->netdev);

	status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
		goto err;

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
		status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
						 adapter->rx_fc);

		if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
			goto err;
	}

	pcie_set_readrq(adapter->pdev, 4096);

	if (sriov_enabled(adapter)) {
		status = be_vf_setup(adapter);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;

	return 0;
err:
	be_clear(adapter);
	return status;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i)
		event_handle(eqo);
}
#endif

#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			     const u8 *p, u32 img_start, int image_size,
			     int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
				      (image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
			"could not get crc from flash, not flashing redboot\n");
		return false;
	}

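	/* Update redboot only if the flashed CRC differs from the image CRC */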
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

static bool phy_flashing_required(struct be_adapter *adapter)
{
	int status = 0;
	struct be_phy_info phy_info;

	status = be_cmd_get_phy_info(adapter, &phy_info);
	if (status)
		return false;
	if ((phy_info.phy_type == TN_8022) &&
	    (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
		return true;
	}
	return false;
}

static int be_flash_data(struct be_adapter *adapter,
			 const struct firmware *fw,
			 struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[10] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3},
		{ FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
		    (!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			if (!total_bytes) {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
					pflashcomp[i].optype, flash_op,
					num_bytes);
			if (status) {
				if ((status == ILLEGAL_IOCTL_REQ) &&
				    (pflashcomp[i].optype ==
				     IMG_TYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}

static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
			 sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

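		/* Copy the next chunk of the image into the DMA buffer */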
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
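		/* Commit the downloaded image with a final zero-length write */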
		status = lancer_cmd_write_object(adapter, &flash_cmd,
				0, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}

static int be_fw_download(struct be_adapter *adapter,
			  const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
	    (get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
						       num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
		   (get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
				       pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				       pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		db_reg = 4;
	} else {
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
			       pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					   &rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2) {
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	} else {
		if (lancer_chip(adapter))
			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
		else
			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	}
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -ENOMEM;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

bool be_is_wol_supported(struct be_adapter *adapter)
{
	return (adapter->wol_cap & BE_WOL_CAP) &&
	       !be_is_wol_excluded(adapter);
}

static int be_get_config(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
				     &adapter->function_mode,
				     &adapter->function_caps);
	if (status)
		return status;

	if (adapter->function_mode & FLEX10_MODE)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	if (be_physfn(adapter))
		adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
	else
		adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

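	/* One extra entry: pmac_id[0] holds the primary MAC's pmac id */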
	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
				   sizeof(u32), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
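		/* If the WoL capability query fails, fall back to the
		 * exclusion list to decide WoL support
		 */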
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	return 0;
}

static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
	case OC_DEVICE_ID5:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
			  SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
		    if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
				       SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}

static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(1000);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

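			/* Wait for the adapter to recover from the error */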
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
					   SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}

static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"Adapter in error state. Trying to recover.\n");

		status = lancer_test_and_set_rdy_state(adapter);
		if (status)
			goto err;

		netif_device_detach(adapter->netdev);

		if (netif_running(adapter->netdev))
			be_close(adapter->netdev);

		be_clear(adapter);

		adapter->fw_timeout = false;

		status = be_setup(adapter);
		if (status)
			goto err;

		if (netif_running(adapter->netdev)) {
			status = be_open(adapter->netdev);
			if (status)
				goto err;
		}

		netif_device_attach(adapter->netdev);

		dev_err(&adapter->pdev->dev,
			"Adapter error recovery succeeded\n");
	}
	return;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter error recovery failed\n");
}

static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	if (lancer_chip(adapter))
		lancer_test_and_recover_fn_err(adapter);

	be_detect_dump_ue(adapter);

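	/* When interrupts are not yet enabled, just reap any pending
	 * MCC completions
	 */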
	if (!netif_running(adapter->netdev)) {
		be_process_mcc(adapter);
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static int __devinit be_probe(struct pci_dev *pdev,
			      const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_sriov_enable(adapter);
	if (status)
		goto free_netdev;

	status = be_ctrl_init(adapter);
	if (status)
		goto disable_sriov;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&pdev->dev,
				"Adapter in non-recoverable error\n");
			goto ctrl_clean;
		}
	}

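	/* Sync up with the fw's ready state */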
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

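	/* Tell the fw we're ready to fire cmds */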
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

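	/* The INTR bit may be set in the card when probed by a kdump
	 * kernel after a crash; make sure it is cleared
	 */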
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
		 adapter->port_num);

	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
disable_sriov:
	be_sriov_disable(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

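	/* Tell the fw we're ready to fire cmds */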
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}

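/* be_shutdown() issues a function reset, which stops the device from
 * DMAing any further data
 */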
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

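	/* Check if the card is ok and the fw is ready */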
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

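	/* Tell the fw we're ready to fire cmds */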
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
		       " : Module param rx_frag_size must be 2048/4096/8192."
		       " Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);