1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20#include <linux/etherdevice.h>
21#include <linux/if_vlan.h>
22#include <linux/interrupt.h>
23#include <linux/ip.h>
24#include <net/ipv6.h>
25#include <net/ip6_checksum.h>
26#include <linux/prefetch.h>
27#include "bnx2x_cmn.h"
28#include "bnx2x_init.h"
29#include "bnx2x_sp.h"
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
46{
47 struct bnx2x_fastpath *from_fp = &bp->fp[from];
48 struct bnx2x_fastpath *to_fp = &bp->fp[to];
49
50
51 from_fp->napi = to_fp->napi;
52
53
54 memcpy(to_fp, from_fp, sizeof(*to_fp));
55 to_fp->index = to;
56}
57
58int load_count[2][3] = { {0} };
59
60
61
62
63static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
64 u16 idx, unsigned int *pkts_compl,
65 unsigned int *bytes_compl)
66{
67 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
68 struct eth_tx_start_bd *tx_start_bd;
69 struct eth_tx_bd *tx_data_bd;
70 struct sk_buff *skb = tx_buf->skb;
71 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
72 int nbd;
73
74
75 prefetch(&skb->end);
76
77 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
78 txdata->txq_index, idx, tx_buf, skb);
79
80
81 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
82 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
83 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
84
85
86 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
87#ifdef BNX2X_STOP_ON_ERROR
88 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
89 BNX2X_ERR("BAD nbd!\n");
90 bnx2x_panic();
91 }
92#endif
93 new_cons = nbd + tx_buf->first_bd;
94
95
96 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
97
98
99 --nbd;
100 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
101
102
103 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
104 --nbd;
105 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
106 }
107
108
109 while (nbd > 0) {
110
111 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
112 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
113 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
114 if (--nbd)
115 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
116 }
117
118
119 WARN_ON(!skb);
120 if (likely(skb)) {
121 (*pkts_compl)++;
122 (*bytes_compl) += skb->len;
123 }
124
125 dev_kfree_skb_any(skb);
126 tx_buf->first_bd = 0;
127 tx_buf->skb = NULL;
128
129 return new_cons;
130}
131
132int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
133{
134 struct netdev_queue *txq;
135 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
136 unsigned int pkts_compl = 0, bytes_compl = 0;
137
138#ifdef BNX2X_STOP_ON_ERROR
139 if (unlikely(bp->panic))
140 return -1;
141#endif
142
143 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
144 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
145 sw_cons = txdata->tx_pkt_cons;
146
147 while (sw_cons != hw_cons) {
148 u16 pkt_cons;
149
150 pkt_cons = TX_BD(sw_cons);
151
152 DP(NETIF_MSG_TX_DONE,
153 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
154 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
155
156 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
157 &pkts_compl, &bytes_compl);
158
159 sw_cons++;
160 }
161
162 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
163
164 txdata->tx_pkt_cons = sw_cons;
165 txdata->tx_bd_cons = bd_cons;
166
167
168
169
170
171
172
173
174
175
176 smp_mb();
177
178 if (unlikely(netif_tx_queue_stopped(txq))) {
179
180
181
182
183
184
185
186
187
188
189 __netif_tx_lock(txq, smp_processor_id());
190
191 if ((netif_tx_queue_stopped(txq)) &&
192 (bp->state == BNX2X_STATE_OPEN) &&
193 (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4))
194 netif_tx_wake_queue(txq);
195
196 __netif_tx_unlock(txq);
197 }
198 return 0;
199}
200
201static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
202 u16 idx)
203{
204 u16 last_max = fp->last_max_sge;
205
206 if (SUB_S16(idx, last_max) > 0)
207 fp->last_max_sge = idx;
208}
209
210static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
211 u16 sge_len,
212 struct eth_end_agg_rx_cqe *cqe)
213{
214 struct bnx2x *bp = fp->bp;
215 u16 last_max, last_elem, first_elem;
216 u16 delta = 0;
217 u16 i;
218
219 if (!sge_len)
220 return;
221
222
223 for (i = 0; i < sge_len; i++)
224 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
225 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
226
227 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
228 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
229
230
231 prefetch((void *)(fp->sge_mask));
232 bnx2x_update_last_max_sge(fp,
233 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
234
235 last_max = RX_SGE(fp->last_max_sge);
236 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
237 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
238
239
240 if (last_elem + 1 != first_elem)
241 last_elem++;
242
243
244 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
245 if (likely(fp->sge_mask[i]))
246 break;
247
248 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
249 delta += BIT_VEC64_ELEM_SZ;
250 }
251
252 if (delta > 0) {
253 fp->rx_sge_prod += delta;
254
255 bnx2x_clear_sge_mask_next_elems(fp);
256 }
257
258 DP(NETIF_MSG_RX_STATUS,
259 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
260 fp->last_max_sge, fp->rx_sge_prod);
261}
262
263
264
265
266static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
267 const struct eth_fast_path_rx_cqe *cqe)
268{
269
270 if ((bp->dev->features & NETIF_F_RXHASH) &&
271 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
272 return le32_to_cpu(cqe->rss_hash_result);
273 return 0;
274}
275
276static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
277 u16 cons, u16 prod,
278 struct eth_fast_path_rx_cqe *cqe)
279{
280 struct bnx2x *bp = fp->bp;
281 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
282 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
283 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
284 dma_addr_t mapping;
285 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
286 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
287
288
289 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
290 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
291
292
293 mapping = dma_map_single(&bp->pdev->dev,
294 first_buf->data + NET_SKB_PAD,
295 fp->rx_buf_size, DMA_FROM_DEVICE);
296
297
298
299
300
301
302 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
303
304 bnx2x_reuse_rx_data(fp, cons, prod);
305 tpa_info->tpa_state = BNX2X_TPA_ERROR;
306 return;
307 }
308
309
310 prod_rx_buf->data = first_buf->data;
311 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
312
313 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
314 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
315
316
317 *first_buf = *cons_rx_buf;
318
319
320 tpa_info->parsing_flags =
321 le16_to_cpu(cqe->pars_flags.flags);
322 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
323 tpa_info->tpa_state = BNX2X_TPA_START;
324 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
325 tpa_info->placement_offset = cqe->placement_offset;
326 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);
327 if (fp->mode == TPA_MODE_GRO) {
328 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
329 tpa_info->full_page =
330 SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
331 tpa_info->gro_size = gro_size;
332 }
333
334#ifdef BNX2X_STOP_ON_ERROR
335 fp->tpa_queue_used |= (1 << queue);
336#ifdef _ASM_GENERIC_INT_L64_H
337 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
338#else
339 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
340#endif
341 fp->tpa_queue_used);
342#endif
343}
344
345
346
347
348
349#define TPA_TSTAMP_OPT_LEN 12
350
351
352
353
354
355
356
357
358
359
360
361static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
362 u16 len_on_bd)
363{
364
365
366
367
368 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
369
370 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
371 PRS_FLAG_OVERETH_IPV6)
372 hdrs_len += sizeof(struct ipv6hdr);
373 else
374 hdrs_len += sizeof(struct iphdr);
375
376
377
378
379
380
381
382 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
383 hdrs_len += TPA_TSTAMP_OPT_LEN;
384
385 return len_on_bd - hdrs_len;
386}
387
388static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
389 struct bnx2x_fastpath *fp, u16 index)
390{
391 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
392 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
393 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
394 dma_addr_t mapping;
395
396 if (unlikely(page == NULL)) {
397 BNX2X_ERR("Can't alloc sge\n");
398 return -ENOMEM;
399 }
400
401 mapping = dma_map_page(&bp->pdev->dev, page, 0,
402 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
403 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
404 __free_pages(page, PAGES_PER_SGE_SHIFT);
405 BNX2X_ERR("Can't map sge\n");
406 return -ENOMEM;
407 }
408
409 sw_buf->page = page;
410 dma_unmap_addr_set(sw_buf, mapping, mapping);
411
412 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
413 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
414
415 return 0;
416}
417
418static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
419 struct bnx2x_agg_info *tpa_info,
420 u16 pages,
421 struct sk_buff *skb,
422 struct eth_end_agg_rx_cqe *cqe,
423 u16 cqe_idx)
424{
425 struct sw_rx_page *rx_pg, old_rx_pg;
426 u32 i, frag_len, frag_size;
427 int err, j, frag_id = 0;
428 u16 len_on_bd = tpa_info->len_on_bd;
429 u16 full_page = 0, gro_size = 0;
430
431 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
432
433 if (fp->mode == TPA_MODE_GRO) {
434 gro_size = tpa_info->gro_size;
435 full_page = tpa_info->full_page;
436 }
437
438
439 if (frag_size) {
440 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
441 tpa_info->parsing_flags, len_on_bd);
442
443
444 if (fp->mode == TPA_MODE_GRO)
445 skb_shinfo(skb)->gso_type =
446 (GET_FLAG(tpa_info->parsing_flags,
447 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
448 PRS_FLAG_OVERETH_IPV6) ?
449 SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
450 }
451
452
453#ifdef BNX2X_STOP_ON_ERROR
454 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
455 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
456 pages, cqe_idx);
457 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
458 bnx2x_panic();
459 return -EINVAL;
460 }
461#endif
462
463
464 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
465 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
466
467
468
469 if (fp->mode == TPA_MODE_GRO)
470 frag_len = min_t(u32, frag_size, (u32)full_page);
471 else
472 frag_len = min_t(u32, frag_size,
473 (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));
474
475 rx_pg = &fp->rx_page_ring[sge_idx];
476 old_rx_pg = *rx_pg;
477
478
479
480 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
481 if (unlikely(err)) {
482 fp->eth_q_stats.rx_skb_alloc_failed++;
483 return err;
484 }
485
486
487 dma_unmap_page(&bp->pdev->dev,
488 dma_unmap_addr(&old_rx_pg, mapping),
489 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
490
491 if (fp->mode == TPA_MODE_LRO)
492 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
493 else {
494 int rem;
495 int offset = 0;
496 for (rem = frag_len; rem > 0; rem -= gro_size) {
497 int len = rem > gro_size ? gro_size : rem;
498 skb_fill_page_desc(skb, frag_id++,
499 old_rx_pg.page, offset, len);
500 if (offset)
501 get_page(old_rx_pg.page);
502 offset += len;
503 }
504 }
505
506 skb->data_len += frag_len;
507 skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
508 skb->len += frag_len;
509
510 frag_size -= frag_len;
511 }
512
513 return 0;
514}
515
516static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
517 struct bnx2x_agg_info *tpa_info,
518 u16 pages,
519 struct eth_end_agg_rx_cqe *cqe,
520 u16 cqe_idx)
521{
522 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
523 u8 pad = tpa_info->placement_offset;
524 u16 len = tpa_info->len_on_bd;
525 struct sk_buff *skb = NULL;
526 u8 *new_data, *data = rx_buf->data;
527 u8 old_tpa_state = tpa_info->tpa_state;
528
529 tpa_info->tpa_state = BNX2X_TPA_STOP;
530
531
532
533
534 if (old_tpa_state == BNX2X_TPA_ERROR)
535 goto drop;
536
537
538 new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
539
540
541
542
543 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
544 fp->rx_buf_size, DMA_FROM_DEVICE);
545 if (likely(new_data))
546 skb = build_skb(data, 0);
547
548 if (likely(skb)) {
549#ifdef BNX2X_STOP_ON_ERROR
550 if (pad + len > fp->rx_buf_size) {
551 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
552 pad, len, fp->rx_buf_size);
553 bnx2x_panic();
554 return;
555 }
556#endif
557
558 skb_reserve(skb, pad + NET_SKB_PAD);
559 skb_put(skb, len);
560 skb->rxhash = tpa_info->rxhash;
561
562 skb->protocol = eth_type_trans(skb, bp->dev);
563 skb->ip_summed = CHECKSUM_UNNECESSARY;
564
565 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
566 skb, cqe, cqe_idx)) {
567 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
568 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
569 napi_gro_receive(&fp->napi, skb);
570 } else {
571 DP(NETIF_MSG_RX_STATUS,
572 "Failed to allocate new pages - dropping packet!\n");
573 dev_kfree_skb_any(skb);
574 }
575
576
577
578 rx_buf->data = new_data;
579
580 return;
581 }
582 kfree(new_data);
583drop:
584
585 DP(NETIF_MSG_RX_STATUS,
586 "Failed to allocate or map a new skb - dropping packet!\n");
587 fp->eth_q_stats.rx_skb_alloc_failed++;
588}
589
590static int bnx2x_alloc_rx_data(struct bnx2x *bp,
591 struct bnx2x_fastpath *fp, u16 index)
592{
593 u8 *data;
594 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
595 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
596 dma_addr_t mapping;
597
598 data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
599 if (unlikely(data == NULL))
600 return -ENOMEM;
601
602 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
603 fp->rx_buf_size,
604 DMA_FROM_DEVICE);
605 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
606 kfree(data);
607 BNX2X_ERR("Can't map rx data\n");
608 return -ENOMEM;
609 }
610
611 rx_buf->data = data;
612 dma_unmap_addr_set(rx_buf, mapping, mapping);
613
614 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
615 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
616
617 return 0;
618}
619
620static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
621 struct bnx2x_fastpath *fp)
622{
623
624
625 if (cqe->fast_path_cqe.status_flags &
626 (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG |
627 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))
628 return;
629
630
631
632 if (cqe->fast_path_cqe.type_error_flags &
633 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
634 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
635 fp->eth_q_stats.hw_csum_err++;
636 else
637 skb->ip_summed = CHECKSUM_UNNECESSARY;
638}
639
640int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
641{
642 struct bnx2x *bp = fp->bp;
643 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
644 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
645 int rx_pkt = 0;
646
647#ifdef BNX2X_STOP_ON_ERROR
648 if (unlikely(bp->panic))
649 return 0;
650#endif
651
652
653
654 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
655 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
656 hw_comp_cons++;
657
658 bd_cons = fp->rx_bd_cons;
659 bd_prod = fp->rx_bd_prod;
660 bd_prod_fw = bd_prod;
661 sw_comp_cons = fp->rx_comp_cons;
662 sw_comp_prod = fp->rx_comp_prod;
663
664
665
666
667 rmb();
668
669 DP(NETIF_MSG_RX_STATUS,
670 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
671 fp->index, hw_comp_cons, sw_comp_cons);
672
673 while (sw_comp_cons != hw_comp_cons) {
674 struct sw_rx_bd *rx_buf = NULL;
675 struct sk_buff *skb;
676 union eth_rx_cqe *cqe;
677 struct eth_fast_path_rx_cqe *cqe_fp;
678 u8 cqe_fp_flags;
679 enum eth_rx_cqe_type cqe_fp_type;
680 u16 len, pad, queue;
681 u8 *data;
682
683#ifdef BNX2X_STOP_ON_ERROR
684 if (unlikely(bp->panic))
685 return 0;
686#endif
687
688 comp_ring_cons = RCQ_BD(sw_comp_cons);
689 bd_prod = RX_BD(bd_prod);
690 bd_cons = RX_BD(bd_cons);
691
692 cqe = &fp->rx_comp_ring[comp_ring_cons];
693 cqe_fp = &cqe->fast_path_cqe;
694 cqe_fp_flags = cqe_fp->type_error_flags;
695 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
696
697 DP(NETIF_MSG_RX_STATUS,
698 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
699 CQE_TYPE(cqe_fp_flags),
700 cqe_fp_flags, cqe_fp->status_flags,
701 le32_to_cpu(cqe_fp->rss_hash_result),
702 le16_to_cpu(cqe_fp->vlan_tag),
703 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
704
705
706 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
707 bnx2x_sp_event(fp, cqe);
708 goto next_cqe;
709 }
710
711 rx_buf = &fp->rx_buf_ring[bd_cons];
712 data = rx_buf->data;
713
714 if (!CQE_TYPE_FAST(cqe_fp_type)) {
715 struct bnx2x_agg_info *tpa_info;
716 u16 frag_size, pages;
717#ifdef BNX2X_STOP_ON_ERROR
718
719 if (fp->disable_tpa &&
720 (CQE_TYPE_START(cqe_fp_type) ||
721 CQE_TYPE_STOP(cqe_fp_type)))
722 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
723 CQE_TYPE(cqe_fp_type));
724#endif
725
726 if (CQE_TYPE_START(cqe_fp_type)) {
727 u16 queue = cqe_fp->queue_index;
728 DP(NETIF_MSG_RX_STATUS,
729 "calling tpa_start on queue %d\n",
730 queue);
731
732 bnx2x_tpa_start(fp, queue,
733 bd_cons, bd_prod,
734 cqe_fp);
735
736 goto next_rx;
737
738 }
739 queue = cqe->end_agg_cqe.queue_index;
740 tpa_info = &fp->tpa_info[queue];
741 DP(NETIF_MSG_RX_STATUS,
742 "calling tpa_stop on queue %d\n",
743 queue);
744
745 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
746 tpa_info->len_on_bd;
747
748 if (fp->mode == TPA_MODE_GRO)
749 pages = (frag_size + tpa_info->full_page - 1) /
750 tpa_info->full_page;
751 else
752 pages = SGE_PAGE_ALIGN(frag_size) >>
753 SGE_PAGE_SHIFT;
754
755 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
756 &cqe->end_agg_cqe, comp_ring_cons);
757#ifdef BNX2X_STOP_ON_ERROR
758 if (bp->panic)
759 return 0;
760#endif
761
762 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
763 goto next_cqe;
764 }
765
766 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
767 pad = cqe_fp->placement_offset;
768 dma_sync_single_for_cpu(&bp->pdev->dev,
769 dma_unmap_addr(rx_buf, mapping),
770 pad + RX_COPY_THRESH,
771 DMA_FROM_DEVICE);
772 pad += NET_SKB_PAD;
773 prefetch(data + pad);
774
775 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
776 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
777 "ERROR flags %x rx packet %u\n",
778 cqe_fp_flags, sw_comp_cons);
779 fp->eth_q_stats.rx_err_discard_pkt++;
780 goto reuse_rx;
781 }
782
783
784
785
786 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
787 (len <= RX_COPY_THRESH)) {
788 skb = netdev_alloc_skb_ip_align(bp->dev, len);
789 if (skb == NULL) {
790 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
791 "ERROR packet dropped because of alloc failure\n");
792 fp->eth_q_stats.rx_skb_alloc_failed++;
793 goto reuse_rx;
794 }
795 memcpy(skb->data, data + pad, len);
796 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
797 } else {
798 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
799 dma_unmap_single(&bp->pdev->dev,
800 dma_unmap_addr(rx_buf, mapping),
801 fp->rx_buf_size,
802 DMA_FROM_DEVICE);
803 skb = build_skb(data, 0);
804 if (unlikely(!skb)) {
805 kfree(data);
806 fp->eth_q_stats.rx_skb_alloc_failed++;
807 goto next_rx;
808 }
809 skb_reserve(skb, pad);
810 } else {
811 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
812 "ERROR packet dropped because of alloc failure\n");
813 fp->eth_q_stats.rx_skb_alloc_failed++;
814reuse_rx:
815 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
816 goto next_rx;
817 }
818 }
819
820 skb_put(skb, len);
821 skb->protocol = eth_type_trans(skb, bp->dev);
822
823
824 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp);
825
826 skb_checksum_none_assert(skb);
827
828 if (bp->dev->features & NETIF_F_RXCSUM)
829 bnx2x_csum_validate(skb, cqe, fp);
830
831
832 skb_record_rx_queue(skb, fp->rx_queue);
833
834 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
835 PARSING_FLAGS_VLAN)
836 __vlan_hwaccel_put_tag(skb,
837 le16_to_cpu(cqe_fp->vlan_tag));
838 napi_gro_receive(&fp->napi, skb);
839
840
841next_rx:
842 rx_buf->data = NULL;
843
844 bd_cons = NEXT_RX_IDX(bd_cons);
845 bd_prod = NEXT_RX_IDX(bd_prod);
846 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
847 rx_pkt++;
848next_cqe:
849 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
850 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
851
852 if (rx_pkt == budget)
853 break;
854 }
855
856 fp->rx_bd_cons = bd_cons;
857 fp->rx_bd_prod = bd_prod_fw;
858 fp->rx_comp_cons = sw_comp_cons;
859 fp->rx_comp_prod = sw_comp_prod;
860
861
862 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
863 fp->rx_sge_prod);
864
865 fp->rx_pkt += rx_pkt;
866 fp->rx_calls++;
867
868 return rx_pkt;
869}
870
871static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
872{
873 struct bnx2x_fastpath *fp = fp_cookie;
874 struct bnx2x *bp = fp->bp;
875 u8 cos;
876
877 DP(NETIF_MSG_INTR,
878 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
879 fp->index, fp->fw_sb_id, fp->igu_sb_id);
880 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
881
882#ifdef BNX2X_STOP_ON_ERROR
883 if (unlikely(bp->panic))
884 return IRQ_HANDLED;
885#endif
886
887
888 prefetch(fp->rx_cons_sb);
889
890 for_each_cos_in_tx_queue(fp, cos)
891 prefetch(fp->txdata[cos].tx_cons_sb);
892
893 prefetch(&fp->sb_running_index[SM_RX_ID]);
894 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
895
896 return IRQ_HANDLED;
897}
898
899
900void bnx2x_acquire_phy_lock(struct bnx2x *bp)
901{
902 mutex_lock(&bp->port.phy_mutex);
903
904 if (bp->port.need_hw_lock)
905 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
906}
907
908void bnx2x_release_phy_lock(struct bnx2x *bp)
909{
910 if (bp->port.need_hw_lock)
911 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
912
913 mutex_unlock(&bp->port.phy_mutex);
914}
915
916
917u16 bnx2x_get_mf_speed(struct bnx2x *bp)
918{
919 u16 line_speed = bp->link_vars.line_speed;
920 if (IS_MF(bp)) {
921 u16 maxCfg = bnx2x_extract_max_cfg(bp,
922 bp->mf_config[BP_VN(bp)]);
923
924
925
926
927 if (IS_MF_SI(bp))
928 line_speed = (line_speed * maxCfg) / 100;
929 else {
930 u16 vn_max_rate = maxCfg * 100;
931
932 if (vn_max_rate < line_speed)
933 line_speed = vn_max_rate;
934 }
935 }
936
937 return line_speed;
938}
939
940
941
942
943
944
945
946
947
948static void bnx2x_fill_report_data(struct bnx2x *bp,
949 struct bnx2x_link_report_data *data)
950{
951 u16 line_speed = bnx2x_get_mf_speed(bp);
952
953 memset(data, 0, sizeof(*data));
954
955
956 data->line_speed = line_speed;
957
958
959 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
960 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
961 &data->link_report_flags);
962
963
964 if (bp->link_vars.duplex == DUPLEX_FULL)
965 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
966
967
968 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
969 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
970
971
972 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
973 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
974}
975
976
977
978
979
980
981
982
983
984
985
986void bnx2x_link_report(struct bnx2x *bp)
987{
988 bnx2x_acquire_phy_lock(bp);
989 __bnx2x_link_report(bp);
990 bnx2x_release_phy_lock(bp);
991}
992
993
994
995
996
997
998
999
1000
1001void __bnx2x_link_report(struct bnx2x *bp)
1002{
1003 struct bnx2x_link_report_data cur_data;
1004
1005
1006 if (!CHIP_IS_E1(bp))
1007 bnx2x_read_mf_cfg(bp);
1008
1009
1010 bnx2x_fill_report_data(bp, &cur_data);
1011
1012
1013 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1014 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1015 &bp->last_reported_link.link_report_flags) &&
1016 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1017 &cur_data.link_report_flags)))
1018 return;
1019
1020 bp->link_cnt++;
1021
1022
1023
1024
1025 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1026
1027 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1028 &cur_data.link_report_flags)) {
1029 netif_carrier_off(bp->dev);
1030 netdev_err(bp->dev, "NIC Link is Down\n");
1031 return;
1032 } else {
1033 const char *duplex;
1034 const char *flow;
1035
1036 netif_carrier_on(bp->dev);
1037
1038 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1039 &cur_data.link_report_flags))
1040 duplex = "full";
1041 else
1042 duplex = "half";
1043
1044
1045
1046
1047
1048 if (cur_data.link_report_flags) {
1049 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1050 &cur_data.link_report_flags)) {
1051 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1052 &cur_data.link_report_flags))
1053 flow = "ON - receive & transmit";
1054 else
1055 flow = "ON - receive";
1056 } else {
1057 flow = "ON - transmit";
1058 }
1059 } else {
1060 flow = "none";
1061 }
1062 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1063 cur_data.line_speed, duplex, flow);
1064 }
1065}
1066
1067static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1068{
1069 int i;
1070
1071 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1072 struct eth_rx_sge *sge;
1073
1074 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1075 sge->addr_hi =
1076 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1077 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1078
1079 sge->addr_lo =
1080 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1081 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1082 }
1083}
1084
1085static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1086 struct bnx2x_fastpath *fp, int last)
1087{
1088 int i;
1089
1090 for (i = 0; i < last; i++) {
1091 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1092 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1093 u8 *data = first_buf->data;
1094
1095 if (data == NULL) {
1096 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1097 continue;
1098 }
1099 if (tpa_info->tpa_state == BNX2X_TPA_START)
1100 dma_unmap_single(&bp->pdev->dev,
1101 dma_unmap_addr(first_buf, mapping),
1102 fp->rx_buf_size, DMA_FROM_DEVICE);
1103 kfree(data);
1104 first_buf->data = NULL;
1105 }
1106}
1107
1108void bnx2x_init_rx_rings(struct bnx2x *bp)
1109{
1110 int func = BP_FUNC(bp);
1111 u16 ring_prod;
1112 int i, j;
1113
1114
1115 for_each_rx_queue(bp, j) {
1116 struct bnx2x_fastpath *fp = &bp->fp[j];
1117
1118 DP(NETIF_MSG_IFUP,
1119 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1120
1121 if (!fp->disable_tpa) {
1122
1123 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1124 struct bnx2x_agg_info *tpa_info =
1125 &fp->tpa_info[i];
1126 struct sw_rx_bd *first_buf =
1127 &tpa_info->first_buf;
1128
1129 first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
1130 GFP_ATOMIC);
1131 if (!first_buf->data) {
1132 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1133 j);
1134 bnx2x_free_tpa_pool(bp, fp, i);
1135 fp->disable_tpa = 1;
1136 break;
1137 }
1138 dma_unmap_addr_set(first_buf, mapping, 0);
1139 tpa_info->tpa_state = BNX2X_TPA_STOP;
1140 }
1141
1142
1143 bnx2x_set_next_page_sgl(fp);
1144
1145
1146 bnx2x_init_sge_ring_bit_mask(fp);
1147
1148
1149 for (i = 0, ring_prod = 0;
1150 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1151
1152 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1153 BNX2X_ERR("was only able to allocate %d rx sges\n",
1154 i);
1155 BNX2X_ERR("disabling TPA for queue[%d]\n",
1156 j);
1157
1158 bnx2x_free_rx_sge_range(bp, fp,
1159 ring_prod);
1160 bnx2x_free_tpa_pool(bp, fp,
1161 MAX_AGG_QS(bp));
1162 fp->disable_tpa = 1;
1163 ring_prod = 0;
1164 break;
1165 }
1166 ring_prod = NEXT_SGE_IDX(ring_prod);
1167 }
1168
1169 fp->rx_sge_prod = ring_prod;
1170 }
1171 }
1172
1173 for_each_rx_queue(bp, j) {
1174 struct bnx2x_fastpath *fp = &bp->fp[j];
1175
1176 fp->rx_bd_cons = 0;
1177
1178
1179
1180
1181
1182
1183 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1184 fp->rx_sge_prod);
1185
1186 if (j != 0)
1187 continue;
1188
1189 if (CHIP_IS_E1(bp)) {
1190 REG_WR(bp, BAR_USTRORM_INTMEM +
1191 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1192 U64_LO(fp->rx_comp_mapping));
1193 REG_WR(bp, BAR_USTRORM_INTMEM +
1194 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1195 U64_HI(fp->rx_comp_mapping));
1196 }
1197 }
1198}
1199
1200static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1201{
1202 int i;
1203 u8 cos;
1204
1205 for_each_tx_queue(bp, i) {
1206 struct bnx2x_fastpath *fp = &bp->fp[i];
1207 for_each_cos_in_tx_queue(fp, cos) {
1208 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
1209 unsigned pkts_compl = 0, bytes_compl = 0;
1210
1211 u16 sw_prod = txdata->tx_pkt_prod;
1212 u16 sw_cons = txdata->tx_pkt_cons;
1213
1214 while (sw_cons != sw_prod) {
1215 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1216 &pkts_compl, &bytes_compl);
1217 sw_cons++;
1218 }
1219 netdev_tx_reset_queue(
1220 netdev_get_tx_queue(bp->dev, txdata->txq_index));
1221 }
1222 }
1223}
1224
1225static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1226{
1227 struct bnx2x *bp = fp->bp;
1228 int i;
1229
1230
1231 if (fp->rx_buf_ring == NULL)
1232 return;
1233
1234 for (i = 0; i < NUM_RX_BD; i++) {
1235 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1236 u8 *data = rx_buf->data;
1237
1238 if (data == NULL)
1239 continue;
1240 dma_unmap_single(&bp->pdev->dev,
1241 dma_unmap_addr(rx_buf, mapping),
1242 fp->rx_buf_size, DMA_FROM_DEVICE);
1243
1244 rx_buf->data = NULL;
1245 kfree(data);
1246 }
1247}
1248
1249static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1250{
1251 int j;
1252
1253 for_each_rx_queue(bp, j) {
1254 struct bnx2x_fastpath *fp = &bp->fp[j];
1255
1256 bnx2x_free_rx_bds(fp);
1257
1258 if (!fp->disable_tpa)
1259 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1260 }
1261}
1262
1263void bnx2x_free_skbs(struct bnx2x *bp)
1264{
1265 bnx2x_free_tx_skbs(bp);
1266 bnx2x_free_rx_skbs(bp);
1267}
1268
1269void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1270{
1271
1272 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1273
1274 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1275
1276 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1277
1278
1279 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1280 & FUNC_MF_CFG_MAX_BW_MASK;
1281
1282 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1283 }
1284}
1285
1286
1287
1288
1289
1290
1291
1292static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1293{
1294 int i, offset = 0;
1295
1296 if (nvecs == offset)
1297 return;
1298 free_irq(bp->msix_table[offset].vector, bp->dev);
1299 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1300 bp->msix_table[offset].vector);
1301 offset++;
1302#ifdef BCM_CNIC
1303 if (nvecs == offset)
1304 return;
1305 offset++;
1306#endif
1307
1308 for_each_eth_queue(bp, i) {
1309 if (nvecs == offset)
1310 return;
1311 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1312 i, bp->msix_table[offset].vector);
1313
1314 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1315 }
1316}
1317
1318void bnx2x_free_irq(struct bnx2x *bp)
1319{
1320 if (bp->flags & USING_MSIX_FLAG &&
1321 !(bp->flags & USING_SINGLE_MSIX_FLAG))
1322 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
1323 CNIC_PRESENT + 1);
1324 else
1325 free_irq(bp->dev->irq, bp->dev);
1326}
1327
1328int __devinit bnx2x_enable_msix(struct bnx2x *bp)
1329{
1330 int msix_vec = 0, i, rc, req_cnt;
1331
1332 bp->msix_table[msix_vec].entry = msix_vec;
1333 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1334 bp->msix_table[0].entry);
1335 msix_vec++;
1336
1337#ifdef BCM_CNIC
1338 bp->msix_table[msix_vec].entry = msix_vec;
1339 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1340 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1341 msix_vec++;
1342#endif
1343
1344 for_each_eth_queue(bp, i) {
1345 bp->msix_table[msix_vec].entry = msix_vec;
1346 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1347 msix_vec, msix_vec, i);
1348 msix_vec++;
1349 }
1350
1351 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
1352
1353 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1354
1355
1356
1357
1358
1359 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
1360
1361 int diff = req_cnt - rc;
1362
1363 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1364
1365 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1366
1367 if (rc) {
1368 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1369 goto no_msix;
1370 }
1371
1372
1373
1374 bp->num_queues -= diff;
1375
1376 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1377 bp->num_queues);
1378 } else if (rc > 0) {
1379
1380 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1381 if (rc) {
1382 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1383 rc);
1384 goto no_msix;
1385 }
1386
1387 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1388 bp->flags |= USING_SINGLE_MSIX_FLAG;
1389
1390 } else if (rc < 0) {
1391 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1392 goto no_msix;
1393 }
1394
1395 bp->flags |= USING_MSIX_FLAG;
1396
1397 return 0;
1398
1399no_msix:
1400
1401 if (rc == -ENOMEM)
1402 bp->flags |= DISABLE_MSI_FLAG;
1403
1404 return rc;
1405}
1406
1407static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1408{
1409 int i, rc, offset = 0;
1410
1411 rc = request_irq(bp->msix_table[offset++].vector,
1412 bnx2x_msix_sp_int, 0,
1413 bp->dev->name, bp->dev);
1414 if (rc) {
1415 BNX2X_ERR("request sp irq failed\n");
1416 return -EBUSY;
1417 }
1418
1419#ifdef BCM_CNIC
1420 offset++;
1421#endif
1422 for_each_eth_queue(bp, i) {
1423 struct bnx2x_fastpath *fp = &bp->fp[i];
1424 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1425 bp->dev->name, i);
1426
1427 rc = request_irq(bp->msix_table[offset].vector,
1428 bnx2x_msix_fp_int, 0, fp->name, fp);
1429 if (rc) {
1430 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1431 bp->msix_table[offset].vector, rc);
1432 bnx2x_free_msix_irqs(bp, offset);
1433 return -EBUSY;
1434 }
1435
1436 offset++;
1437 }
1438
1439 i = BNX2X_NUM_ETH_QUEUES(bp);
1440 offset = 1 + CNIC_PRESENT;
1441 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1442 bp->msix_table[0].vector,
1443 0, bp->msix_table[offset].vector,
1444 i - 1, bp->msix_table[offset + i - 1].vector);
1445
1446 return 0;
1447}
1448
1449int bnx2x_enable_msi(struct bnx2x *bp)
1450{
1451 int rc;
1452
1453 rc = pci_enable_msi(bp->pdev);
1454 if (rc) {
1455 BNX2X_DEV_INFO("MSI is not attainable\n");
1456 return -1;
1457 }
1458 bp->flags |= USING_MSI_FLAG;
1459
1460 return 0;
1461}
1462
1463static int bnx2x_req_irq(struct bnx2x *bp)
1464{
1465 unsigned long flags;
1466 unsigned int irq;
1467
1468 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1469 flags = 0;
1470 else
1471 flags = IRQF_SHARED;
1472
1473 if (bp->flags & USING_MSIX_FLAG)
1474 irq = bp->msix_table[0].vector;
1475 else
1476 irq = bp->pdev->irq;
1477
1478 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1479}
1480
1481static int bnx2x_setup_irqs(struct bnx2x *bp)
1482{
1483 int rc = 0;
1484 if (bp->flags & USING_MSIX_FLAG &&
1485 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1486 rc = bnx2x_req_msix_irqs(bp);
1487 if (rc)
1488 return rc;
1489 } else {
1490 bnx2x_ack_int(bp);
1491 rc = bnx2x_req_irq(bp);
1492 if (rc) {
1493 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1494 return rc;
1495 }
1496 if (bp->flags & USING_MSI_FLAG) {
1497 bp->dev->irq = bp->pdev->irq;
1498 netdev_info(bp->dev, "using MSI IRQ %d\n",
1499 bp->dev->irq);
1500 }
1501 if (bp->flags & USING_MSIX_FLAG) {
1502 bp->dev->irq = bp->msix_table[0].vector;
1503 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1504 bp->dev->irq);
1505 }
1506 }
1507
1508 return 0;
1509}
1510
1511static void bnx2x_napi_enable(struct bnx2x *bp)
1512{
1513 int i;
1514
1515 for_each_rx_queue(bp, i)
1516 napi_enable(&bnx2x_fp(bp, i, napi));
1517}
1518
1519static void bnx2x_napi_disable(struct bnx2x *bp)
1520{
1521 int i;
1522
1523 for_each_rx_queue(bp, i)
1524 napi_disable(&bnx2x_fp(bp, i, napi));
1525}
1526
1527void bnx2x_netif_start(struct bnx2x *bp)
1528{
1529 if (netif_running(bp->dev)) {
1530 bnx2x_napi_enable(bp);
1531 bnx2x_int_enable(bp);
1532 if (bp->state == BNX2X_STATE_OPEN)
1533 netif_tx_wake_all_queues(bp->dev);
1534 }
1535}
1536
1537void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1538{
1539 bnx2x_int_disable_sync(bp, disable_hw);
1540 bnx2x_napi_disable(bp);
1541}
1542
1543u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1544{
1545 struct bnx2x *bp = netdev_priv(dev);
1546
1547#ifdef BCM_CNIC
1548 if (!NO_FCOE(bp)) {
1549 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1550 u16 ether_type = ntohs(hdr->h_proto);
1551
1552
1553 if (ether_type == ETH_P_8021Q) {
1554 struct vlan_ethhdr *vhdr =
1555 (struct vlan_ethhdr *)skb->data;
1556
1557 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1558 }
1559
1560
1561 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1562 return bnx2x_fcoe_tx(bp, txq_index);
1563 }
1564#endif
1565
1566 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
1567}
1568
1569
1570void bnx2x_set_num_queues(struct bnx2x *bp)
1571{
1572
1573 bp->num_queues = bnx2x_calc_num_queues(bp);
1574
1575#ifdef BCM_CNIC
1576
1577 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1578 bp->num_queues = 1;
1579#endif
1580
1581 bp->num_queues += NON_ETH_CONTEXT_USE;
1582}
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606static int bnx2x_set_real_num_queues(struct bnx2x *bp)
1607{
1608 int rc, tx, rx;
1609
1610 tx = MAX_TXQS_PER_COS * bp->max_cos;
1611 rx = BNX2X_NUM_ETH_QUEUES(bp);
1612
1613
1614#ifdef BCM_CNIC
1615 if (!NO_FCOE(bp)) {
1616 rx += FCOE_PRESENT;
1617 tx += FCOE_PRESENT;
1618 }
1619#endif
1620
1621 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1622 if (rc) {
1623 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1624 return rc;
1625 }
1626 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1627 if (rc) {
1628 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1629 return rc;
1630 }
1631
1632 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
1633 tx, rx);
1634
1635 return rc;
1636}
1637
1638static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1639{
1640 int i;
1641
1642 for_each_queue(bp, i) {
1643 struct bnx2x_fastpath *fp = &bp->fp[i];
1644 u32 mtu;
1645
1646
1647 if (IS_FCOE_IDX(i))
1648
1649
1650
1651
1652
1653
1654 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
1655 else
1656 mtu = bp->dev->mtu;
1657 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1658 IP_HEADER_ALIGNMENT_PADDING +
1659 ETH_OVREHEAD +
1660 mtu +
1661 BNX2X_FW_RX_ALIGN_END;
1662
1663 }
1664}
1665
1666static int bnx2x_init_rss_pf(struct bnx2x *bp)
1667{
1668 int i;
1669 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
1670 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1671
1672
1673
1674
1675 for (i = 0; i < sizeof(ind_table); i++)
1676 ind_table[i] =
1677 bp->fp->cl_id +
1678 ethtool_rxfh_indir_default(i, num_eth_queues);
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688 return bnx2x_config_rss_eth(bp, ind_table,
1689 bp->port.pmf || !CHIP_IS_E1x(bp));
1690}
1691
1692int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1693 u8 *ind_table, bool config_hash)
1694{
1695 struct bnx2x_config_rss_params params = {NULL};
1696 int i;
1697
1698
1699
1700
1701
1702
1703
1704
1705 params.rss_obj = rss_obj;
1706
1707 __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags);
1708
1709 __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags);
1710
1711
1712 __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags);
1713 __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags);
1714 __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags);
1715 __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags);
1716
1717
1718 params.rss_result_mask = MULTI_MASK;
1719
1720 memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
1721
1722 if (config_hash) {
1723
1724 for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1725 params.rss_key[i] = random32();
1726
1727 __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags);
1728 }
1729
1730 return bnx2x_config_rss(bp, ¶ms);
1731}
1732
1733static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
1734{
1735 struct bnx2x_func_state_params func_params = {NULL};
1736
1737
1738 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1739
1740 func_params.f_obj = &bp->func_obj;
1741 func_params.cmd = BNX2X_F_CMD_HW_INIT;
1742
1743 func_params.params.hw_init.load_phase = load_code;
1744
1745 return bnx2x_func_state_change(bp, &func_params);
1746}
1747
1748
1749
1750
1751
1752static void bnx2x_squeeze_objects(struct bnx2x *bp)
1753{
1754 int rc;
1755 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
1756 struct bnx2x_mcast_ramrod_params rparam = {NULL};
1757 struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
1758
1759
1760
1761
1762 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1763
1764 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1765
1766
1767 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
1768 rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
1769 &ramrod_flags);
1770 if (rc != 0)
1771 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1772
1773
1774 vlan_mac_flags = 0;
1775 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1776 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1777 &ramrod_flags);
1778 if (rc != 0)
1779 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1780
1781
1782 rparam.mcast_obj = &bp->mcast_obj;
1783 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1784
1785
1786 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1787 if (rc < 0)
1788 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
1789 rc);
1790
1791
1792 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1793 while (rc != 0) {
1794 if (rc < 0) {
1795 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1796 rc);
1797 return;
1798 }
1799
1800 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1801 }
1802}
1803
1804#ifndef BNX2X_STOP_ON_ERROR
1805#define LOAD_ERROR_EXIT(bp, label) \
1806 do { \
1807 (bp)->state = BNX2X_STATE_ERROR; \
1808 goto label; \
1809 } while (0)
1810#else
1811#define LOAD_ERROR_EXIT(bp, label) \
1812 do { \
1813 (bp)->state = BNX2X_STATE_ERROR; \
1814 (bp)->panic = 1; \
1815 return -EBUSY; \
1816 } while (0)
1817#endif
1818
1819bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
1820{
1821
1822 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
1823 (BCM_5710_FW_MINOR_VERSION << 8) +
1824 (BCM_5710_FW_REVISION_VERSION << 16) +
1825 (BCM_5710_FW_ENGINEERING_VERSION << 24);
1826
1827
1828 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
1829
1830 DP(NETIF_MSG_IFUP, "loaded fw %x, my fw %x\n", loaded_fw, my_fw);
1831
1832 if (loaded_fw != my_fw) {
1833 if (is_err)
1834 BNX2X_ERR("bnx2x with FW %x was already loaded, which mismatches my %x FW. aborting\n",
1835 loaded_fw, my_fw);
1836 return false;
1837 }
1838
1839 return true;
1840}
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851static void bnx2x_bz_fp(struct bnx2x *bp, int index)
1852{
1853 struct bnx2x_fastpath *fp = &bp->fp[index];
1854 struct napi_struct orig_napi = fp->napi;
1855
1856 if (bp->stats_init)
1857 memset(fp, 0, sizeof(*fp));
1858 else {
1859
1860 struct bnx2x_eth_q_stats *tmp_eth_q_stats;
1861 struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
1862
1863 tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
1864 GFP_KERNEL);
1865 if (tmp_eth_q_stats)
1866 memcpy(tmp_eth_q_stats, &fp->eth_q_stats,
1867 sizeof(struct bnx2x_eth_q_stats));
1868
1869 tmp_eth_q_stats_old =
1870 kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
1871 GFP_KERNEL);
1872 if (tmp_eth_q_stats_old)
1873 memcpy(tmp_eth_q_stats_old, &fp->eth_q_stats_old,
1874 sizeof(struct bnx2x_eth_q_stats_old));
1875
1876 memset(fp, 0, sizeof(*fp));
1877
1878 if (tmp_eth_q_stats) {
1879 memcpy(&fp->eth_q_stats, tmp_eth_q_stats,
1880 sizeof(struct bnx2x_eth_q_stats));
1881 kfree(tmp_eth_q_stats);
1882 }
1883
1884 if (tmp_eth_q_stats_old) {
1885 memcpy(&fp->eth_q_stats_old, tmp_eth_q_stats_old,
1886 sizeof(struct bnx2x_eth_q_stats_old));
1887 kfree(tmp_eth_q_stats_old);
1888 }
1889
1890 }
1891
1892
1893 fp->napi = orig_napi;
1894
1895 fp->bp = bp;
1896 fp->index = index;
1897 if (IS_ETH_FP(fp))
1898 fp->max_cos = bp->max_cos;
1899 else
1900
1901 fp->max_cos = 1;
1902
1903
1904
1905
1906
1907 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
1908 (bp->flags & GRO_ENABLE_FLAG &&
1909 bnx2x_mtu_allows_gro(bp->dev->mtu)));
1910 if (bp->flags & TPA_ENABLE_FLAG)
1911 fp->mode = TPA_MODE_LRO;
1912 else if (bp->flags & GRO_ENABLE_FLAG)
1913 fp->mode = TPA_MODE_GRO;
1914
1915#ifdef BCM_CNIC
1916
1917 if (IS_FCOE_FP(fp))
1918 fp->disable_tpa = 1;
1919#endif
1920}
1921
1922
1923
1924int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1925{
1926 int port = BP_PORT(bp);
1927 u32 load_code;
1928 int i, rc;
1929
1930#ifdef BNX2X_STOP_ON_ERROR
1931 if (unlikely(bp->panic)) {
1932 BNX2X_ERR("Can't load NIC when there is panic\n");
1933 return -EPERM;
1934 }
1935#endif
1936
1937 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1938
1939
1940 bnx2x_acquire_phy_lock(bp);
1941 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
1942 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1943 &bp->last_reported_link.link_report_flags);
1944 bnx2x_release_phy_lock(bp);
1945
1946
1947 bnx2x_ilt_set_info(bp);
1948
1949
1950
1951
1952
1953
1954 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
1955 for_each_queue(bp, i)
1956 bnx2x_bz_fp(bp, i);
1957
1958
1959
1960 bnx2x_set_rx_buf_size(bp);
1961
1962 if (bnx2x_alloc_mem(bp))
1963 return -ENOMEM;
1964
1965
1966
1967
1968
1969 rc = bnx2x_set_real_num_queues(bp);
1970 if (rc) {
1971 BNX2X_ERR("Unable to set real_num_queues\n");
1972 LOAD_ERROR_EXIT(bp, load_error0);
1973 }
1974
1975
1976
1977
1978
1979 bnx2x_setup_tc(bp->dev, bp->max_cos);
1980
1981 bnx2x_napi_enable(bp);
1982
1983
1984 bnx2x_set_pf_load(bp);
1985
1986
1987
1988
1989
1990
1991 if (!BP_NOMCP(bp)) {
1992
1993 bp->fw_seq =
1994 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
1995 DRV_MSG_SEQ_NUMBER_MASK);
1996 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
1997
1998
1999 bp->fw_drv_pulse_wr_seq =
2000 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2001 DRV_PULSE_SEQ_MASK);
2002 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2003
2004 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
2005 if (!load_code) {
2006 BNX2X_ERR("MCP response failure, aborting\n");
2007 rc = -EBUSY;
2008 LOAD_ERROR_EXIT(bp, load_error1);
2009 }
2010 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2011 BNX2X_ERR("Driver load refused\n");
2012 rc = -EBUSY;
2013 LOAD_ERROR_EXIT(bp, load_error1);
2014 }
2015 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2016 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2017
2018 if (!bnx2x_test_firmware_version(bp, true)) {
2019 rc = -EBUSY;
2020 LOAD_ERROR_EXIT(bp, load_error2);
2021 }
2022 }
2023
2024 } else {
2025 int path = BP_PATH(bp);
2026
2027 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2028 path, load_count[path][0], load_count[path][1],
2029 load_count[path][2]);
2030 load_count[path][0]++;
2031 load_count[path][1 + port]++;
2032 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2033 path, load_count[path][0], load_count[path][1],
2034 load_count[path][2]);
2035 if (load_count[path][0] == 1)
2036 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
2037 else if (load_count[path][1 + port] == 1)
2038 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
2039 else
2040 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
2041 }
2042
2043 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2044 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2045 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2046 bp->port.pmf = 1;
2047
2048
2049
2050
2051
2052 smp_mb();
2053 } else
2054 bp->port.pmf = 0;
2055
2056 DP(NETIF_MSG_IFUP, "pmf %d\n", bp->port.pmf);
2057
2058
2059 bnx2x__init_func_obj(bp);
2060
2061
2062 rc = bnx2x_init_hw(bp, load_code);
2063 if (rc) {
2064 BNX2X_ERR("HW init failed, aborting\n");
2065 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2066 LOAD_ERROR_EXIT(bp, load_error2);
2067 }
2068
2069
2070 rc = bnx2x_setup_irqs(bp);
2071 if (rc) {
2072 BNX2X_ERR("IRQs setup failed\n");
2073 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2074 LOAD_ERROR_EXIT(bp, load_error2);
2075 }
2076
2077
2078 bnx2x_nic_init(bp, load_code);
2079
2080
2081 bnx2x_init_bp_objs(bp);
2082
2083 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2084 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2085 (bp->common.shmem2_base)) {
2086 if (SHMEM2_HAS(bp, dcc_support))
2087 SHMEM2_WR(bp, dcc_support,
2088 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2089 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2090 if (SHMEM2_HAS(bp, afex_driver_support))
2091 SHMEM2_WR(bp, afex_driver_support,
2092 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2093 }
2094
2095
2096 bp->afex_def_vlan_tag = -1;
2097
2098 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2099 rc = bnx2x_func_start(bp);
2100 if (rc) {
2101 BNX2X_ERR("Function start failed!\n");
2102 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2103 LOAD_ERROR_EXIT(bp, load_error3);
2104 }
2105
2106
2107 if (!BP_NOMCP(bp)) {
2108 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2109 if (!load_code) {
2110 BNX2X_ERR("MCP response failure, aborting\n");
2111 rc = -EBUSY;
2112 LOAD_ERROR_EXIT(bp, load_error3);
2113 }
2114 }
2115
2116 rc = bnx2x_setup_leading(bp);
2117 if (rc) {
2118 BNX2X_ERR("Setup leading failed!\n");
2119 LOAD_ERROR_EXIT(bp, load_error3);
2120 }
2121
2122#ifdef BCM_CNIC
2123
2124 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2125#endif
2126
2127 for_each_nondefault_queue(bp, i) {
2128 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2129 if (rc) {
2130 BNX2X_ERR("Queue setup failed\n");
2131 LOAD_ERROR_EXIT(bp, load_error4);
2132 }
2133 }
2134
2135 rc = bnx2x_init_rss_pf(bp);
2136 if (rc) {
2137 BNX2X_ERR("PF RSS init failed\n");
2138 LOAD_ERROR_EXIT(bp, load_error4);
2139 }
2140
2141
2142 bp->state = BNX2X_STATE_OPEN;
2143
2144
2145 rc = bnx2x_set_eth_mac(bp, true);
2146 if (rc) {
2147 BNX2X_ERR("Setting Ethernet MAC failed\n");
2148 LOAD_ERROR_EXIT(bp, load_error4);
2149 }
2150
2151 if (bp->pending_max) {
2152 bnx2x_update_max_mf_config(bp, bp->pending_max);
2153 bp->pending_max = 0;
2154 }
2155
2156 if (bp->port.pmf)
2157 bnx2x_initial_phy_init(bp, load_mode);
2158
2159
2160
2161
2162 netif_addr_lock_bh(bp->dev);
2163 bnx2x_set_rx_mode(bp->dev);
2164 netif_addr_unlock_bh(bp->dev);
2165
2166
2167 switch (load_mode) {
2168 case LOAD_NORMAL:
2169
2170 netif_tx_wake_all_queues(bp->dev);
2171 break;
2172
2173 case LOAD_OPEN:
2174 netif_tx_start_all_queues(bp->dev);
2175 smp_mb__after_clear_bit();
2176 break;
2177
2178 case LOAD_DIAG:
2179 bp->state = BNX2X_STATE_DIAG;
2180 break;
2181
2182 default:
2183 break;
2184 }
2185
2186 if (bp->port.pmf)
2187 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0);
2188 else
2189 bnx2x__link_status_update(bp);
2190
2191
2192 mod_timer(&bp->timer, jiffies + bp->current_interval);
2193
2194#ifdef BCM_CNIC
2195
2196 bnx2x_get_iscsi_info(bp);
2197 bnx2x_setup_cnic_irq_info(bp);
2198 if (bp->state == BNX2X_STATE_OPEN)
2199 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2200#endif
2201
2202
2203 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2204 u32 val;
2205 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2206 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2207 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2208 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2209 }
2210
2211
2212 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2213 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2214 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2215 return -EBUSY;
2216 }
2217
2218 bnx2x_dcbx_init(bp);
2219 return 0;
2220
2221#ifndef BNX2X_STOP_ON_ERROR
2222load_error4:
2223#ifdef BCM_CNIC
2224
2225 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2226#endif
2227load_error3:
2228 bnx2x_int_disable_sync(bp, 1);
2229
2230
2231 bnx2x_squeeze_objects(bp);
2232
2233
2234 bnx2x_free_skbs(bp);
2235 for_each_rx_queue(bp, i)
2236 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2237
2238
2239 bnx2x_free_irq(bp);
2240load_error2:
2241 if (!BP_NOMCP(bp)) {
2242 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2243 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2244 }
2245
2246 bp->port.pmf = 0;
2247load_error1:
2248 bnx2x_napi_disable(bp);
2249
2250 bnx2x_clear_pf_load(bp);
2251load_error0:
2252 bnx2x_free_mem(bp);
2253
2254 return rc;
2255#endif
2256}
2257
2258
2259int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
2260{
2261 int i;
2262 bool global = false;
2263
2264
2265 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2266 u32 val;
2267 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2268 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2269 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2270 }
2271
2272 if ((bp->state == BNX2X_STATE_CLOSED) ||
2273 (bp->state == BNX2X_STATE_ERROR)) {
2274
2275
2276
2277
2278
2279
2280
2281 bp->recovery_state = BNX2X_RECOVERY_DONE;
2282 bp->is_leader = 0;
2283 bnx2x_release_leader_lock(bp);
2284 smp_mb();
2285
2286 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2287 BNX2X_ERR("Can't unload in closed or error state\n");
2288 return -EINVAL;
2289 }
2290
2291
2292
2293
2294
2295
2296 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2297 smp_mb();
2298
2299
2300 bnx2x_tx_disable(bp);
2301
2302#ifdef BCM_CNIC
2303 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2304#endif
2305
2306 bp->rx_mode = BNX2X_RX_MODE_NONE;
2307
2308 del_timer_sync(&bp->timer);
2309
2310
2311 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2312
2313 bnx2x_drv_pulse(bp);
2314
2315 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2316 bnx2x_save_statistics(bp);
2317
2318
2319 if (unload_mode != UNLOAD_RECOVERY)
2320 bnx2x_chip_cleanup(bp, unload_mode);
2321 else {
2322
2323 bnx2x_send_unload_req(bp, unload_mode);
2324
2325
2326
2327
2328
2329
2330
2331
2332 if (!CHIP_IS_E1x(bp))
2333 bnx2x_pf_disable(bp);
2334
2335
2336 bnx2x_netif_stop(bp, 1);
2337
2338
2339 bnx2x_free_irq(bp);
2340
2341
2342 bnx2x_send_unload_done(bp);
2343 }
2344
2345
2346
2347
2348
2349 bnx2x_squeeze_objects(bp);
2350
2351
2352 bp->sp_state = 0;
2353
2354 bp->port.pmf = 0;
2355
2356
2357 bnx2x_free_skbs(bp);
2358 for_each_rx_queue(bp, i)
2359 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2360
2361 bnx2x_free_mem(bp);
2362
2363 bp->state = BNX2X_STATE_CLOSED;
2364
2365
2366
2367
2368 if (bnx2x_chk_parity_attn(bp, &global, false)) {
2369 bnx2x_set_reset_in_progress(bp);
2370
2371
2372 if (global)
2373 bnx2x_set_reset_global(bp);
2374 }
2375
2376
2377
2378
2379
2380 if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
2381 bnx2x_disable_close_the_gate(bp);
2382
2383 return 0;
2384}
2385
2386int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2387{
2388 u16 pmcsr;
2389
2390
2391 if (!bp->pm_cap) {
2392 BNX2X_DEV_INFO("No power capability. Breaking.\n");
2393 return 0;
2394 }
2395
2396 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2397
2398 switch (state) {
2399 case PCI_D0:
2400 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2401 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2402 PCI_PM_CTRL_PME_STATUS));
2403
2404 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2405
2406 msleep(20);
2407 break;
2408
2409 case PCI_D3hot:
2410
2411
2412 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2413 return 0;
2414
2415 if (CHIP_REV_IS_SLOW(bp))
2416 return 0;
2417
2418 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2419 pmcsr |= 3;
2420
2421 if (bp->wol)
2422 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2423
2424 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2425 pmcsr);
2426
2427
2428
2429
2430 break;
2431
2432 default:
2433 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
2434 return -EINVAL;
2435 }
2436 return 0;
2437}
2438
2439
2440
2441
2442int bnx2x_poll(struct napi_struct *napi, int budget)
2443{
2444 int work_done = 0;
2445 u8 cos;
2446 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2447 napi);
2448 struct bnx2x *bp = fp->bp;
2449
2450 while (1) {
2451#ifdef BNX2X_STOP_ON_ERROR
2452 if (unlikely(bp->panic)) {
2453 napi_complete(napi);
2454 return 0;
2455 }
2456#endif
2457
2458 for_each_cos_in_tx_queue(fp, cos)
2459 if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
2460 bnx2x_tx_int(bp, &fp->txdata[cos]);
2461
2462
2463 if (bnx2x_has_rx_work(fp)) {
2464 work_done += bnx2x_rx_int(fp, budget - work_done);
2465
2466
2467 if (work_done >= budget)
2468 break;
2469 }
2470
2471
2472 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2473#ifdef BCM_CNIC
2474
2475
2476
2477
2478 if (IS_FCOE_FP(fp)) {
2479 napi_complete(napi);
2480 break;
2481 }
2482#endif
2483
2484 bnx2x_update_fpsb_idx(fp);
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498 rmb();
2499
2500 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2501 napi_complete(napi);
2502
2503 DP(NETIF_MSG_RX_STATUS,
2504 "Update index to %d\n", fp->fp_hc_idx);
2505 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2506 le16_to_cpu(fp->fp_hc_idx),
2507 IGU_INT_ENABLE, 1);
2508 break;
2509 }
2510 }
2511 }
2512
2513 return work_done;
2514}
2515
2516
2517
2518
2519
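/* Split the first BD into a header BD and a data BD so the headers and the
 * data are described by separate descriptors; one DMA mapping is shared
 * between the two BDs.
 */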
2520static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
2521 struct bnx2x_fp_txdata *txdata,
2522 struct sw_tx_bd *tx_buf,
2523 struct eth_tx_start_bd **tx_bd, u16 hlen,
2524 u16 bd_prod, int nbd)
2525{
2526 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2527 struct eth_tx_bd *d_tx_bd;
2528 dma_addr_t mapping;
2529 int old_len = le16_to_cpu(h_tx_bd->nbytes);
2530
2531
2532 h_tx_bd->nbd = cpu_to_le16(nbd);
2533 h_tx_bd->nbytes = cpu_to_le16(hlen);
2534
2535 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
2536 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
2537
2538
2539
2540 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2541 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
2542
2543 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2544 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2545
2546 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2547 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2548 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2549
2550
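	/* flag the packet so bnx2x_free_tx_pkt() knows the extra data BD has
	 * no DMA mapping of its own
	 */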
2551 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2552
2553 DP(NETIF_MSG_TX_QUEUED,
2554 "TSO split data size is %d (%x:%x)\n",
2555 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2556
2557
2558 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
2559
2560 return bd_prod;
2561}
2562
2563static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2564{
2565 if (fix > 0)
2566 csum = (u16) ~csum_fold(csum_sub(csum,
2567 csum_partial(t_header - fix, fix, 0)));
2568
2569 else if (fix < 0)
2570 csum = (u16) ~csum_fold(csum_add(csum,
2571 csum_partial(t_header, -fix, 0)));
2572
2573 return swab16(csum);
2574}
2575
2576static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2577{
2578 u32 rc;
2579
2580 if (skb->ip_summed != CHECKSUM_PARTIAL)
2581 rc = XMIT_PLAIN;
2582
2583 else {
2584 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
2585 rc = XMIT_CSUM_V6;
2586 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2587 rc |= XMIT_CSUM_TCP;
2588
2589 } else {
2590 rc = XMIT_CSUM_V4;
2591 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2592 rc |= XMIT_CSUM_TCP;
2593 }
2594 }
2595
2596 if (skb_is_gso_v6(skb))
2597 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2598 else if (skb_is_gso(skb))
2599 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
2600
2601 return rc;
2602}
2603
2604#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2605
2606
2607
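/* Check whether the packet needs to be linearized because it is too
 * fragmented for the FW; no check is needed when the page size exceeds 8K,
 * as the FW limit cannot be violated in that case.
 */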
2608static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2609 u32 xmit_type)
2610{
2611 int to_copy = 0;
2612 int hlen = 0;
2613 int first_bd_sz = 0;
2614
2615
2616 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2617
2618 if (xmit_type & XMIT_GSO) {
2619 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2620
2621
2622 int wnd_size = MAX_FETCH_BD - 3;
2623
2624 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2625 int wnd_idx = 0;
2626 int frag_idx = 0;
2627 u32 wnd_sum = 0;
2628
2629
2630 hlen = (int)(skb_transport_header(skb) - skb->data) +
2631 tcp_hdrlen(skb);
2632
2633
2634 first_bd_sz = skb_headlen(skb) - hlen;
2635
2636 wnd_sum = first_bd_sz;
2637
2638
2639 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2640 wnd_sum +=
2641 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
2642
2643
2644 if (first_bd_sz > 0) {
2645 if (unlikely(wnd_sum < lso_mss)) {
2646 to_copy = 1;
2647 goto exit_lbl;
2648 }
2649
2650 wnd_sum -= first_bd_sz;
2651 }
2652
2653
2654
2655 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2656 wnd_sum +=
2657 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
2658
2659 if (unlikely(wnd_sum < lso_mss)) {
2660 to_copy = 1;
2661 break;
2662 }
2663 wnd_sum -=
2664 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
2665 }
2666 } else {
2667
2668
2669 to_copy = 1;
2670 }
2671 }
2672
2673exit_lbl:
2674 if (unlikely(to_copy))
2675 DP(NETIF_MSG_TX_QUEUED,
2676 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
2677 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2678 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2679
2680 return to_copy;
2681}
2682#endif
2683
2684static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2685 u32 xmit_type)
2686{
2687 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2688 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2689 ETH_TX_PARSE_BD_E2_LSO_MSS;
2690 if ((xmit_type & XMIT_GSO_V6) &&
2691 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2692 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
2693}
2694
2695
2696
2697
2698
2699
2700
2701
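/**
 * bnx2x_set_pbd_gso - update the parsing BD for a GSO packet (E1x chips).
 *
 * @skb:	packet skb
 * @pbd:	parsing BD to be updated
 * @xmit_type:	xmit flags
 */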
2702static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2703 struct eth_tx_parse_bd_e1x *pbd,
2704 u32 xmit_type)
2705{
2706 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2707 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2708 pbd->tcp_flags = pbd_tcp_flags(skb);
2709
2710 if (xmit_type & XMIT_GSO_V4) {
2711 pbd->ip_id = swab16(ip_hdr(skb)->id);
2712 pbd->tcp_pseudo_csum =
2713 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2714 ip_hdr(skb)->daddr,
2715 0, IPPROTO_TCP, 0));
2716
2717 } else
2718 pbd->tcp_pseudo_csum =
2719 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2720 &ipv6_hdr(skb)->daddr,
2721 0, IPPROTO_TCP, 0));
2722
2723 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2724}
2725
2726
2727
2728
2729
2730
2731
2732
2733
2734
2735
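/**
 * bnx2x_set_pbd_csum_e2 - update parsing data with checksum offload info.
 *
 * @bp:			driver handle
 * @skb:		packet skb
 * @parsing_data:	parsing data to be updated
 * @xmit_type:		xmit flags
 *
 * Used on E2 and newer chips; returns the header length in bytes.
 */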
2736static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
2737 u32 *parsing_data, u32 xmit_type)
2738{
2739 *parsing_data |=
2740 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2741 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2742 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
2743
2744 if (xmit_type & XMIT_CSUM_TCP) {
2745 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2746 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2747 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
2748
2749 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2750 } else
2751
2752
2753
2754 return skb_transport_header(skb) +
2755 sizeof(struct udphdr) - skb->data;
2756}
2757
2758static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2759 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2760{
2761 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2762
2763 if (xmit_type & XMIT_CSUM_V4)
2764 tx_start_bd->bd_flags.as_bitfield |=
2765 ETH_TX_BD_FLAGS_IP_CSUM;
2766 else
2767 tx_start_bd->bd_flags.as_bitfield |=
2768 ETH_TX_BD_FLAGS_IPV6;
2769
2770 if (!(xmit_type & XMIT_CSUM_TCP))
2771 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
2772}
2773
2774
2775
2776
2777
2778
2779
2780
2781
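/**
 * bnx2x_set_pbd_csum - update the parsing BD with checksum offload info (E1x).
 *
 * @bp:		driver handle
 * @skb:	packet skb
 * @pbd:	parsing BD to be updated
 * @xmit_type:	xmit flags
 *
 * Returns the total header length in bytes.
 */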
2782static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2783 struct eth_tx_parse_bd_e1x *pbd,
2784 u32 xmit_type)
2785{
2786 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
2787
2788
2789 pbd->global_data =
2790 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2791 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2792
2793 pbd->ip_hlen_w = (skb_transport_header(skb) -
2794 skb_network_header(skb)) >> 1;
2795
2796 hlen += pbd->ip_hlen_w;
2797
2798
2799 if (xmit_type & XMIT_CSUM_TCP)
2800 hlen += tcp_hdrlen(skb) / 2;
2801 else
2802 hlen += sizeof(struct udphdr) / 2;
2803
2804 pbd->total_hlen_w = cpu_to_le16(hlen);
2805 hlen = hlen*2;
2806
2807 if (xmit_type & XMIT_CSUM_TCP) {
2808 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2809
2810 } else {
2811 s8 fix = SKB_CS_OFF(skb);
2812
2813 DP(NETIF_MSG_TX_QUEUED,
2814 "hlen %d fix %d csum before fix %x\n",
2815 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2816
2817
2818 pbd->tcp_pseudo_csum =
2819 bnx2x_csum_fix(skb_transport_header(skb),
2820 SKB_CS(skb), fix);
2821
2822 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2823 pbd->tcp_pseudo_csum);
2824 }
2825
2826 return hlen;
2827}
2828
2829
2830
2831
2832
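/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */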
2833netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2834{
2835 struct bnx2x *bp = netdev_priv(dev);
2836
2837 struct bnx2x_fastpath *fp;
2838 struct netdev_queue *txq;
2839 struct bnx2x_fp_txdata *txdata;
2840 struct sw_tx_bd *tx_buf;
2841 struct eth_tx_start_bd *tx_start_bd, *first_bd;
2842 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
2843 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
2844 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2845 u32 pbd_e2_parsing_data = 0;
2846 u16 pkt_prod, bd_prod;
2847 int nbd, txq_index, fp_index, txdata_index;
2848 dma_addr_t mapping;
2849 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2850 int i;
2851 u8 hlen = 0;
2852 __le16 pkt_size = 0;
2853 struct ethhdr *eth;
2854 u8 mac_type = UNICAST_ADDRESS;
2855
2856#ifdef BNX2X_STOP_ON_ERROR
2857 if (unlikely(bp->panic))
2858 return NETDEV_TX_BUSY;
2859#endif
2860
2861 txq_index = skb_get_queue_mapping(skb);
2862 txq = netdev_get_tx_queue(dev, txq_index);
2863
2864 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
2865
2866
2867 fp_index = TXQ_TO_FP(txq_index);
2868 txdata_index = TXQ_TO_COS(txq_index);
2869
2870#ifdef BCM_CNIC
2871
2872
2873
2874
2875
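	/* Override the queue selection for the FCoE L2 queue: its fastpath
	 * entry follows the ethernet entries and it always uses txdata[0].
	 */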
2876 if (unlikely(!NO_FCOE(bp) && (txq_index ==
2877 bnx2x_fcoe_tx(bp, txq_index)))) {
2878 fp_index = FCOE_IDX;
2879 txdata_index = 0;
2880 }
2881#endif
2882
2883
2884
2885
2886
2887
2888 fp = &bp->fp[fp_index];
2889 txdata = &fp->txdata[txdata_index];
2890
2891
2892
2893
2894
2895
2896 if (unlikely(bnx2x_tx_avail(bp, txdata) <
2897 (skb_shinfo(skb)->nr_frags + 3))) {
2898 fp->eth_q_stats.driver_xoff++;
2899 netif_tx_stop_queue(txq);
2900 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2901 return NETDEV_TX_BUSY;
2902 }
2903
2904 DP(NETIF_MSG_TX_QUEUED,
2905 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x\n",
2906 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
2907 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2908
2909 eth = (struct ethhdr *)skb->data;
2910
2911
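	/* set the MAC type flag (UNICAST_ADDRESS is the default) */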
2912 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2913 if (is_broadcast_ether_addr(eth->h_dest))
2914 mac_type = BROADCAST_ADDRESS;
2915 else
2916 mac_type = MULTICAST_ADDRESS;
2917 }
2918
2919#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2920
2921
2922
2923 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2924
2925 bp->lin_cnt++;
2926 if (skb_linearize(skb) != 0) {
2927 DP(NETIF_MSG_TX_QUEUED,
2928 "SKB linearization failed - silently dropping this SKB\n");
2929 dev_kfree_skb_any(skb);
2930 return NETDEV_TX_OK;
2931 }
2932 }
2933#endif
2934
2935 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2936 skb_headlen(skb), DMA_TO_DEVICE);
2937 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2938 DP(NETIF_MSG_TX_QUEUED,
2939 "SKB mapping failed - silently dropping this SKB\n");
2940 dev_kfree_skb_any(skb);
2941 return NETDEV_TX_OK;
2942 }
2943
2944
2945
2946
2947
2948
2949
2950
2951
2952
2953
2954
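	/* BD layout: one start BD, then a parsing-info BD (used for TSO or
	 * checksum offload) and only then the remaining data BDs. Note that
	 * all parsing BD sizes are in 16-bit words, not dwords.
	 */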
2955 pkt_prod = txdata->tx_pkt_prod;
2956 bd_prod = TX_BD(txdata->tx_bd_prod);
2957
2958
2959
2960
2961
2962 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
2963 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
2964 first_bd = tx_start_bd;
2965
2966 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
2967 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2968 mac_type);
2969
2970
2971 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
2972
2973
2974 tx_buf->first_bd = txdata->tx_bd_prod;
2975 tx_buf->skb = skb;
2976 tx_buf->flags = 0;
2977
2978 DP(NETIF_MSG_TX_QUEUED,
2979 "sending pkt %u @%p next_idx %u bd %u @%p\n",
2980 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
2981
2982 if (vlan_tx_tag_present(skb)) {
2983 tx_start_bd->vlan_or_ethertype =
2984 cpu_to_le16(vlan_tx_tag_get(skb));
2985 tx_start_bd->bd_flags.as_bitfield |=
2986 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
2987 } else
2988 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
2989
2990
2991 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2992
2993 if (xmit_type & XMIT_CSUM)
2994 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
2995
2996 if (!CHIP_IS_E1x(bp)) {
2997 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
2998 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2999
3000 if (xmit_type & XMIT_CSUM)
3001 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3002 &pbd_e2_parsing_data,
3003 xmit_type);
3004 if (IS_MF_SI(bp)) {
3005
3006
3007
3008
3009 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
3010 &pbd_e2->src_mac_addr_mid,
3011 &pbd_e2->src_mac_addr_lo,
3012 eth->h_source);
3013 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
3014 &pbd_e2->dst_mac_addr_mid,
3015 &pbd_e2->dst_mac_addr_lo,
3016 eth->h_dest);
3017 }
3018 } else {
3019 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3020 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3021
3022 if (xmit_type & XMIT_CSUM)
3023 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3024
3025 }
3026
3027
3028 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3029 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3030 nbd = 2;
3031 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3032 pkt_size = tx_start_bd->nbytes;
3033
3034 DP(NETIF_MSG_TX_QUEUED,
3035 "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n",
3036 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3037 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
3038 tx_start_bd->bd_flags.as_bitfield,
3039 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
3040
3041 if (xmit_type & XMIT_GSO) {
3042
3043 DP(NETIF_MSG_TX_QUEUED,
3044 "TSO packet len %d hlen %d total len %d tso size %d\n",
3045 skb->len, hlen, skb_headlen(skb),
3046 skb_shinfo(skb)->gso_size);
3047
3048 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3049
3050 if (unlikely(skb_headlen(skb) > hlen))
3051 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3052 &tx_start_bd, hlen,
3053 bd_prod, ++nbd);
3054 if (!CHIP_IS_E1x(bp))
3055 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3056 xmit_type);
3057 else
3058 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
3059 }
3060
3061
3062
3063
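	/* Set the PBD's parsing_data field if it is non-zero (chips newer
	 * than the 57711 only)
	 */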
3064 if (pbd_e2_parsing_data)
3065 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3066
3067 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3068
3069
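	/* Handle fragmented skb: map and describe each fragment */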
3070 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3071 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3072
3073 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3074 skb_frag_size(frag), DMA_TO_DEVICE);
3075 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3076 unsigned int pkts_compl = 0, bytes_compl = 0;
3077
3078 DP(NETIF_MSG_TX_QUEUED,
3079 "Unable to map page - dropping packet...\n");
3080
3081
3082
3083
3084
3085
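			/* unmap everything already mapped for this SKB;
			 * first_bd->nbd must be set correctly before calling
			 * bnx2x_free_tx_pkt()
			 */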
3086 first_bd->nbd = cpu_to_le16(nbd);
3087 bnx2x_free_tx_pkt(bp, txdata,
3088 TX_BD(txdata->tx_pkt_prod),
3089 &pkts_compl, &bytes_compl);
3090 return NETDEV_TX_OK;
3091 }
3092
3093 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3094 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3095 if (total_pkt_bd == NULL)
3096 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3097
3098 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3099 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3100 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3101 le16_add_cpu(&pkt_size, skb_frag_size(frag));
3102 nbd++;
3103
3104 DP(NETIF_MSG_TX_QUEUED,
3105 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3106 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3107 le16_to_cpu(tx_data_bd->nbytes));
3108 }
3109
3110 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3111
3112
3113 first_bd->nbd = cpu_to_le16(nbd);
3114
3115 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3116
3117
3118
3119
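	/* account for the next-page BD if the packet's BDs contain or end
	 * with it
	 */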
3120 if (TX_BD_POFF(bd_prod) < nbd)
3121 nbd++;
3122
3123
3124
3125
3126
3127
3128
3129
3130 if (total_pkt_bd != NULL)
3131 total_pkt_bd->total_pkt_bytes = pkt_size;
3132
3133 if (pbd_e1x)
3134 DP(NETIF_MSG_TX_QUEUED,
3135 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
3136 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3137 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3138 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3139 le16_to_cpu(pbd_e1x->total_hlen_w));
3140 if (pbd_e2)
3141 DP(NETIF_MSG_TX_QUEUED,
3142 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
3143 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
3144 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
3145 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
3146 pbd_e2->parsing_data);
3147 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3148
3149 netdev_tx_sent_queue(txq, skb->len);
3150
3151 skb_tx_timestamp(skb);
3152
3153 txdata->tx_pkt_prod++;
3154
3155
3156
3157
3158
3159
3160
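	/* Make sure the BDs are fully written before the producer is updated,
	 * since the FW may read them as soon as it sees the new producer.
	 */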
3161 wmb();
3162
3163 txdata->tx_db.data.prod += nbd;
3164 barrier();
3165
3166 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
3167
3168 mmiowb();
3169
3170 txdata->tx_bd_prod += nbd;
3171
3172 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 4)) {
3173 netif_tx_stop_queue(txq);
3174
3175
3176
3177
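		/* paired memory barrier is in bnx2x_tx_int(); it keeps the
		 * ordering between netif_tx_stop_queue() above and the read
		 * of tx_bd_cons
		 */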
3178 smp_mb();
3179
3180 fp->eth_q_stats.driver_xoff++;
3181 if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4)
3182 netif_tx_wake_queue(txq);
3183 }
3184 txdata->tx_pkt++;
3185
3186 return NETDEV_TX_OK;
3187}
3188
3189
3190
3191
3192
3193
3194
3195
3196
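/**
 * bnx2x_setup_tc - configure the net_device for multiple traffic classes.
 *
 * @dev:	net device to configure
 * @num_tc:	number of traffic classes to enable
 *
 * Callback connected to the ndo_setup_tc function pointer.
 */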
3197int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3198{
3199 int cos, prio, count, offset;
3200 struct bnx2x *bp = netdev_priv(dev);
3201
3202
3203 ASSERT_RTNL();
3204
3205
3206 if (!num_tc) {
3207 netdev_reset_tc(dev);
3208 return 0;
3209 }
3210
3211
3212 if (num_tc > bp->max_cos) {
3213 BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
3214 num_tc, bp->max_cos);
3215 return -EINVAL;
3216 }
3217
3218
3219 if (netdev_set_num_tc(dev, num_tc)) {
3220 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
3221 return -EINVAL;
3222 }
3223
3224
3225 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3226 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
3227 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3228 "mapping priority %d to tc %d\n",
3229 prio, bp->prio_to_cos[prio]);
3230 }
3231
3232
3233
3234
3235
3236
3237
3238
3239
3240
3241
3242
3243
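	/* configure traffic class to transmission queue mapping */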
3244 for (cos = 0; cos < bp->max_cos; cos++) {
3245 count = BNX2X_NUM_ETH_QUEUES(bp);
3246 offset = cos * MAX_TXQS_PER_COS;
3247 netdev_set_tc_queue(dev, cos, count, offset);
3248 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3249 "mapping tc %d to offset %d count %d\n",
3250 cos, offset, count);
3251 }
3252
3253 return 0;
3254}
3255
3256
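/* called with rtnl_lock */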
3257int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3258{
3259 struct sockaddr *addr = p;
3260 struct bnx2x *bp = netdev_priv(dev);
3261 int rc = 0;
3262
3263 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
3264 BNX2X_ERR("Requested MAC address is not valid\n");
3265 return -EINVAL;
3266 }
3267
3268#ifdef BCM_CNIC
3269 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
3270 !is_zero_ether_addr(addr->sa_data)) {
3271 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
3272 return -EINVAL;
3273 }
3274#endif
3275
3276 if (netif_running(dev)) {
3277 rc = bnx2x_set_eth_mac(bp, false);
3278 if (rc)
3279 return rc;
3280 }
3281
3282 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
3283 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3284
3285 if (netif_running(dev))
3286 rc = bnx2x_set_eth_mac(bp, true);
3287
3288 return rc;
3289}
3290
3291static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3292{
3293 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3294 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
3295 u8 cos;
3296
3297
3298#ifdef BCM_CNIC
3299 if (IS_FCOE_IDX(fp_index)) {
3300 memset(sb, 0, sizeof(union host_hc_status_block));
3301 fp->status_blk_mapping = 0;
3302
3303 } else {
3304#endif
3305
3306 if (!CHIP_IS_E1x(bp))
3307 BNX2X_PCI_FREE(sb->e2_sb,
3308 bnx2x_fp(bp, fp_index,
3309 status_blk_mapping),
3310 sizeof(struct host_hc_status_block_e2));
3311 else
3312 BNX2X_PCI_FREE(sb->e1x_sb,
3313 bnx2x_fp(bp, fp_index,
3314 status_blk_mapping),
3315 sizeof(struct host_hc_status_block_e1x));
3316#ifdef BCM_CNIC
3317 }
3318#endif
3319
3320 if (!skip_rx_queue(bp, fp_index)) {
3321 bnx2x_free_rx_bds(fp);
3322
3323
3324 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3325 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3326 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3327 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3328
3329 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3330 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3331 sizeof(struct eth_fast_path_rx_cqe) *
3332 NUM_RCQ_BD);
3333
3334
3335 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3336 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3337 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3338 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3339 }
3340
3341
3342 if (!skip_tx_queue(bp, fp_index)) {
3343
3344 for_each_cos_in_tx_queue(fp, cos) {
3345 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3346
3347 DP(NETIF_MSG_IFDOWN,
3348 "freeing tx memory of fp %d cos %d cid %d\n",
3349 fp_index, cos, txdata->cid);
3350
3351 BNX2X_FREE(txdata->tx_buf_ring);
3352 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3353 txdata->tx_desc_mapping,
3354 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3355 }
3356 }
3357
3358}
3359
3360void bnx2x_free_fp_mem(struct bnx2x *bp)
3361{
3362 int i;
3363 for_each_queue(bp, i)
3364 bnx2x_free_fp_mem_at(bp, i);
3365}
3366
3367static void set_sb_shortcuts(struct bnx2x *bp, int index)
3368{
3369 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
3370 if (!CHIP_IS_E1x(bp)) {
3371 bnx2x_fp(bp, index, sb_index_values) =
3372 (__le16 *)status_blk.e2_sb->sb.index_values;
3373 bnx2x_fp(bp, index, sb_running_index) =
3374 (__le16 *)status_blk.e2_sb->sb.running_index;
3375 } else {
3376 bnx2x_fp(bp, index, sb_index_values) =
3377 (__le16 *)status_blk.e1x_sb->sb.index_values;
3378 bnx2x_fp(bp, index, sb_running_index) =
3379 (__le16 *)status_blk.e1x_sb->sb.running_index;
3380 }
3381}
3382
3383
3384static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
3385 int rx_ring_size)
3386{
3387 struct bnx2x *bp = fp->bp;
3388 u16 ring_prod, cqe_ring_prod;
3389 int i, failure_cnt = 0;
3390
3391 fp->rx_comp_cons = 0;
3392 cqe_ring_prod = ring_prod = 0;
3393
3394
3395
3396
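	/* Allocate and map the Rx buffers; allocation failures are counted
	 * and only reduce the ring fill level.
	 */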
3397 for (i = 0; i < rx_ring_size; i++) {
3398 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
3399 failure_cnt++;
3400 continue;
3401 }
3402 ring_prod = NEXT_RX_IDX(ring_prod);
3403 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
3404 WARN_ON(ring_prod <= (i - failure_cnt));
3405 }
3406
3407 if (failure_cnt)
3408 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
3409 i - failure_cnt, fp->index);
3410
3411 fp->rx_bd_prod = ring_prod;
3412
3413 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
3414 cqe_ring_prod);
3415 fp->rx_pkt = fp->rx_calls = 0;
3416
3417 fp->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
3418
3419 return i - failure_cnt;
3420}
3421
3422static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
3423{
3424 int i;
3425
3426 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
3427 struct eth_rx_cqe_next_page *nextpg;
3428
3429 nextpg = (struct eth_rx_cqe_next_page *)
3430 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
3431 nextpg->addr_hi =
3432 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
3433 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3434 nextpg->addr_lo =
3435 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
3436 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3437 }
3438}
3439
3440static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3441{
3442 union host_hc_status_block *sb;
3443 struct bnx2x_fastpath *fp = &bp->fp[index];
3444 int ring_size = 0;
3445 u8 cos;
3446 int rx_ring_size = 0;
3447
3448#ifdef BCM_CNIC
3449 if (!bp->rx_ring_size &&
3450 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
3451 rx_ring_size = MIN_RX_SIZE_NONTPA;
3452 bp->rx_ring_size = rx_ring_size;
3453 } else
3454#endif
3455 if (!bp->rx_ring_size) {
3456 u32 cfg = SHMEM_RD(bp,
3457 dev_info.port_hw_config[BP_PORT(bp)].default_cfg);
3458
3459 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3460
3461
3462 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
3463 PORT_HW_CFG_NET_SERDES_IF_SGMII)
3464 rx_ring_size /= 10;
3465
3466
3467 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3468 MIN_RX_SIZE_TPA, rx_ring_size);
3469
3470 bp->rx_ring_size = rx_ring_size;
3471 } else
3472 rx_ring_size = bp->rx_ring_size;
3473
3474
3475 sb = &bnx2x_fp(bp, index, status_blk);
3476#ifdef BCM_CNIC
3477 if (!IS_FCOE_IDX(index)) {
3478#endif
3479
3480 if (!CHIP_IS_E1x(bp))
3481 BNX2X_PCI_ALLOC(sb->e2_sb,
3482 &bnx2x_fp(bp, index, status_blk_mapping),
3483 sizeof(struct host_hc_status_block_e2));
3484 else
3485 BNX2X_PCI_ALLOC(sb->e1x_sb,
3486 &bnx2x_fp(bp, index, status_blk_mapping),
3487 sizeof(struct host_hc_status_block_e1x));
3488#ifdef BCM_CNIC
3489 }
3490#endif
3491
3492
3493
3494
3495 if (!IS_FCOE_IDX(index))
3496 set_sb_shortcuts(bp, index);
3497
3498
3499 if (!skip_tx_queue(bp, index)) {
3500
3501 for_each_cos_in_tx_queue(fp, cos) {
3502 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3503
3504 DP(NETIF_MSG_IFUP,
3505 "allocating tx memory of fp %d cos %d\n",
3506 index, cos);
3507
3508 BNX2X_ALLOC(txdata->tx_buf_ring,
3509 sizeof(struct sw_tx_bd) * NUM_TX_BD);
3510 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3511 &txdata->tx_desc_mapping,
3512 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3513 }
3514 }
3515
3516
3517 if (!skip_rx_queue(bp, index)) {
3518
3519 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3520 sizeof(struct sw_rx_bd) * NUM_RX_BD);
3521 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3522 &bnx2x_fp(bp, index, rx_desc_mapping),
3523 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3524
3525 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3526 &bnx2x_fp(bp, index, rx_comp_mapping),
3527 sizeof(struct eth_fast_path_rx_cqe) *
3528 NUM_RCQ_BD);
3529
3530
3531 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3532 sizeof(struct sw_rx_page) * NUM_RX_SGE);
3533 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3534 &bnx2x_fp(bp, index, rx_sge_mapping),
3535 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3536
3537 bnx2x_set_next_page_rx_bd(fp);
3538
3539
3540 bnx2x_set_next_page_rx_cq(fp);
3541
3542
3543 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3544 if (ring_size < rx_ring_size)
3545 goto alloc_mem_err;
3546 }
3547
3548 return 0;
3549
3550
3551alloc_mem_err:
3552 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3553 index, ring_size);
3554
3555
3556
3557
3558 if (ring_size < (fp->disable_tpa ?
3559 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
3560
3561 bnx2x_free_fp_mem_at(bp, index);
3562 return -ENOMEM;
3563 }
3564 return 0;
3565}
3566
3567int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3568{
3569 int i;
3570
3571
3572
3573
3574
3575
3576
3577
3578
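	/* Allocation order:
	 * 1. the leading queue - fatal on error
	 * 2. (CNIC) the FCoE queue - fatal on error
	 * 3. the remaining RSS queues - reduce the queue count on error
	 */
	/* leading */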
3579 if (bnx2x_alloc_fp_mem_at(bp, 0))
3580 return -ENOMEM;
3581
3582#ifdef BCM_CNIC
3583 if (!NO_FCOE(bp))
3584
3585 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
3586
3587
3588
3589 return -ENOMEM;
3590#endif
3591
3592
3593 for_each_nondefault_eth_queue(bp, i)
3594 if (bnx2x_alloc_fp_mem_at(bp, i))
3595 break;
3596
3597
3598 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3599 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3600
3601 WARN_ON(delta < 0);
3602#ifdef BCM_CNIC
3603
3604
3605
3606
3607
3608
3609
3610 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
3611#endif
3612 bp->num_queues -= delta;
3613 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3614 bp->num_queues + delta, bp->num_queues);
3615 }
3616
3617 return 0;
3618}
3619
3620void bnx2x_free_mem_bp(struct bnx2x *bp)
3621{
3622 kfree(bp->fp);
3623 kfree(bp->msix_table);
3624 kfree(bp->ilt);
3625}
3626
3627int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3628{
3629 struct bnx2x_fastpath *fp;
3630 struct msix_entry *tbl;
3631 struct bnx2x_ilt *ilt;
3632 int msix_table_size = 0;
3633
3634
3635
3636
3637
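	/* The largest MSI-X table we might need has one entry per fastpath
	 * IGU status block plus one for the default status block (PF).
	 */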
3638 msix_table_size = bp->igu_sb_cnt + 1;
3639
3640
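	/* fp array: RSS plus CNIC related L2 queues */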
3641 fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE,
3642 sizeof(*fp), GFP_KERNEL);
3643 if (!fp)
3644 goto alloc_err;
3645 bp->fp = fp;
3646
3647
3648 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
3649 if (!tbl)
3650 goto alloc_err;
3651 bp->msix_table = tbl;
3652
3653
3654 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3655 if (!ilt)
3656 goto alloc_err;
3657 bp->ilt = ilt;
3658
3659 return 0;
3660alloc_err:
3661 bnx2x_free_mem_bp(bp);
	return -ENOMEM;
}
3665
3666int bnx2x_reload_if_running(struct net_device *dev)
3667{
3668 struct bnx2x *bp = netdev_priv(dev);
3669
3670 if (unlikely(!netif_running(dev)))
3671 return 0;
3672
3673 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
3674 return bnx2x_nic_load(bp, LOAD_NORMAL);
3675}
3676
3677int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3678{
3679 u32 sel_phy_idx = 0;
3680 if (bp->link_params.num_phys <= 1)
3681 return INT_PHY;
3682
3683 if (bp->link_vars.link_up) {
3684 sel_phy_idx = EXT_PHY1;
3685
3686 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3687 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3688 sel_phy_idx = EXT_PHY2;
3689 } else {
3690
3691 switch (bnx2x_phy_selection(&bp->link_params)) {
3692 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3693 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3694 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3695 sel_phy_idx = EXT_PHY1;
3696 break;
3697 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
3698 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3699 sel_phy_idx = EXT_PHY2;
3700 break;
3701 }
3702 }
3703
	return sel_phy_idx;
}

int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3708{
3709 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
3710
3711
3712
3713
3714
3715
3716 if (bp->link_params.multi_phy_config &
3717 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
3718 if (sel_phy_idx == EXT_PHY1)
3719 sel_phy_idx = EXT_PHY2;
3720 else if (sel_phy_idx == EXT_PHY2)
3721 sel_phy_idx = EXT_PHY1;
3722 }
3723 return LINK_CONFIG_IDX(sel_phy_idx);
3724}
3725
3726#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
3727int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
3728{
3729 struct bnx2x *bp = netdev_priv(dev);
3730 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
3731
3732 switch (type) {
3733 case NETDEV_FCOE_WWNN:
3734 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
3735 cp->fcoe_wwn_node_name_lo);
3736 break;
3737 case NETDEV_FCOE_WWPN:
3738 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
3739 cp->fcoe_wwn_port_name_lo);
3740 break;
3741 default:
3742 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
3743 return -EINVAL;
3744 }
3745
3746 return 0;
3747}
3748#endif
3749
3750
3751int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3752{
3753 struct bnx2x *bp = netdev_priv(dev);
3754
3755 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3756 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
3757 return -EAGAIN;
3758 }
3759
3760 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
3761 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
3762 BNX2X_ERR("Can't support requested MTU size\n");
3763 return -EINVAL;
3764 }
3765
3766
3767
3768
3769
3770 dev->mtu = new_mtu;
3771
3772 return bnx2x_reload_if_running(dev);
3773}
3774
3775netdev_features_t bnx2x_fix_features(struct net_device *dev,
3776 netdev_features_t features)
3777{
3778 struct bnx2x *bp = netdev_priv(dev);
3779
3780
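	/* TPA (LRO/GRO) requires Rx checksum offloading */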
3781 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
3782 features &= ~NETIF_F_LRO;
3783 features &= ~NETIF_F_GRO;
3784 }
3785
3786 return features;
3787}
3788
3789int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
3790{
3791 struct bnx2x *bp = netdev_priv(dev);
3792 u32 flags = bp->flags;
3793 bool bnx2x_reload = false;
3794
3795 if (features & NETIF_F_LRO)
3796 flags |= TPA_ENABLE_FLAG;
3797 else
3798 flags &= ~TPA_ENABLE_FLAG;
3799
3800 if (features & NETIF_F_GRO)
3801 flags |= GRO_ENABLE_FLAG;
3802 else
3803 flags &= ~GRO_ENABLE_FLAG;
3804
3805 if (features & NETIF_F_LOOPBACK) {
3806 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
3807 bp->link_params.loopback_mode = LOOPBACK_BMAC;
3808 bnx2x_reload = true;
3809 }
3810 } else {
3811 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
3812 bp->link_params.loopback_mode = LOOPBACK_NONE;
3813 bnx2x_reload = true;
3814 }
3815 }
3816
3817 if (flags ^ bp->flags) {
3818 bp->flags = flags;
3819 bnx2x_reload = true;
3820 }
3821
3822 if (bnx2x_reload) {
3823 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
3824 return bnx2x_reload_if_running(dev);
3825
3826 }
3827
3828 return 0;
3829}
3830
3831void bnx2x_tx_timeout(struct net_device *dev)
3832{
3833 struct bnx2x *bp = netdev_priv(dev);
3834
3835#ifdef BNX2X_STOP_ON_ERROR
3836 if (!bp->panic)
3837 bnx2x_panic();
3838#endif
3839
3840 smp_mb__before_clear_bit();
3841 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
3842 smp_mb__after_clear_bit();
3843
3844
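	/* Defer the handling to the sp_rtnl task so the netif can be shut
	 * down gracefully before the reset.
	 */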
3845 schedule_delayed_work(&bp->sp_rtnl_task, 0);
3846}
3847
3848int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
3849{
3850 struct net_device *dev = pci_get_drvdata(pdev);
3851 struct bnx2x *bp;
3852
3853 if (!dev) {
3854 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3855 return -ENODEV;
3856 }
3857 bp = netdev_priv(dev);
3858
3859 rtnl_lock();
3860
3861 pci_save_state(pdev);
3862
3863 if (!netif_running(dev)) {
3864 rtnl_unlock();
3865 return 0;
3866 }
3867
3868 netif_device_detach(dev);
3869
3870 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
3871
3872 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
3873
3874 rtnl_unlock();
3875
3876 return 0;
3877}
3878
3879int bnx2x_resume(struct pci_dev *pdev)
3880{
3881 struct net_device *dev = pci_get_drvdata(pdev);
3882 struct bnx2x *bp;
3883 int rc;
3884
3885 if (!dev) {
3886 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3887 return -ENODEV;
3888 }
3889 bp = netdev_priv(dev);
3890
3891 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3892 BNX2X_ERR("Handling parity error recovery. Try again later\n");
3893 return -EAGAIN;
3894 }
3895
3896 rtnl_lock();
3897
3898 pci_restore_state(pdev);
3899
3900 if (!netif_running(dev)) {
3901 rtnl_unlock();
3902 return 0;
3903 }
3904
3905 bnx2x_set_power_state(bp, PCI_D0);
3906 netif_device_attach(dev);
3907
3908 rc = bnx2x_nic_load(bp, LOAD_OPEN);
3909
3910 rtnl_unlock();
3911
3912 return rc;
3913}
3914
3915
3916void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
3917 u32 cid)
3918{
3919
3920 cxt->ustorm_ag_context.cdu_usage =
3921 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3922 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
3923
3924 cxt->xstorm_ag_context.cdu_reserved =
3925 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3926 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
3927}
3928
3929static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
3930 u8 fw_sb_id, u8 sb_index,
3931 u8 ticks)
3932{
3933
3934 u32 addr = BAR_CSTRORM_INTMEM +
3935 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
3936 REG_WR8(bp, addr, ticks);
3937 DP(NETIF_MSG_IFUP,
3938 "port %x fw_sb_id %d sb_index %d ticks %d\n",
3939 port, fw_sb_id, sb_index, ticks);
3940}
3941
3942static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
3943 u16 fw_sb_id, u8 sb_index,
3944 u8 disable)
3945{
3946 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
3947 u32 addr = BAR_CSTRORM_INTMEM +
3948 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
3949 u16 flags = REG_RD16(bp, addr);
3950
3951 flags &= ~HC_INDEX_DATA_HC_ENABLED;
3952 flags |= enable_flag;
3953 REG_WR16(bp, addr, flags);
3954 DP(NETIF_MSG_IFUP,
3955 "port %x fw_sb_id %d sb_index %d disable %d\n",
3956 port, fw_sb_id, sb_index, disable);
3957}
3958
3959void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
3960 u8 sb_index, u8 disable, u16 usec)
3961{
3962 int port = BP_PORT(bp);
3963 u8 ticks = usec / BNX2X_BTR;
3964
3965 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3966
3967 disable = disable ? 1 : (usec ? 0 : 1);
3968 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
3969}
3970