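/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Common fast-path (Rx/Tx, NAPI, TPA/GRO) and load/unload helpers shared
 * by the PF and VF flavours of the bnx2x driver.
 */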
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/crash_dump.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
static int bnx2x_poll(struct napi_struct *napi, int budget);

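/* Register one NAPI context per Rx queue; bnx2x_poll() services the Rx and
 * Tx completion rings of the corresponding fastpath.
 */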
static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_rx_queue_cnic(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
	}
}

static void bnx2x_add_all_napi(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_eth_queue(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
	}
}

static int bnx2x_calc_num_queues(struct bnx2x *bp)
{
	int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();

	/* Reduce memory usage in kdump environment by using only one queue */
	if (is_kdump_kernel())
		nq = 1;

	nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
	return nq;
}

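/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of the bp->fp[to].napi is kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */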
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;
	struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* Retain the tpa_info of the original `to' version as we don't want
	 * 2 FPs to contain the same tpa_info pointer.
	 */
	to_fp->tpa_info = old_tpa_info;

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */
	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
			   (bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[new_txdata_index],
	       &bp->bnx2x_txq[old_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}

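/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp:        driver handle
 * @buf:       character buffer to fill with the fw name
 * @buf_len:   length of the above buffer
 */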
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{
	if (IS_PF(bp)) {
		u8 phy_fw_ver[PHY_FW_VER_LEN];

		phy_fw_ver[0] = '\0';
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     phy_fw_ver, PHY_FW_VER_LEN);
		strlcpy(buf, bp->fw_ver, buf_len);
		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
			 "bc %d.%d.%d%s%s",
			 (bp->common.bc_ver & 0xff0000) >> 16,
			 (bp->common.bc_ver & 0xff00) >> 8,
			 (bp->common.bc_ver & 0xff),
			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	} else {
		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
	}
}

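/**
 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 *
 * @bp:	driver handle
 * @delta:	number of eth queues which were not allocated
 */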
static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
{
	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);

	/* The per-CoS txdata blocks are laid out back-to-back; after 'delta'
	 * ethernet queues were dropped, compact the CoS > 0 entries so each
	 * CoS block stays contiguous. CoS 0 entries already sit at the start
	 * of the array and need no move.
	 */
	for (cos = 1; cos < bp->max_cos; cos++) {
		for (i = 0; i < old_eth_num - delta; i++) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			int new_idx = cos * (old_eth_num - delta) + i;

			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
			       sizeof(struct bnx2x_fp_txdata));
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
		}
	}
}

int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

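/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */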
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;
	u16 split_bd_len = 0;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
		/* Skip second parse bd... */
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		split_bd_len = BD_UNMAP_LEN(tx_data_bd);
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* unmap first bd */
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
			 DMA_TO_DEVICE);

	/* now free frags */
	while (nbd > 0) {
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
		dev_kfree_skb_any(skb);
	}

	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could have happen if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

341
342static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
343 u16 idx)
344{
345 u16 last_max = fp->last_max_sge;
346
347 if (SUB_S16(idx, last_max) > 0)
348 fp->last_max_sge = idx;
349}
350
351static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
352 u16 sge_len,
353 struct eth_end_agg_rx_cqe *cqe)
354{
355 struct bnx2x *bp = fp->bp;
356 u16 last_max, last_elem, first_elem;
357 u16 delta = 0;
358 u16 i;
359
360 if (!sge_len)
361 return;
362
363
364 for (i = 0; i < sge_len; i++)
365 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
366 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
367
368 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
369 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
370
371
372 prefetch((void *)(fp->sge_mask));
373 bnx2x_update_last_max_sge(fp,
374 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
375
376 last_max = RX_SGE(fp->last_max_sge);
377 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
378 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
379
380
381 if (last_elem + 1 != first_elem)
382 last_elem++;
383
384
385 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
386 if (likely(fp->sge_mask[i]))
387 break;
388
389 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
390 delta += BIT_VEC64_ELEM_SZ;
391 }
392
393 if (delta > 0) {
394 fp->rx_sge_prod += delta;
395
396 bnx2x_clear_sge_mask_next_elems(fp);
397 }
398
399 DP(NETIF_MSG_RX_STATUS,
400 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
401 fp->last_max_sge, fp->rx_sge_prod);
402}
403
404
405
406
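/* Get Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */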
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe,
			    enum pkt_hash_types *rxhash_type)
{
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
		enum eth_rss_hash_type htype;

		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
		*rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
				(htype == TCP_IPV6_HASH_TYPE)) ?
			       PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;

		return le32_to_cpu(cqe->rss_hash_result);
	}
	*rxhash_type = PKT_HASH_TYPE_NONE;
	return 0;
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 *  ...if it fails - move the skb from the consumer to the producer
	 *  and set the current aggregation state as ERROR to drop it
	 *  when TPA_STOP arrives.
	 */
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
		tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
	   fp->tpa_queue_used);
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN 12

/**
 * bnx2x_set_gro_params - compute GRO values
 *
 * @skb:		packet skb
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 * @pkt_len:		length of all segments
 * @num_of_coalesced_segs: number of segments aggregated by the FW
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 * Compute number of aggregated segments, and gso_type.
 */
static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
				 u16 len_on_bd, unsigned int pkt_len,
				 u16 num_of_coalesced_segs)
{
	/* TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6) {
		hdrs_len += sizeof(struct ipv6hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	} else {
		hdrs_len += sizeof(struct iphdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	/* Check if there was a TCP timestamp; if there is, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}

static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      u16 index, gfp_t gfp_mask)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	struct bnx2x_alloc_pool *pool = &fp->page_pool;
	dma_addr_t mapping;

	if (!pool->page) {
		pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
		if (unlikely(!pool->page))
			return -ENOMEM;

		pool->offset = 0;
	}

	mapping = dma_map_page(&bp->pdev->dev, pool->page,
			       pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	sw_buf->page = pool->page;
	sw_buf->offset = pool->offset;

	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* If the pool page still has room for another SGE, take an extra
	 * reference so it survives this consumer; otherwise start a fresh
	 * page on the next call.
	 */
	pool->offset += SGE_PAGE_SIZE;
	if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE)
		get_page(pool->page);
	else
		pool->page = NULL;
	return 0;
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
				     le16_to_cpu(cqe->pkt_len),
				     le16_to_cpu(cqe->num_of_coalesced_segs));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		 * (meaning that the "next" element will consume 2 indices)
		 */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else
			frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		 * where we are and drop the whole packet
		 */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
		if (unlikely(err)) {
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
			return err;
		}

		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page,
					   old_rx_pg.offset, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;
			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;
				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page,
						   old_rx_pg.offset + offset,
						   len);
				if (offset)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGES;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
	if (fp->rx_frag_size)
		skb_free_frag(data);
	else
		kfree(data);
}

static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
{
	if (fp->rx_frag_size) {
		/* GFP_KERNEL allocations are used only during initialization */
		if (unlikely(gfpflags_allow_blocking(gfp_mask)))
			return (void *)__get_free_page(gfp_mask);

		return netdev_alloc_frag(fp->rx_frag_size);
	}

	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
}

#ifdef CONFIG_INET
static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct iphdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
}

static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
}

static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
			   void (*gro_func)(struct bnx2x*, struct sk_buff*))
{
	skb_reset_network_header(skb);
	gro_func(bp, skb);
	tcp_gro_complete(skb);
}
#endif

static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      struct sk_buff *skb)
{
#ifdef CONFIG_INET
	if (skb_shinfo(skb)->gso_size) {
		switch (be16_to_cpu(skb->protocol)) {
		case ETH_P_IP:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
			break;
		case ETH_P_IPV6:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
			break;
		default:
			netdev_WARN_ONCE(bp->dev,
					 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
					 be16_to_cpu(skb->protocol));
		}
	}
#endif
	skb_record_rx_queue(skb, fp->rx_queue);
	napi_gro_receive(&fp->napi, skb);
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
	/* Unmap skb in the pool anyway, as we are going to change
	 * pool entry status to BNX2X_TPA_STOP even if new skb allocation
	 * fails.
	 */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data, fp->rx_frag_size);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
						       tpa_info->vlan_tag);
			bnx2x_gro_receive(bp, fp, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	if (new_data)
		bnx2x_frag_free(fp, new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}

static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       u16 index, gfp_t gfp_mask)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = bnx2x_frag_alloc(fp, gfp_mask);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		bnx2x_frag_free(fp, data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static
void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
			 struct bnx2x_fastpath *fp,
			 struct bnx2x_eth_q_stats *qstats)
{
	/* Do nothing if no L4 csum validation was done.
	 * We do not check whether IP csum was validated. For IPv4 we assume
	 * that if the card got as far as validating the L4 csum, it also
	 * validated the IP csum. IPv6 has no IP csum.
	 */
	if (cqe->fast_path_cqe.status_flags &
	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
		return;

	/* If L4 validation was done, check if an error was found. */
	if (cqe->fast_path_cqe.type_error_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		qstats->hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

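/* Rx completion-queue polling loop: dispatches slow-path events, TPA
 * start/stop aggregation CQEs and regular fast-path packets, then publishes
 * the updated producer values to the chip. Returns the number of packets
 * processed (at most 'budget').
 */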
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;
	union eth_rx_cqe *cqe;
	struct eth_fast_path_rx_cqe *cqe_fp;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif
	if (budget <= 0)
		return rx_pkt;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	comp_ring_cons = RCQ_BD(sw_comp_cons);
	cqe = &fp->rx_comp_ring[comp_ring_cons];
	cqe_fp = &cqe->fast_path_cqe;

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);

	while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad, queue;
		u8 *data;
		u32 rxhash;
		enum pkt_hash_types rxhash_type;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* A rmb() is required to ensure that the CQE is not read
		 * before it is written by the adapter DMA. PCI ordering
		 * rules will make sure the other fields are written before
		 * the marker at the end of struct eth_rx_cqe but without
		 * rmb() a weakly ordered processor can process stale data.
		 *
		 * Without the barrier the TPA state-machine might enter an
		 * inconsistent state and the kernel stack might be provided
		 * with an incorrect packet description - these lead to
		 * various kernel crashes.
		 */
		rmb();

		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS,
		   "CQE type %x err %x status %x queue %x vlan %x len %u\n",
		   CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag),
		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

		/* is this a slowpath msg? (spq) */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
			struct bnx2x_agg_info *tpa_info;
			u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->mode == TPA_MODE_DISABLED &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);

				goto next_rx;
			}
			queue = cqe->end_agg_cqe.queue_index;
			tpa_info = &fp->tpa_info[queue];
			DP(NETIF_MSG_RX_STATUS,
			   "calling tpa_stop on queue %d\n",
			   queue);

			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
				    tpa_info->len_on_bd;

			if (fp->mode == TPA_MODE_GRO)
				pages = (frag_size + tpa_info->full_page - 1) /
					 tpa_info->full_page;
			else
				pages = SGE_PAGE_ALIGN(frag_size) >>
					SGE_PAGE_SHIFT;

			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
				       &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
			if (bp->panic)
				return 0;
#endif

			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
			goto next_cqe;
		}
		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		pad += NET_SKB_PAD;
		prefetch(data + pad); /* speedup eth_type_trans() */
		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			   "ERROR flags %x rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = napi_alloc_skb(&fp->napi, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
						       GFP_ATOMIC) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = build_skb(data, fp->rx_frag_size);
				if (unlikely(!skb)) {
					bnx2x_frag_free(fp, data);
					bnx2x_fp_qstats(bp, fp)->
							rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a none-LRO skb */
		rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
		skb_set_hash(skb, rxhash, rxhash_type);

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM)
			bnx2x_csum_validate(skb, cqe, fp,
					    bnx2x_fp_qstats(bp, fp));

		skb_record_rx_queue(skb, fp->rx_queue);

		/* Check if this packet was timestamped */
		if (unlikely(cqe->fast_path_cqe.type_error_flags &
			     (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
			bnx2x_set_rx_ts(bp, skb);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       le16_to_cpu(cqe_fp->vlan_tag));

		napi_gro_receive(&fp->napi, skb);
next_rx:
		rx_buf->data = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		/* mark CQE as free */
		BNX2X_SEED_CQE(cqe_fp);

		if (rx_pkt == budget)
			break;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(NETIF_MSG_INTR,
	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);

	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;

	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_PERCENT_BW(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}

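/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * It uses a none-atomic bit operations because is called under the mutex.
 */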
static void bnx2x_fill_report_data(struct bnx2x *bp,
				   struct bnx2x_link_report_data *data)
{
	memset(data, 0, sizeof(*data));

	if (IS_PF(bp)) {
		/* Fill the report data: effective line speed */
		data->line_speed = bnx2x_get_mf_speed(bp);

		/* Link is down */
		if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
			__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
				  &data->link_report_flags);

		if (!BNX2X_NUM_ETH_QUEUES(bp))
			__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
				  &data->link_report_flags);

		/* Full DUPLEX */
		if (bp->link_vars.duplex == DUPLEX_FULL)
			__set_bit(BNX2X_LINK_REPORT_FD,
				  &data->link_report_flags);

		/* Rx Flow Control is ON */
		if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
			__set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				  &data->link_report_flags);

		/* Tx Flow Control is ON */
		if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
			__set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
				  &data->link_report_flags);
	} else { /* VF */
		*data = bp->vf_link_vars;
	}
}

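/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls the __bnx2x_link_report() under the same locking scheme
 * as a link/PHY state managing code to ensure a consistent link
 * reporting.
 */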
void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}

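/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * None atomic implementation.
 * Should be called under the phy_lock.
 */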
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	if (bp->force_link_down) {
		bp->link_vars.link_up = 0;
		return;
	}

	/* reread mf_cfg */
	if (IS_PF(bp) && !CHIP_IS_E1(bp))
		bnx2x_read_mf_cfg(bp);

	/* Read the actual link status */
	bnx2x_fill_report_data(bp, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &bp->last_reported_link.link_report_flags) &&
	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &cur_data.link_report_flags)))
		return;

	bp->link_cnt++;

	/* We are going to report a new link parameters now -
	 * remember the current data for the next time.
	 */
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

	/* propagate status to VFs */
	if (IS_PF(bp))
		bnx2x_iov_link_update(bp);

	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		     &cur_data.link_report_flags)) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	} else {
		const char *duplex;
		const char *flow;

		netif_carrier_on(bp->dev);

		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
				       &cur_data.link_report_flags))
			duplex = "full";
		else
			duplex = "half";

		/* Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
		 */
		if (cur_data.link_report_flags) {
			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				     &cur_data.link_report_flags)) {
				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
					     &cur_data.link_report_flags))
					flow = "ON - receive & transmit";
				else
					flow = "ON - receive";
			} else {
				flow = "ON - transmit";
			}
		} else {
			flow = "none";
		}
		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
			    cur_data.line_speed, duplex, flow);
	}
}

static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		struct eth_rx_sge *sge;

		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
		sge->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));

		sge->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
	}
}

static void bnx2x_free_tpa_pool(struct bnx2x *bp,
				struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
		u8 *data = first_buf->data;

		if (data == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}
		if (tpa_info->tpa_state == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(first_buf, mapping),
					 fp->rx_buf_size, DMA_FROM_DEVICE);
		bnx2x_frag_free(fp, data);
		first_buf->data = NULL;
	}
}

void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue_cnic(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
	}
}

void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u16 ring_prod;
	int i, j;

	/* Allocate TPA resources */
	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		DP(NETIF_MSG_IFUP,
		   "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);

		if (fp->mode != TPA_MODE_DISABLED) {
			/* Fill the per-aggregation pool */
			for (i = 0; i < MAX_AGG_QS(bp); i++) {
				struct bnx2x_agg_info *tpa_info =
					&fp->tpa_info[i];
				struct sw_rx_bd *first_buf =
					&tpa_info->first_buf;

				first_buf->data =
					bnx2x_frag_alloc(fp, GFP_KERNEL);
				if (!first_buf->data) {
					BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
						  j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->mode = TPA_MODE_DISABLED;
					break;
				}
				dma_unmap_addr_set(first_buf, mapping, 0);
				tpa_info->tpa_state = BNX2X_TPA_STOP;
			}

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
						       GFP_KERNEL) < 0) {
					BNX2X_ERR("was only able to allocate %d rx sges\n",
						  i);
					BNX2X_ERR("disabling TPA for queue[%d]\n",
						  j);
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp, fp,
								ring_prod);
					bnx2x_free_tpa_pool(bp, fp,
							    MAX_AGG_QS(bp));
					fp->mode = TPA_MODE_DISABLED;
					ring_prod = 0;
					break;
				}
				ring_prod = NEXT_SGE_IDX(ring_prod);
			}

			fp->rx_sge_prod = ring_prod;
		}
	}

	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);

		if (j != 0)
			continue;

		if (CHIP_IS_E1(bp)) {
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
			       U64_LO(fp->rx_comp_mapping));
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
			       U64_HI(fp->rx_comp_mapping));
		}
	}
}

static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
{
	u8 cos;
	struct bnx2x *bp = fp->bp;

	for_each_cos_in_tx_queue(fp, cos) {
		struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
		unsigned pkts_compl = 0, bytes_compl = 0;

		u16 sw_prod = txdata->tx_pkt_prod;
		u16 sw_cons = txdata->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
					  &pkts_compl, &bytes_compl);
			sw_cons++;
		}

		netdev_tx_reset_queue(
			netdev_get_tx_queue(bp->dev,
					    txdata->txq_index));
	}
}

static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue_cnic(bp, i) {
		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
	}
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i) {
		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
	}
}

static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	int i;

	/* ring wasn't allocated */
	if (fp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < NUM_RX_BD; i++) {
		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
		u8 *data = rx_buf->data;

		if (data == NULL)
			continue;
		dma_unmap_single(&bp->pdev->dev,
				 dma_unmap_addr(rx_buf, mapping),
				 fp->rx_buf_size, DMA_FROM_DEVICE);

		rx_buf->data = NULL;
		bnx2x_frag_free(fp, data);
	}
}

static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue_cnic(bp, j) {
		bnx2x_free_rx_bds(&bp->fp[j]);
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int j;

	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		bnx2x_free_rx_bds(fp);

		if (fp->mode != TPA_MODE_DISABLED)
			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
	}
}

static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs_cnic(bp);
	bnx2x_free_rx_skbs_cnic(bp);
}

void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
{
	/* load old values */
	u32 mf_cfg = bp->mf_config[BP_VN(bp)];

	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
		/* leave all but MAX value */
		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;

		/* set new MAX value */
		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
				& FUNC_MF_CFG_MAX_BW_MASK;

		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
	}
}

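/**
 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
 *
 * @bp:		driver handle
 * @nvecs:	number of vectors to be released
 */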
static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
{
	int i, offset = 0;

	if (nvecs == offset)
		return;

	/* VFs don't have a default SB */
	if (IS_PF(bp)) {
		free_irq(bp->msix_table[offset].vector, bp->dev);
		DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
		   bp->msix_table[offset].vector);
		offset++;
	}

	if (CNIC_SUPPORT(bp)) {
		if (nvecs == offset)
			return;
		offset++;
	}

	for_each_eth_queue(bp, i) {
		if (nvecs == offset)
			return;
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
		   i, bp->msix_table[offset].vector);

		free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
	}
}

void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG &&
	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
		int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);

		/* vfs don't have a default status block */
		if (IS_PF(bp))
			nvecs++;

		bnx2x_free_msix_irqs(bp, nvecs);
	} else {
		free_irq(bp->dev->irq, bp->dev);
	}
}

int bnx2x_enable_msix(struct bnx2x *bp)
{
	int msix_vec = 0, i, rc;

	/* VFs don't have a default status block */
	if (IS_PF(bp)) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
			       bp->msix_table[0].entry);
		msix_vec++;
	}

	/* Cnic requires an msix vector for itself */
	if (CNIC_SUPPORT(bp)) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
			       msix_vec, bp->msix_table[msix_vec].entry);
		msix_vec++;
	}

	/* We need separate vectors for ETH queues only (not FCoE) */
	for_each_eth_queue(bp, i) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
			       msix_vec, msix_vec, i);
		msix_vec++;
	}

	DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
	   msix_vec);

	rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
				   BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
	/*
	 * reconfigure number of tx/rx queues according to available
	 * MSI-X vectors
	 */
	if (rc == -ENOSPC) {
		/* Get by with single vector */
		rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
		if (rc < 0) {
			BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
				       rc);
			goto no_msix;
		}

		BNX2X_DEV_INFO("Using single MSI-X vector\n");
		bp->flags |= USING_SINGLE_MSIX_FLAG;

		BNX2X_DEV_INFO("set number of queues to 1\n");
		bp->num_ethernet_queues = 1;
		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
	} else if (rc < 0) {
		BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
		goto no_msix;
	} else if (rc < msix_vec) {
		/* how many fewer vectors did we get? */
		int diff = msix_vec - rc;

		BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);

		/*
		 * decrease number of queues by number of unallocated entries
		 */
		bp->num_ethernet_queues -= diff;
		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;

		BNX2X_DEV_INFO("New queue configuration set: %d\n",
			       bp->num_queues);
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;

no_msix:
	/* fall to INTx if not enough memory */
	if (rc == -ENOMEM)
		bp->flags |= DISABLE_MSI_FLAG;

	return rc;
}

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 0;

	/* no default status block for vf */
	if (IS_PF(bp)) {
		rc = request_irq(bp->msix_table[offset++].vector,
				 bnx2x_msix_sp_int, 0,
				 bp->dev->name, bp->dev);
		if (rc) {
			BNX2X_ERR("request sp irq failed\n");
			return -EBUSY;
		}
	}

	if (CNIC_SUPPORT(bp))
		offset++;

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
				  bp->msix_table[offset].vector, rc);
			bnx2x_free_msix_irqs(bp, offset);
			return -EBUSY;
		}

		offset++;
	}

	i = BNX2X_NUM_ETH_QUEUES(bp);
	if (IS_PF(bp)) {
		offset = 1 + CNIC_SUPPORT(bp);
		netdev_info(bp->dev,
			    "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
			    bp->msix_table[0].vector,
			    0, bp->msix_table[offset].vector,
			    i - 1, bp->msix_table[offset + i - 1].vector);
	} else {
		offset = CNIC_SUPPORT(bp);
		netdev_info(bp->dev,
			    "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
			    0, bp->msix_table[offset].vector,
			    i - 1, bp->msix_table[offset + i - 1].vector);
	}
	return 0;
}

int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		BNX2X_DEV_INFO("MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	unsigned int irq;

	if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
		flags = 0;
	else
		flags = IRQF_SHARED;

	if (bp->flags & USING_MSIX_FLAG)
		irq = bp->msix_table[0].vector;
	else
		irq = bp->pdev->irq;

	return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
}

static int bnx2x_setup_irqs(struct bnx2x *bp)
{
	int rc = 0;

	if (bp->flags & USING_MSIX_FLAG &&
	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc)
			return rc;
	} else {
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			return rc;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			netdev_info(bp->dev, "using MSI IRQ %d\n",
				    bp->dev->irq);
		}
		if (bp->flags & USING_MSIX_FLAG) {
			bp->dev->irq = bp->msix_table[0].vector;
			netdev_info(bp->dev, "using MSIX IRQ %d\n",
				    bp->dev->irq);
		}
	}

	return 0;
}

static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue_cnic(bp, i) {
		napi_enable(&bnx2x_fp(bp, i, napi));
	}
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i) {
		napi_enable(&bnx2x_fp(bp, i, napi));
	}
}

static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue_cnic(bp, i) {
		napi_disable(&bnx2x_fp(bp, i, napi));
	}
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i) {
		napi_disable(&bnx2x_fp(bp, i, napi));
	}
}

void bnx2x_netif_start(struct bnx2x *bp)
{
	if (netif_running(bp->dev)) {
		bnx2x_napi_enable(bp);
		if (CNIC_LOADED(bp))
			bnx2x_napi_enable_cnic(bp);
		bnx2x_int_enable(bp);
		if (bp->state == BNX2X_STATE_OPEN)
			netif_tx_wake_all_queues(bp->dev);
	}
}

void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	if (CNIC_LOADED(bp))
		bnx2x_napi_disable_cnic(bp);
}

u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
		       struct net_device *sb_dev,
		       select_queue_fallback_t fallback)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
		struct ethhdr *hdr = (struct ethhdr *)skb->data;
		u16 ether_type = ntohs(hdr->h_proto);

		/* Skip VLAN tag if present */
		if (ether_type == ETH_P_8021Q) {
			struct vlan_ethhdr *vhdr =
				(struct vlan_ethhdr *)skb->data;

			ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
		}

		/* If ethertype is FCoE or FIP - use FCoE ring */
		if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
			return bnx2x_fcoe_tx(bp, txq_index);
	}

	/* select a non-FCoE queue */
	return fallback(dev, skb, NULL) %
	       (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
}

void bnx2x_set_num_queues(struct bnx2x *bp)
{
	/* RSS queues */
	bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);

	/* override in STORAGE SD modes */
	if (IS_MF_STORAGE_ONLY(bp))
		bp->num_ethernet_queues = 1;

	/* Add special queues */
	bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
	bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;

	BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
}

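/**
 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
 *
 * @bp:			Driver handle
 * @include_cnic:	also account for the FCoE L2 queue
 *
 * We currently support for at most 16 Tx queues for each CoS thus we will
 * allocate a multiple of 16 for ETH L2 rings according to the value of the
 * bp->max_cos.
 *
 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
 * index after all ETH L2 indices.
 *
 * If the actual number of Tx queues (for each CoS) is less than 16 then there
 * will be the holes at the end of each group of 16 ETH L2 indices (0..15,
 * 16..31,...) with indices that are not coupled with any real Tx queue.
 *
 * The proper configuration of skb->queue_mapping is handled by
 * bnx2x_select_queue() and __skb_tx_hash().
 *
 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
 */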
static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
{
	int rc, tx, rx;

	tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
	rx = BNX2X_NUM_ETH_QUEUES(bp);

	/* account for fcoe queue */
	if (include_cnic && !NO_FCOE(bp)) {
		rx++;
		tx++;
	}

	rc = netif_set_real_num_tx_queues(bp->dev, tx);
	if (rc) {
		BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
		return rc;
	}
	rc = netif_set_real_num_rx_queues(bp->dev, rx);
	if (rc) {
		BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
		return rc;
	}

	DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
	   tx, rx);

	return rc;
}

static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u32 mtu;

		/* Always use a mini-jumbo MTU for the FCoE L2 ring */
		if (IS_FCOE_IDX(i))
			/*
			 * Although there are no IP frames expected to arrive to
			 * this ring we still want to add an
			 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
			 * overrun attack.
			 */
			mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
		else
			mtu = bp->dev->mtu;
		fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
				  IP_HEADER_ALIGNMENT_PADDING +
				  ETH_OVERHEAD +
				  mtu +
				  BNX2X_FW_RX_ALIGN_END;
		fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size);
		/* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
		if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
			fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
		else
			fp->rx_frag_size = 0;
	}
}

static int bnx2x_init_rss(struct bnx2x *bp)
{
	int i;
	u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);

	/* Prepare the initial contents for the indirection table if RSS is
	 * enabled
	 */
	for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
		bp->rss_conf_obj.ind_table[i] =
			bp->fp->cl_id +
			ethtool_rxfh_indir_default(i, num_eth_queues);

	/*
	 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
	 * per-port, so if explicit configuration is needed, do it only
	 * for a PMF.
	 *
	 * For 57712 and newer on the other hand it's a per-function
	 * configuration.
	 */
	return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
}

int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
	      bool config_hash, bool enable)
{
	struct bnx2x_config_rss_params params = {NULL};

	/* Although RSS is meaningless when there is a single HW queue we
	 * still need it enabled in order to have HW Rx hash generated.
	 */

	params.rss_obj = rss_obj;

	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	if (enable) {
		__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);

		/* RSS configuration */
		__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
		if (rss_obj->udp_rss_v4)
			__set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
		if (rss_obj->udp_rss_v6)
			__set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);

		if (!CHIP_IS_E1x(bp)) {
			/* valid only for TUNN_MODE_VXLAN tunnel mode */
			__set_bit(BNX2X_RSS_IPV4_VXLAN, &params.rss_flags);
			__set_bit(BNX2X_RSS_IPV6_VXLAN, &params.rss_flags);

			/* valid only for TUNN_MODE_GRE tunnel mode */
			__set_bit(BNX2X_RSS_TUNN_INNER_HDRS, &params.rss_flags);
		}
	} else {
		__set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
	}

	/* Hash bits */
	params.rss_result_mask = MULTI_MASK;

	memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));

	if (config_hash) {
		/* RSS keys */
		netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
		__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
	}

	if (IS_PF(bp))
		return bnx2x_config_rss(bp, &params);
	else
		return bnx2x_vfpf_config_rss(bp, &params);
}

static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	struct bnx2x_func_state_params func_params = {NULL};

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_HW_INIT;

	func_params.params.hw_init.load_phase = load_code;

	return bnx2x_func_state_change(bp, &func_params);
}

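/*
 * Cleans the object that have internal lists without sending
 * ramrods. Should be run when interrupts are disabled.
 */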
void bnx2x_squeeze_objects(struct bnx2x *bp)
{
	int rc;
	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;

	/***************** Cleanup MACs' object first *************************/

	/* Wait for completion of requested */
	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	/* Perform a dry cleanup */
	__set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);

	/* Clean ETH primary MAC */
	__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
	rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
				 &ramrod_flags);
	if (rc != 0)
		BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);

	/* Cleanup UC list */
	vlan_mac_flags = 0;
	__set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
				 &ramrod_flags);
	if (rc != 0)
		BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);

	/***************** Now clean mcast object *****************************/
	rparam.mcast_obj = &bp->mcast_obj;
	__set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);

	/* Add a DEL command... - Since we're doing a driver cleanup only,
	 * we take a lock surrounding both the initial send and the CONTs,
	 * as we don't want a true completion to disrupt us in the middle.
	 */
	netif_addr_lock_bh(bp->dev);
	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
	if (rc < 0)
		BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
			  rc);

	/* ...and wait until all pending commands are cleared */
	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
	while (rc != 0) {
		if (rc < 0) {
			BNX2X_ERR("Failed to clean multi-cast object: %d\n",
				  rc);
			netif_addr_unlock_bh(bp->dev);
			return;
		}

		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
	}
	netif_addr_unlock_bh(bp->dev);
}

#ifndef BNX2X_STOP_ON_ERROR
#define LOAD_ERROR_EXIT(bp, label) \
	do { \
		(bp)->state = BNX2X_STATE_ERROR; \
		goto label; \
	} while (0)

#define LOAD_ERROR_EXIT_CNIC(bp, label) \
	do { \
		bp->cnic_loaded = false; \
		goto label; \
	} while (0)
#else
#define LOAD_ERROR_EXIT(bp, label) \
	do { \
		(bp)->state = BNX2X_STATE_ERROR; \
		(bp)->panic = 1; \
		return -EBUSY; \
	} while (0)
#define LOAD_ERROR_EXIT_CNIC(bp, label) \
	do { \
		bp->cnic_loaded = false; \
		(bp)->panic = 1; \
		return -EBUSY; \
	} while (0)
#endif

static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
{
	BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
		       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
	return;
}

static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
{
	int num_groups, vf_headroom = 0;
	int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;

	/* number of queues for statistics is number of eth queues + FCoE */
	u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;

	/* Total number of FW statistics requests =
	 * 1 for port stats + 1 for PF stats + potential 1 for FCoE stats +
	 * num of queues
	 */
	bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;

	/* vf stats appear in the request list, but their data is allocated by
	 * the VFs themselves. We don't include them in the bp->fw_stats_num as
	 * it is used to determine where to place the vf stats queries in the
	 * request struct
	 */
	if (IS_SRIOV(bp))
		vf_headroom = bnx2x_vf_headroom(bp);

	/* Request is built from stats_query_header and an array of
	 * stats_query_cmd_group each of which contains
	 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
	 * configured in the stats_query_header.
	 */
	num_groups =
		(((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
		 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
		 1 : 0));

	DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
	   bp->fw_stats_num, vf_headroom, num_groups);
	bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
		num_groups * sizeof(struct stats_query_cmd_group);

	/* Data for statistics requests + stats_counter
	 * stats_counter holds per-STORM counters that are incremented
	 * when STORM has finished with the current request.
	 * Memory for FCoE offloaded statistics is counted anyway,
	 * even if it will not be sent.
	 */
	bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
		sizeof(struct per_pf_stats) +
		sizeof(struct fcoe_statistics_params) +
		sizeof(struct per_queue_stats) * num_queue_stats +
		sizeof(struct stats_counter);

	bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
				       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
	if (!bp->fw_stats)
		goto alloc_mem_err;

	/* Set shortcuts */
	bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
	bp->fw_stats_req_mapping = bp->fw_stats_mapping;
	bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
		((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
	bp->fw_stats_data_mapping = bp->fw_stats_mapping +
		bp->fw_stats_req_sz;

	DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
	   U64_HI(bp->fw_stats_req_mapping),
	   U64_LO(bp->fw_stats_req_mapping));
	DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
	   U64_HI(bp->fw_stats_data_mapping),
	   U64_LO(bp->fw_stats_data_mapping));
	return 0;

alloc_mem_err:
	bnx2x_free_fw_stats_mem(bp);
	BNX2X_ERR("Can't allocate FW stats memory\n");
	return -ENOMEM;
}

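/* send load request to mcp and analyze response */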
static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
{
	u32 param;

	/* init fw_seq */
	bp->fw_seq =
		(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
		 DRV_MSG_SEQ_NUMBER_MASK);
	BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);

	/* Get current FW pulse sequence */
	bp->fw_drv_pulse_wr_seq =
		(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
		 DRV_PULSE_SEQ_MASK);
	BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);

	param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;

	if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
		param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;

	/* load request */
	(*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);

	/* if mcp fails to respond we must abort */
	if (!(*load_code)) {
		BNX2X_ERR("MCP response failure, aborting\n");
		return -EBUSY;
	}

	/* If mcp refused (e.g. other port is in diagnostic mode) we
	 * must abort
	 */
	if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
		BNX2X_ERR("MCP refused load request, aborting\n");
		return -EBUSY;
	}
	return 0;
}

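/* check whether another PF has already loaded FW to chip. In
 * virtualized environments a pf from another VM may have already
 * initialized the device including loading FW
 */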
int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
{
	/* is another pf loaded on this engine? */
	if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
	    load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
		/* build my FW version dword */
		u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
			(BCM_5710_FW_MINOR_VERSION << 8) +
			(BCM_5710_FW_REVISION_VERSION << 16) +
			(BCM_5710_FW_ENGINEERING_VERSION << 24);

		/* read loaded FW from chip */
		u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);

		DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
		   loaded_fw, my_fw);

		/* abort nic load if version mismatch */
		if (my_fw != loaded_fw) {
			if (print_err)
				BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
					  loaded_fw, my_fw);
			else
				BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
					       loaded_fw, my_fw);
			return -EBUSY;
		}
	}
	return 0;
}

/* returns the "mcp load_code" according to global load_count array */
static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
{
	int path = BP_PATH(bp);

	DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
	   path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
	   bnx2x_load_count[path][2]);
	bnx2x_load_count[path][0]++;
	bnx2x_load_count[path][1 + port]++;
	DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
	   path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
	   bnx2x_load_count[path][2]);
	if (bnx2x_load_count[path][0] == 1)
		return FW_MSG_CODE_DRV_LOAD_COMMON;
	else if (bnx2x_load_count[path][1 + port] == 1)
		return FW_MSG_CODE_DRV_LOAD_PORT;
	else
		return FW_MSG_CODE_DRV_LOAD_FUNCTION;
}

2408
2409
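/* Mark this function as PMF (port management function) if applicable */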
2410static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2411{
2412 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2413 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2414 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2415 bp->port.pmf = 1;
		/* The barrier ensures that the write to bp->port.pmf is
		 * observed before other contexts read it.
		 */
2420 smp_mb();
2421 } else {
2422 bp->port.pmf = 0;
2423 }
2424
2425 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2426}
2427
2428static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2429{
2430 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2431 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2432 (bp->common.shmem2_base)) {
2433 if (SHMEM2_HAS(bp, dcc_support))
2434 SHMEM2_WR(bp, dcc_support,
2435 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2436 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2437 if (SHMEM2_HAS(bp, afex_driver_support))
2438 SHMEM2_WR(bp, afex_driver_support,
2439 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2440 }
2441
2442
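	/* Set the AFEX default VLAN tag to an invalid value */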
2443 bp->afex_def_vlan_tag = -1;
2444}
2445
/**
 * bnx2x_bz_fp - zero the contents of a fastpath structure
 * @bp:		driver handle
 * @index:	fastpath index to be zeroed
 *
 * Preserves the fields that are initialized only once, such as the napi
 * object and the tpa_info pointer, and re-derives the basic per-queue
 * settings.
 */
2455static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2456{
2457 struct bnx2x_fastpath *fp = &bp->fp[index];
2458 int cos;
2459 struct napi_struct orig_napi = fp->napi;
2460 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2461
2462
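	/* Zero the fastpath structure, including its TPA pool */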
2463 if (fp->tpa_info)
2464 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2465 sizeof(struct bnx2x_agg_info));
2466 memset(fp, 0, sizeof(*fp));
2467
2468
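	/* Restore the objects that were initialized only once */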
2469 fp->napi = orig_napi;
2470 fp->tpa_info = orig_tpa_info;
2471 fp->bp = bp;
2472 fp->index = index;
2473 if (IS_ETH_FP(fp))
2474 fp->max_cos = bp->max_cos;
2475 else
2476
2477 fp->max_cos = 1;
2478
2479
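	/* Init the txdata pointers */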
2480 if (IS_FCOE_FP(fp))
2481 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2482 if (IS_ETH_FP(fp))
2483 for_each_cos_in_tx_queue(fp, cos)
2484 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2485 BNX2X_NUM_ETH_QUEUES(bp) + index];
2486
	/* Select the TPA mode for the queue; it determines the minimal Rx
	 * ring size, so it must be set before the queue memory is
	 * allocated.
	 */
2490 if (bp->dev->features & NETIF_F_LRO)
2491 fp->mode = TPA_MODE_LRO;
2492 else if (bp->dev->features & NETIF_F_GRO_HW)
2493 fp->mode = TPA_MODE_GRO;
2494 else
2495 fp->mode = TPA_MODE_DISABLED;
2496
2497
2498
2499
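	/* No TPA when it is globally disabled or on an FCoE L2 ring */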
2500 if (bp->disable_tpa || IS_FCOE_FP(fp))
2501 fp->mode = TPA_MODE_DISABLED;
2502}
2503
2504void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
2505{
2506 u32 cur;
2507
2508 if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
2509 return;
2510
2511 cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
2512 DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
2513 cur, state);
2514
2515 SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
2516}
2517
2518int bnx2x_load_cnic(struct bnx2x *bp)
2519{
2520 int i, rc, port = BP_PORT(bp);
2521
2522 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2523
2524 mutex_init(&bp->cnic_mutex);
2525
2526 if (IS_PF(bp)) {
2527 rc = bnx2x_alloc_mem_cnic(bp);
2528 if (rc) {
2529 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2530 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2531 }
2532 }
2533
2534 rc = bnx2x_alloc_fp_mem_cnic(bp);
2535 if (rc) {
2536 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2537 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2538 }
2539
2540
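	/* Update the number of queues with the CNIC queues */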
2541 rc = bnx2x_set_real_num_queues(bp, 1);
2542 if (rc) {
2543 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2544 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2545 }
2546
2547
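	/* Add all CNIC NAPI objects */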
2548 bnx2x_add_all_napi_cnic(bp);
2549 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2550 bnx2x_napi_enable_cnic(bp);
2551
2552 rc = bnx2x_init_hw_func_cnic(bp);
2553 if (rc)
2554 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2555
2556 bnx2x_nic_init_cnic(bp);
2557
2558 if (IS_PF(bp)) {
2559
2560 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2561
2562
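		/* Setup the CNIC queues */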
2563 for_each_cnic_queue(bp, i) {
2564 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2565 if (rc) {
2566 BNX2X_ERR("Queue setup failed\n");
2567 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2568 }
2569 }
2570 }
2571
2572
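	/* Initialize the Rx filter */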
2573 bnx2x_set_rx_mode_inner(bp);
2574
2575
2576 bnx2x_get_iscsi_info(bp);
2577 bnx2x_setup_cnic_irq_info(bp);
2578 bnx2x_setup_cnic_info(bp);
2579 bp->cnic_loaded = true;
2580 if (bp->state == BNX2X_STATE_OPEN)
2581 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2582
	DP(NETIF_MSG_IFUP, "Ending CNIC-related load successfully\n");
2584
2585 return 0;
2586
2587#ifndef BNX2X_STOP_ON_ERROR
2588load_error_cnic2:
2589
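	/* Disable the timer scan */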
2590 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2591
2592load_error_cnic1:
2593 bnx2x_napi_disable_cnic(bp);
2594
2595 if (bnx2x_set_real_num_queues(bp, 0))
2596 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2597load_error_cnic0:
2598 BNX2X_ERR("CNIC-related load failed\n");
2599 bnx2x_free_fp_mem_cnic(bp);
2600 bnx2x_free_mem_cnic(bp);
2601 return rc;
2602#endif
2603}
2604
2605
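/* Must be called with rtnl_lock held */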
2606int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2607{
2608 int port = BP_PORT(bp);
2609 int i, rc = 0, load_code = 0;
2610
2611 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2612 DP(NETIF_MSG_IFUP,
2613 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2614
2615#ifdef BNX2X_STOP_ON_ERROR
2616 if (unlikely(bp->panic)) {
2617 BNX2X_ERR("Can't load NIC when there is panic\n");
2618 return -EPERM;
2619 }
2620#endif
2621
2622 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2623
2624
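	/* Zero the "last reported link" state before the SP handler is
	 * initialized.
	 */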
2625 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2626 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2627 &bp->last_reported_link.link_report_flags);
2628
2629 if (IS_PF(bp))
2630
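		/* Must be done before memory allocation and HW init */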
2631 bnx2x_ilt_set_info(bp);
2632
	/* Zero the fastpath structures while preserving the invariants
	 * (e.g. napi) that are allocated only once.
	 */
	DP(NETIF_MSG_IFUP, "num queues: %d\n", bp->num_queues);
2639 for_each_queue(bp, i)
2640 bnx2x_bz_fp(bp, i);
2641 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2642 bp->num_cnic_queues) *
2643 sizeof(struct bnx2x_fp_txdata));
2644
2645 bp->fcoe_init = false;
2646
2647
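	/* Set the Rx buffer sizes according to the device MTU */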
2648 bnx2x_set_rx_buf_size(bp);
2649
2650 if (IS_PF(bp)) {
2651 rc = bnx2x_alloc_mem(bp);
2652 if (rc) {
2653 BNX2X_ERR("Unable to allocate bp memory\n");
2654 return rc;
2655 }
2656 }
2657
	/* Allocate fastpath memory; this may reduce the number of queues
	 * if some of the allocations fail.
	 */
2661 rc = bnx2x_alloc_fp_mem(bp);
2662 if (rc) {
2663 BNX2X_ERR("Unable to allocate memory for fps\n");
2664 LOAD_ERROR_EXIT(bp, load_error0);
2665 }
2666
2667
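	/* Allocate memory for FW statistics */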
2668 if (bnx2x_alloc_fw_stats_mem(bp))
2669 LOAD_ERROR_EXIT(bp, load_error0);
2670
2671
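	/* A VF asks the PF to initialize its status blocks */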
2672 if (IS_VF(bp)) {
2673 rc = bnx2x_vfpf_init(bp);
2674 if (rc)
2675 LOAD_ERROR_EXIT(bp, load_error0);
2676 }
2677
	/* bnx2x_alloc_fp_mem() may have changed bp->num_queues, so
	 * bnx2x_set_real_num_queues() must come after it. The CNIC queues
	 * are not included at this stage.
	 */
2682 rc = bnx2x_set_real_num_queues(bp, 0);
2683 if (rc) {
2684 BNX2X_ERR("Unable to set real_num_queues\n");
2685 LOAD_ERROR_EXIT(bp, load_error0);
2686 }
2687
	/* Configure the multi-CoS mapping in the kernel. This may later be
	 * overridden by an mqprio qdisc or by a DCBX negotiation result.
	 */
2692 bnx2x_setup_tc(bp->dev, bp->max_cos);
2693
2694
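	/* Add all NAPI objects */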
2695 bnx2x_add_all_napi(bp);
2696 DP(NETIF_MSG_IFUP, "napi added\n");
2697 bnx2x_napi_enable(bp);
2698
2699 if (IS_PF(bp)) {
2700
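		/* Set the PF load flag just before approaching the MCP */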
2701 bnx2x_set_pf_load(bp);
2702
2703
2704 if (!BP_NOMCP(bp)) {
2705
2706 rc = bnx2x_nic_load_request(bp, &load_code);
2707 if (rc)
2708 LOAD_ERROR_EXIT(bp, load_error1);
2709
2710
2711 rc = bnx2x_compare_fw_ver(bp, load_code, true);
2712 if (rc) {
2713 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2714 LOAD_ERROR_EXIT(bp, load_error2);
2715 }
2716 } else {
2717 load_code = bnx2x_nic_load_no_mcp(bp, port);
2718 }
2719
2720
2721 bnx2x_nic_load_pmf(bp, load_code);
2722
2723
2724 bnx2x__init_func_obj(bp);
2725
2726
2727 rc = bnx2x_init_hw(bp, load_code);
2728 if (rc) {
2729 BNX2X_ERR("HW init failed, aborting\n");
2730 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2731 LOAD_ERROR_EXIT(bp, load_error2);
2732 }
2733 }
2734
2735 bnx2x_pre_irq_nic_init(bp);
2736
2737
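	/* Connect to the IRQs */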
2738 rc = bnx2x_setup_irqs(bp);
2739 if (rc) {
2740 BNX2X_ERR("setup irqs failed\n");
2741 if (IS_PF(bp))
2742 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2743 LOAD_ERROR_EXIT(bp, load_error2);
2744 }
2745
2746
2747 if (IS_PF(bp)) {
2748
2749 bnx2x_post_irq_nic_init(bp, load_code);
2750
2751 bnx2x_init_bp_objs(bp);
2752 bnx2x_iov_nic_init(bp);
2753
2754
2755 bp->afex_def_vlan_tag = -1;
2756 bnx2x_nic_load_afex_dcc(bp, load_code);
2757 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2758 rc = bnx2x_func_start(bp);
2759 if (rc) {
2760 BNX2X_ERR("Function start failed!\n");
2761 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2762
2763 LOAD_ERROR_EXIT(bp, load_error3);
2764 }
2765
2766
2767 if (!BP_NOMCP(bp)) {
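		/* Send the LOAD_DONE command to the MCP */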
2768 load_code = bnx2x_fw_command(bp,
2769 DRV_MSG_CODE_LOAD_DONE, 0);
2770 if (!load_code) {
2771 BNX2X_ERR("MCP response failure, aborting\n");
2772 rc = -EBUSY;
2773 LOAD_ERROR_EXIT(bp, load_error3);
2774 }
2775 }
2776
2777
2778 bnx2x_update_coalesce(bp);
2779 }
2780
2781
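	/* Setup the leading queue */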
2782 rc = bnx2x_setup_leading(bp);
2783 if (rc) {
2784 BNX2X_ERR("Setup leading failed!\n");
2785 LOAD_ERROR_EXIT(bp, load_error3);
2786 }
2787
2788
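	/* Setup the rest of the ETH queues */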
2789 for_each_nondefault_eth_queue(bp, i) {
2790 if (IS_PF(bp))
2791 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2792 else
2793 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2794 if (rc) {
2795 BNX2X_ERR("Queue %d setup failed\n", i);
2796 LOAD_ERROR_EXIT(bp, load_error3);
2797 }
2798 }
2799
2800
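	/* Setup RSS */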
2801 rc = bnx2x_init_rss(bp);
2802 if (rc) {
2803 BNX2X_ERR("PF RSS init failed\n");
2804 LOAD_ERROR_EXIT(bp, load_error3);
2805 }
2806
2807
2808 bp->state = BNX2X_STATE_OPEN;
2809
2810
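	/* Configure the unicast MAC address */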
2811 if (IS_PF(bp))
2812 rc = bnx2x_set_eth_mac(bp, true);
2813 else
2814 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2815 true);
2816 if (rc) {
2817 BNX2X_ERR("Setting Ethernet MAC failed\n");
2818 LOAD_ERROR_EXIT(bp, load_error3);
2819 }
2820
2821 if (IS_PF(bp) && bp->pending_max) {
2822 bnx2x_update_max_mf_config(bp, bp->pending_max);
2823 bp->pending_max = 0;
2824 }
2825
2826 bp->force_link_down = false;
2827 if (bp->port.pmf) {
2828 rc = bnx2x_initial_phy_init(bp, load_mode);
2829 if (rc)
2830 LOAD_ERROR_EXIT(bp, load_error3);
2831 }
2832 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2833
2834
2835
2836
2837 rc = bnx2x_vlan_reconfigure_vid(bp);
2838 if (rc)
2839 LOAD_ERROR_EXIT(bp, load_error3);
2840
2841
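	/* Initialize the Rx filter */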
2842 bnx2x_set_rx_mode_inner(bp);
2843
2844 if (bp->flags & PTP_SUPPORTED) {
2845 bnx2x_register_phc(bp);
2846 bnx2x_init_ptp(bp);
2847 bnx2x_configure_ptp_filters(bp);
2848 }
2849
2850 switch (load_mode) {
2851 case LOAD_NORMAL:
2852
2853 netif_tx_wake_all_queues(bp->dev);
2854 break;
2855
2856 case LOAD_OPEN:
2857 netif_tx_start_all_queues(bp->dev);
2858 smp_mb__after_atomic();
2859 break;
2860
2861 case LOAD_DIAG:
2862 case LOAD_LOOPBACK_EXT:
2863 bp->state = BNX2X_STATE_DIAG;
2864 break;
2865
2866 default:
2867 break;
2868 }
2869
2870 if (bp->port.pmf)
2871 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2872 else
2873 bnx2x__link_status_update(bp);
2874
2875
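	/* Start the periodic timer */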
2876 mod_timer(&bp->timer, jiffies + bp->current_interval);
2877
2878 if (CNIC_ENABLED(bp))
2879 bnx2x_load_cnic(bp);
2880
2881 if (IS_PF(bp))
2882 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2883
2884 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2885
2886 u32 val;
2887 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2888 val &= ~DRV_FLAGS_MTU_MASK;
2889 val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
2890 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2891 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2892 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2893 }
2894
2895
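	/* Wait for all pending SP commands to complete */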
2896 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2897 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2898 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2899 return -EBUSY;
2900 }
2901
2902
2903 if (IS_PF(bp))
2904 bnx2x_update_mfw_dump(bp);
2905
2906
2907 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2908 bnx2x_dcbx_init(bp, false);
2909
2910 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2911 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
2912
	DP(NETIF_MSG_IFUP, "Ending NIC load successfully\n");
2914
2915 return 0;
2916
2917#ifndef BNX2X_STOP_ON_ERROR
2918load_error3:
2919 if (IS_PF(bp)) {
2920 bnx2x_int_disable_sync(bp, 1);
2921
2922
2923 bnx2x_squeeze_objects(bp);
2924 }
2925
2926
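	/* Free SKBs, SGEs and the TPA pool */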
2927 bnx2x_free_skbs(bp);
2928 for_each_rx_queue(bp, i)
2929 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2930
2931
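	/* Release the IRQs */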
2932 bnx2x_free_irq(bp);
2933load_error2:
2934 if (IS_PF(bp) && !BP_NOMCP(bp)) {
2935 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2936 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2937 }
2938
2939 bp->port.pmf = 0;
2940load_error1:
2941 bnx2x_napi_disable(bp);
2942 bnx2x_del_all_napi(bp);
2943
2944
2945 if (IS_PF(bp))
2946 bnx2x_clear_pf_load(bp);
2947load_error0:
2948 bnx2x_free_fw_stats_mem(bp);
2949 bnx2x_free_fp_mem(bp);
2950 bnx2x_free_mem(bp);
2951
2952 return rc;
2953#endif
2954}
2955
2956int bnx2x_drain_tx_queues(struct bnx2x *bp)
2957{
	int rc = 0;
	u8 cos, i;
2959
2960
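	/* Wait until the Tx fastpath tasks complete */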
2961 for_each_tx_queue(bp, i) {
2962 struct bnx2x_fastpath *fp = &bp->fp[i];
2963
2964 for_each_cos_in_tx_queue(fp, cos)
2965 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2966 if (rc)
2967 return rc;
2968 }
2969 return 0;
2970}
2971
2972
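/* Must be called with rtnl_lock held */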
2973int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2974{
2975 int i;
2976 bool global = false;
2977
2978 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2979
2980 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2981 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
2982
2983
2984 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2985 u32 val;
2986 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2987 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2988 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2989 }
2990
2991 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2992 (bp->state == BNX2X_STATE_CLOSED ||
2993 bp->state == BNX2X_STATE_ERROR)) {

		/* We can get here if the driver was unloaded during a parity
		 * error recovery, waiting either for a leader to complete or
		 * for other functions to unload, and then ifdown was issued.
		 * In that case we want to unload and let the other functions
		 * complete the recovery process.
		 */
3001 bp->recovery_state = BNX2X_RECOVERY_DONE;
3002 bp->is_leader = 0;
3003 bnx2x_release_leader_lock(bp);
3004 smp_mb();
3005
3006 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
3007 BNX2X_ERR("Can't unload in closed or error state\n");
3008 return -EINVAL;
3009 }
3010
	/* Nothing to do if the previous bnx2x_nic_load() did not complete
	 * successfully - all resources are already released. We can get
	 * here only after an unsuccessful ndo_* callback, during which
	 * dev->IFF_UP is still set.
	 */
3017 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
3018 return 0;
3019
	/* Move the state to CLOSING before disabling Tx so that concurrent
	 * paths observe that the device is going down.
	 */
3024 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
3025 smp_mb();
3026
3027
3028 bnx2x_iov_channel_down(bp);
3029
3030 if (CNIC_LOADED(bp))
3031 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
3032
3033
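	/* Stop Tx */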
3034 bnx2x_tx_disable(bp);
3035 netdev_reset_tc(bp->dev);
3036
3037 bp->rx_mode = BNX2X_RX_MODE_NONE;
3038
3039 del_timer_sync(&bp->timer);
3040
3041 if (IS_PF(bp) && !BP_NOMCP(bp)) {
3042
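		/* Set the ALWAYS_ALIVE bit in the shmem pulse */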
3043 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3044 bnx2x_drv_pulse(bp);
3045 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
3046 bnx2x_save_statistics(bp);
3047 }
3048
	/* Wait until the consumers catch up with the producers in all
	 * queues. If we're recovering, the FW can't write to the host, so
	 * there is no point in waiting for the queues to complete all Tx.
	 */
3053 if (unload_mode != UNLOAD_RECOVERY)
3054 bnx2x_drain_tx_queues(bp);
3055
3056
3057
3058
3059 if (IS_VF(bp))
3060 bnx2x_vfpf_close_vf(bp);
3061 else if (unload_mode != UNLOAD_RECOVERY)
3062
3063 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
3064 else {
3065
3066 bnx2x_send_unload_req(bp, unload_mode);

		/* Prevent the HW from issuing further transactions from this
		 * function to the host.
		 */
3074 if (!CHIP_IS_E1x(bp))
3075 bnx2x_pf_disable(bp);
3076
3077
3078 bnx2x_netif_stop(bp, 1);
3079
3080 bnx2x_del_all_napi(bp);
3081 if (CNIC_LOADED(bp))
3082 bnx2x_del_all_napi_cnic(bp);
3083
3084 bnx2x_free_irq(bp);
3085
3086
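		/* Report UNLOAD_DONE to the MCP */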
3087 bnx2x_send_unload_done(bp, false);
3088 }
3089
	/* At this stage no more interrupts will arrive, so we may safely
	 * clean the queueable objects here in case they failed to get
	 * cleaned so far.
	 */
3094 if (IS_PF(bp))
3095 bnx2x_squeeze_objects(bp);
3096
3097
3098 bp->sp_state = 0;
3099
3100 bp->port.pmf = 0;
3101
3102
3103 bp->sp_rtnl_state = 0;
3104 smp_mb();
3105
3106
3107 bnx2x_free_skbs(bp);
3108 if (CNIC_LOADED(bp))
3109 bnx2x_free_skbs_cnic(bp);
3110 for_each_rx_queue(bp, i)
3111 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
3112
3113 bnx2x_free_fp_mem(bp);
3114 if (CNIC_LOADED(bp))
3115 bnx2x_free_fp_mem_cnic(bp);
3116
3117 if (IS_PF(bp)) {
3118 if (CNIC_LOADED(bp))
3119 bnx2x_free_mem_cnic(bp);
3120 }
3121 bnx2x_free_mem(bp);
3122
3123 bp->state = BNX2X_STATE_CLOSED;
3124 bp->cnic_loaded = false;
3125
3126
3127 if (IS_PF(bp) && !BP_NOMCP(bp))
3128 bnx2x_update_mng_version(bp);

	/* Check for pending parity attentions. If any exist, mark that a
	 * reset is in progress.
	 */
3133 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
3134 bnx2x_set_reset_in_progress(bp);
3135
3136
3137 if (global)
3138 bnx2x_set_reset_global(bp);
3139 }

	/* The last driver must disable a "close the gate" if there is no
	 * parity attention or "process kill" pending.
	 */
3144 if (IS_PF(bp) &&
3145 !bnx2x_clear_pf_load(bp) &&
3146 bnx2x_reset_is_done(bp, BP_PATH(bp)))
3147 bnx2x_disable_close_the_gate(bp);
3148
3149 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3150
3151 return 0;
3152}
3153
3154int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3155{
3156 u16 pmcsr;
3157
3158
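	/* If there is no PM capability, silently succeed */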
3159 if (!bp->pdev->pm_cap) {
3160 BNX2X_DEV_INFO("No power capability. Breaking.\n");
3161 return 0;
3162 }
3163
3164 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3165
3166 switch (state) {
3167 case PCI_D0:
3168 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3169 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3170 PCI_PM_CTRL_PME_STATUS));
3171
3172 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3173
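			/* Give the device time to settle after leaving
			 * D3hot.
			 */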
3174 msleep(20);
3175 break;
3176
3177 case PCI_D3hot:
3178
3179
3180 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3181 return 0;
3182
3183 if (CHIP_REV_IS_SLOW(bp))
3184 return 0;
3185
3186 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;	/* request the D3hot state */
3188
3189 if (bp->wol)
3190 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3191
3192 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3193 pmcsr);

		/* No more memory access after this point until the device
		 * is brought back to D0.
		 */
3198 break;
3199
3200 default:
3201 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3202 return -EINVAL;
3203 }
3204 return 0;
3205}
3206
/* NAPI callback: service Tx completions on every CoS ring, then Rx, and
 * re-enable interrupts only when no further work is pending.
 */
3210static int bnx2x_poll(struct napi_struct *napi, int budget)
3211{
3212 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3213 napi);
3214 struct bnx2x *bp = fp->bp;
3215 int rx_work_done;
3216 u8 cos;
3217
3218#ifdef BNX2X_STOP_ON_ERROR
3219 if (unlikely(bp->panic)) {
3220 napi_complete(napi);
3221 return 0;
3222 }
3223#endif
3224 for_each_cos_in_tx_queue(fp, cos)
3225 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3226 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3227
3228 rx_work_done = (bnx2x_has_rx_work(fp)) ? bnx2x_rx_int(fp, budget) : 0;
3229
3230 if (rx_work_done < budget) {

		/* No need to update the SB for the FCoE L2 ring, as it is
		 * connected to the default SB, which was already updated
		 * when NAPI was scheduled.
		 */
3235 if (IS_FCOE_FP(fp)) {
3236 napi_complete_done(napi, rx_work_done);
3237 } else {
3238 bnx2x_update_fpsb_idx(fp);

			/* bnx2x_has_rx_work() reads the status block, so make
			 * sure the status block indices have actually been
			 * read (bnx2x_update_fpsb_idx) before the check.
			 * Without this barrier the read could be postponed
			 * until just before bnx2x_ack_sb(), and a "newer"
			 * status block value could be acked to the IGU while
			 * work is still pending, leaving the queue without
			 * any further interrupt.
			 */
			rmb();
3253
3254 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3255 if (napi_complete_done(napi, rx_work_done)) {
3256
3257 DP(NETIF_MSG_RX_STATUS,
3258 "Update index to %d\n", fp->fp_hc_idx);
3259 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3260 le16_to_cpu(fp->fp_hc_idx),
3261 IGU_INT_ENABLE, 1);
3262 }
3263 } else {
3264 rx_work_done = budget;
3265 }
3266 }
3267 }
3268
3269 return rx_work_done;
3270}
3271
/* We split the first BD into a header BD and a data BD to ease the pain
 * of our fellow microcode engineers; one DMA mapping serves both BDs.
 */
3276static u16 bnx2x_tx_split(struct bnx2x *bp,
3277 struct bnx2x_fp_txdata *txdata,
3278 struct sw_tx_bd *tx_buf,
3279 struct eth_tx_start_bd **tx_bd, u16 hlen,
3280 u16 bd_prod)
3281{
3282 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3283 struct eth_tx_bd *d_tx_bd;
3284 dma_addr_t mapping;
3285 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3286
3287
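	/* First, fix the first BD to cover only the headers */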
3288 h_tx_bd->nbytes = cpu_to_le16(hlen);
3289
3290 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3291 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3292
3293
3294
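	/* Now get a new data BD and fill it with the remainder */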
3295 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3296 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3297
3298 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3299 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3300
3301 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3302 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3303 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3304
3305
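	/* This marks the BD as one that has no individual mapping */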
3306 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3307
3308 DP(NETIF_MSG_TX_QUEUED,
3309 "TSO split data size is %d (%x:%x)\n",
3310 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3311
3312
3313 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3314
3315 return bd_prod;
3316}
3317
3318#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3319#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
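/* Adjust a checksum that was computed starting 'fix' bytes away from the
 * transport header, so that it covers the data from the transport header
 * onwards.
 */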
3320static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3321{
3322 __sum16 tsum = (__force __sum16) csum;
3323
3324 if (fix > 0)
3325 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3326 csum_partial(t_header - fix, fix, 0)));
3327
3328 else if (fix < 0)
3329 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3330 csum_partial(t_header, -fix, 0)));
3331
3332 return bswab16(tsum);
3333}
3334
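/* Classify the skb into a bitmask of XMIT_* flags used by the Tx path */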
3335static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3336{
3337 u32 rc;
3338 __u8 prot = 0;
3339 __be16 protocol;
3340
3341 if (skb->ip_summed != CHECKSUM_PARTIAL)
3342 return XMIT_PLAIN;
3343
3344 protocol = vlan_get_protocol(skb);
3345 if (protocol == htons(ETH_P_IPV6)) {
3346 rc = XMIT_CSUM_V6;
3347 prot = ipv6_hdr(skb)->nexthdr;
3348 } else {
3349 rc = XMIT_CSUM_V4;
3350 prot = ip_hdr(skb)->protocol;
3351 }
3352
3353 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3354 if (inner_ip_hdr(skb)->version == 6) {
3355 rc |= XMIT_CSUM_ENC_V6;
3356 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3357 rc |= XMIT_CSUM_TCP;
3358 } else {
3359 rc |= XMIT_CSUM_ENC_V4;
3360 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3361 rc |= XMIT_CSUM_TCP;
3362 }
3363 }
3364 if (prot == IPPROTO_TCP)
3365 rc |= XMIT_CSUM_TCP;
3366
3367 if (skb_is_gso(skb)) {
3368 if (skb_is_gso_v6(skb)) {
3369 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3370 if (rc & XMIT_CSUM_ENC)
3371 rc |= XMIT_GSO_ENC_V6;
3372 } else {
3373 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3374 if (rc & XMIT_CSUM_ENC)
3375 rc |= XMIT_GSO_ENC_V4;
3376 }
3377 }
3378
3379 return rc;
3380}
3381
3382
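/* VXLAN: 4 = 1 (for the linear data BD) + 3 (2 parsing BDs + last BD) */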
3383#define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS 4
3384
3385
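/* Regular: 3 = 1 (for the linear data BD) + 2 (parsing BD + last BD) */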
3386#define BNX2X_NUM_TSO_WIN_SUB_BDS 3
3387
3388#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
/* Check if the packet requires linearization, i.e. whether it is too
 * fragmented to satisfy the FW window restrictions.
 */
3392static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3393 u32 xmit_type)
3394{
3395 int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS;
3396 int to_copy = 0, hlen = 0;
3397
3398 if (xmit_type & XMIT_GSO_ENC)
3399 num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS;
3400
3401 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) {
3402 if (xmit_type & XMIT_GSO) {
3403 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3404 int wnd_size = MAX_FETCH_BD - num_tso_win_sub;
3405
3406 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3407 int wnd_idx = 0;
3408 int frag_idx = 0;
3409 u32 wnd_sum = 0;
3410
3411
3412 if (xmit_type & XMIT_GSO_ENC)
3413 hlen = (int)(skb_inner_transport_header(skb) -
3414 skb->data) +
3415 inner_tcp_hdrlen(skb);
3416 else
3417 hlen = (int)(skb_transport_header(skb) -
3418 skb->data) + tcp_hdrlen(skb);
3419
3420
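			/* Amount of data (w/o headers) on the linear part */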
3421 first_bd_sz = skb_headlen(skb) - hlen;
3422
3423 wnd_sum = first_bd_sz;
3424
3425
3426 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3427 wnd_sum +=
3428 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3429
3430
3431 if (first_bd_sz > 0) {
3432 if (unlikely(wnd_sum < lso_mss)) {
3433 to_copy = 1;
3434 goto exit_lbl;
3435 }
3436
3437 wnd_sum -= first_bd_sz;
3438 }
3439
3440
3441
3442 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3443 wnd_sum +=
3444 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3445
3446 if (unlikely(wnd_sum < lso_mss)) {
3447 to_copy = 1;
3448 break;
3449 }
3450 wnd_sum -=
3451 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3452 }
3453 } else {
3454
3455
3456 to_copy = 1;
3457 }
3458 }
3459
3460exit_lbl:
3461 if (unlikely(to_copy))
3462 DP(NETIF_MSG_TX_QUEUED,
3463 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
3464 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3465 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3466
3467 return to_copy;
3468}
3469#endif
3470
/**
 * bnx2x_set_pbd_gso - update the parse BD (E1x) for the GSO case
 * @skb:	packet skb
 * @pbd:	parse BD to be updated
 * @xmit_type:	xmit flags
 */
3478static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3479 struct eth_tx_parse_bd_e1x *pbd,
3480 u32 xmit_type)
3481{
3482 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3483 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3484 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3485
3486 if (xmit_type & XMIT_GSO_V4) {
3487 pbd->ip_id = bswab16(ip_hdr(skb)->id);
3488 pbd->tcp_pseudo_csum =
3489 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3490 ip_hdr(skb)->daddr,
3491 0, IPPROTO_TCP, 0));
3492 } else {
3493 pbd->tcp_pseudo_csum =
3494 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3495 &ipv6_hdr(skb)->daddr,
3496 0, IPPROTO_TCP, 0));
3497 }
3498
3499 pbd->global_data |=
3500 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3501}
3502
/**
 * bnx2x_set_pbd_csum_enc - update the PBD with checksum info and return
 * the header length
 * @bp:			driver handle
 * @skb:		packet skb
 * @parsing_data:	data to be updated
 * @xmit_type:		xmit flags
 *
 * 57712/578xx related, for skbs with encapsulation.
 */
3513static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3514 u32 *parsing_data, u32 xmit_type)
3515{
3516 *parsing_data |=
3517 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3518 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3519 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3520
3521 if (xmit_type & XMIT_CSUM_TCP) {
3522 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3523 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3524 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3525
3526 return skb_inner_transport_header(skb) +
3527 inner_tcp_hdrlen(skb) - skb->data;
3528 }
3529
	/* We support checksum offload for TCP and UDP only; there is no
	 * need to pass the UDP header length, it is a constant.
	 */
3533 return skb_inner_transport_header(skb) +
3534 sizeof(struct udphdr) - skb->data;
3535}
3536
/**
 * bnx2x_set_pbd_csum_e2 - update the PBD with checksum info and return
 * the header length
 * @bp:			driver handle
 * @skb:		packet skb
 * @parsing_data:	data to be updated
 * @xmit_type:		xmit flags
 *
 * 57712/578xx related.
 */
3547static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3548 u32 *parsing_data, u32 xmit_type)
3549{
3550 *parsing_data |=
3551 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3552 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3553 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3554
3555 if (xmit_type & XMIT_CSUM_TCP) {
3556 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3557 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3558 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3559
3560 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3561 }
3562
3563
3564
3565 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3566}
3567
3568
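/* Set the start BD checksum flags according to the xmit type */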
3569static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3570 struct eth_tx_start_bd *tx_start_bd,
3571 u32 xmit_type)
3572{
3573 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3574
3575 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3576 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3577
3578 if (!(xmit_type & XMIT_CSUM_TCP))
3579 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3580}
3581
/**
 * bnx2x_set_pbd_csum - update the PBD with checksum info and return the
 * total header length
 * @bp:		driver handle
 * @skb:	packet skb
 * @pbd:	parse BD to be updated
 * @xmit_type:	xmit flags
 */
3590static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3591 struct eth_tx_parse_bd_e1x *pbd,
3592 u32 xmit_type)
3593{
3594 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3595
3596
3597 pbd->global_data =
3598 cpu_to_le16(hlen |
3599 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3600 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3601
3602 pbd->ip_hlen_w = (skb_transport_header(skb) -
3603 skb_network_header(skb)) >> 1;
3604
3605 hlen += pbd->ip_hlen_w;
3606
3607
3608 if (xmit_type & XMIT_CSUM_TCP)
3609 hlen += tcp_hdrlen(skb) / 2;
3610 else
3611 hlen += sizeof(struct udphdr) / 2;
3612
3613 pbd->total_hlen_w = cpu_to_le16(hlen);
3614 hlen = hlen*2;
3615
3616 if (xmit_type & XMIT_CSUM_TCP) {
3617 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3618
3619 } else {
3620 s8 fix = SKB_CS_OFF(skb);
3621
3622 DP(NETIF_MSG_TX_QUEUED,
3623 "hlen %d fix %d csum before fix %x\n",
3624 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3625
3626
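		/* HW bug workaround: fix up the checksum */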
3627 pbd->tcp_pseudo_csum =
3628 bnx2x_csum_fix(skb_transport_header(skb),
3629 SKB_CS(skb), fix);
3630
3631 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3632 pbd->tcp_pseudo_csum);
3633 }
3634
3635 return hlen;
3636}
3637
3638static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3639 struct eth_tx_parse_bd_e2 *pbd_e2,
3640 struct eth_tx_parse_2nd_bd *pbd2,
3641 u16 *global_data,
3642 u32 xmit_type)
3643{
3644 u16 hlen_w = 0;
3645 u8 outerip_off, outerip_len = 0;
3646
3647
3648 hlen_w = (skb_inner_transport_header(skb) -
3649 skb_network_header(skb)) >> 1;
3650
3651
3652 hlen_w += inner_tcp_hdrlen(skb) >> 1;
3653
3654 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3655
3656
3657 if (xmit_type & XMIT_CSUM_V4) {
3658 struct iphdr *iph = ip_hdr(skb);
3659 u32 csum = (__force u32)(~iph->check) -
3660 (__force u32)iph->tot_len -
3661 (__force u32)iph->frag_off;
3662
3663 outerip_len = iph->ihl << 1;
3664
3665 pbd2->fw_ip_csum_wo_len_flags_frag =
3666 bswab16(csum_fold((__force __wsum)csum));
3667 } else {
3668 pbd2->fw_ip_hdr_to_payload_w =
3669 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3670 pbd_e2->data.tunnel_data.flags |=
3671 ETH_TUNNEL_DATA_IPV6_OUTER;
3672 }
3673
3674 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3675
3676 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3677
3678
3679 if (xmit_type & XMIT_CSUM_ENC_V4) {
3680 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3681
3682 pbd_e2->data.tunnel_data.pseudo_csum =
3683 bswab16(~csum_tcpudp_magic(
3684 inner_ip_hdr(skb)->saddr,
3685 inner_ip_hdr(skb)->daddr,
3686 0, IPPROTO_TCP, 0));
3687 } else {
3688 pbd_e2->data.tunnel_data.pseudo_csum =
3689 bswab16(~csum_ipv6_magic(
3690 &inner_ipv6_hdr(skb)->saddr,
3691 &inner_ipv6_hdr(skb)->daddr,
3692 0, IPPROTO_TCP, 0));
3693 }
3694
3695 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3696
3697 *global_data |=
3698 outerip_off |
3699 (outerip_len <<
3700 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3701 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3702 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3703
3704 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3705 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3706 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3707 }
3708}
3709
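/* Set the IPv6 "with extension header" flag in the E2 parsing data when
 * needed.
 */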
3710static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
3711 u32 xmit_type)
3712{
3713 struct ipv6hdr *ipv6;
3714
3715 if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
3716 return;
3717
3718 if (xmit_type & XMIT_GSO_ENC_V6)
3719 ipv6 = inner_ipv6_hdr(skb);
3720 else
3721 ipv6 = ipv6_hdr(skb);
3722
3723 if (ipv6->nexthdr == NEXTHDR_IPV6)
3724 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3725}
3726
/* Called with netif_tx_lock from dev_queue_xmit() */
3731netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3732{
3733 struct bnx2x *bp = netdev_priv(dev);
3734
3735 struct netdev_queue *txq;
3736 struct bnx2x_fp_txdata *txdata;
3737 struct sw_tx_bd *tx_buf;
3738 struct eth_tx_start_bd *tx_start_bd, *first_bd;
3739 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3740 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3741 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3742 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3743 u32 pbd_e2_parsing_data = 0;
3744 u16 pkt_prod, bd_prod;
3745 int nbd, txq_index;
3746 dma_addr_t mapping;
3747 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3748 int i;
3749 u8 hlen = 0;
3750 __le16 pkt_size = 0;
3751 struct ethhdr *eth;
3752 u8 mac_type = UNICAST_ADDRESS;
3753
3754#ifdef BNX2X_STOP_ON_ERROR
3755 if (unlikely(bp->panic))
3756 return NETDEV_TX_BUSY;
3757#endif
3758
3759 txq_index = skb_get_queue_mapping(skb);
3760 txq = netdev_get_tx_queue(dev, txq_index);
3761
3762 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3763
3764 txdata = &bp->bnx2x_txq[txq_index];

	/* Pause the queue if there is not enough room for a maximally
	 * fragmented packet: all of its frags plus the BDs any packet
	 * needs plus the "next page" elements it may cross.
	 */
3775 if (unlikely(bnx2x_tx_avail(bp, txdata) <
3776 skb_shinfo(skb)->nr_frags +
3777 BDS_PER_TX_PKT +
3778 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3779
3780 if (txdata->tx_ring_size == 0) {
3781 struct bnx2x_eth_q_stats *q_stats =
3782 bnx2x_fp_qstats(bp, txdata->parent_fp);
3783 q_stats->driver_filtered_tx_pkt++;
3784 dev_kfree_skb(skb);
3785 return NETDEV_TX_OK;
3786 }
3787 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3788 netif_tx_stop_queue(txq);
3789 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3790
3791 return NETDEV_TX_BUSY;
3792 }
3793
3794 DP(NETIF_MSG_TX_QUEUED,
3795 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
3796 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3797 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3798 skb->len);
3799
3800 eth = (struct ethhdr *)skb->data;
3801
3802
3803 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3804 if (is_broadcast_ether_addr(eth->h_dest))
3805 mac_type = BROADCAST_ADDRESS;
3806 else
3807 mac_type = MULTICAST_ADDRESS;
3808 }
3809
3810#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)

	/* First, check if we need to linearize the skb (due to FW
	 * restrictions).
	 */
3814 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3815
3816 bp->lin_cnt++;
3817 if (skb_linearize(skb) != 0) {
3818 DP(NETIF_MSG_TX_QUEUED,
3819 "SKB linearization failed - silently dropping this SKB\n");
3820 dev_kfree_skb_any(skb);
3821 return NETDEV_TX_OK;
3822 }
3823 }
3824#endif
3825
3826 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3827 skb_headlen(skb), DMA_TO_DEVICE);
3828 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3829 DP(NETIF_MSG_TX_QUEUED,
3830 "SKB mapping failed - silently dropping this SKB\n");
3831 dev_kfree_skb_any(skb);
3832 return NETDEV_TX_OK;
3833 }

	/* Please read carefully. First we use one BD which we mark as the
	 * start, then, for TSO or checksum offload, a parsing-info BD, and
	 * only then the rest of the data BDs. Remember to mark the last one
	 * as last, to unmap only AFTER writing to the BD, and note that
	 * parse BD sizes are in words - NOT dwords!
	 */
3846 pkt_prod = txdata->tx_pkt_prod;
3847 bd_prod = TX_BD(txdata->tx_bd_prod);
3848
3849
3850
3851
3852
3853 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3854 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3855 first_bd = tx_start_bd;
3856
3857 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3858
3859 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
3860 if (!(bp->flags & TX_TIMESTAMPING_EN)) {
3861 BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
3862 } else if (bp->ptp_tx_skb) {
3863 BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
3864 } else {
3865 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3866
3867 bp->ptp_tx_skb = skb_get(skb);
3868 bp->ptp_tx_start = jiffies;
3869 schedule_work(&bp->ptp_task);
3870 }
3871 }
3872
3873
3874 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3875
3876
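	/* Remember the first BD of the packet */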
3877 tx_buf->first_bd = txdata->tx_bd_prod;
3878 tx_buf->skb = skb;
3879 tx_buf->flags = 0;
3880
3881 DP(NETIF_MSG_TX_QUEUED,
3882 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3883 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3884
3885 if (skb_vlan_tag_present(skb)) {
3886 tx_start_bd->vlan_or_ethertype =
3887 cpu_to_le16(skb_vlan_tag_get(skb));
3888 tx_start_bd->bd_flags.as_bitfield |=
3889 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3890 } else {
		/* When transmitting from a VF, the start BD must hold the
		 * ethertype for the FW to enforce it.
		 */
3894 u16 vlan_tci = 0;
3895#ifndef BNX2X_STOP_ON_ERROR
3896 if (IS_VF(bp)) {
3897#endif
3898
3899 if (__vlan_get_tag(skb, &vlan_tci)) {
3900 tx_start_bd->vlan_or_ethertype =
3901 cpu_to_le16(ntohs(eth->h_proto));
3902 } else {
3903 tx_start_bd->bd_flags.as_bitfield |=
3904 (X_ETH_INBAND_VLAN <<
3905 ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3906 tx_start_bd->vlan_or_ethertype =
3907 cpu_to_le16(vlan_tci);
3908 }
3909#ifndef BNX2X_STOP_ON_ERROR
3910 } else {
3911
3912 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3913 }
3914#endif
3915 }
3916
	nbd = 2; /* start BD and parsing BD */
3918
3919
3920 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3921
3922 if (xmit_type & XMIT_CSUM)
3923 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3924
3925 if (!CHIP_IS_E1x(bp)) {
3926 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3927 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3928
3929 if (xmit_type & XMIT_CSUM_ENC) {
3930 u16 global_data = 0;
3931
3932
3933 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3934 &pbd_e2_parsing_data,
3935 xmit_type);
3936
3937
3938 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3939
3940 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3941
3942 memset(pbd2, 0, sizeof(*pbd2));
3943
3944 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3945 (skb_inner_network_header(skb) -
3946 skb->data) >> 1;
3947
3948 if (xmit_type & XMIT_GSO_ENC)
3949 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3950 &global_data,
3951 xmit_type);
3952
3953 pbd2->global_data = cpu_to_le16(global_data);
3954
3955
3956 SET_FLAG(tx_start_bd->general_data,
3957 ETH_TX_START_BD_PARSE_NBDS, 1);
3958
3959 SET_FLAG(tx_start_bd->general_data,
3960 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3961
3962 tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
3963
3964 nbd++;
3965 } else if (xmit_type & XMIT_CSUM) {
3966
3967 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3968 &pbd_e2_parsing_data,
3969 xmit_type);
3970 }
3971
3972 bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
3973
		/* Add the MAC addresses to the parsing BD if this is a VF or
		 * if Tx switching is enabled.
		 */
3976 if (IS_VF(bp)) {
3977
3978 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3979 &pbd_e2->data.mac_addr.src_mid,
3980 &pbd_e2->data.mac_addr.src_lo,
3981 eth->h_source);
3982
3983 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3984 &pbd_e2->data.mac_addr.dst_mid,
3985 &pbd_e2->data.mac_addr.dst_lo,
3986 eth->h_dest);
3987 } else {
3988 if (bp->flags & TX_SWITCHING)
3989 bnx2x_set_fw_mac_addr(
3990 &pbd_e2->data.mac_addr.dst_hi,
3991 &pbd_e2->data.mac_addr.dst_mid,
3992 &pbd_e2->data.mac_addr.dst_lo,
3993 eth->h_dest);
3994#ifdef BNX2X_STOP_ON_ERROR
			/* In debug builds the source MAC is supplied in the
			 * parsing BD as well.
			 */
3998 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3999 &pbd_e2->data.mac_addr.src_mid,
4000 &pbd_e2->data.mac_addr.src_lo,
4001 eth->h_source);
4002#endif
4003 }
4004
4005 SET_FLAG(pbd_e2_parsing_data,
4006 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
4007 } else {
4008 u16 global_data = 0;
4009 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
4010 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
4011
4012 if (xmit_type & XMIT_CSUM)
4013 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
4014
4015 SET_FLAG(global_data,
4016 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
4017 pbd_e1x->global_data |= cpu_to_le16(global_data);
4018 }
4019
4020
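	/* Setup the data pointer of the first BD of the packet */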
4021 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4022 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4023 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
4024 pkt_size = tx_start_bd->nbytes;
4025
4026 DP(NETIF_MSG_TX_QUEUED,
4027 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
4028 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
4029 le16_to_cpu(tx_start_bd->nbytes),
4030 tx_start_bd->bd_flags.as_bitfield,
4031 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
4032
4033 if (xmit_type & XMIT_GSO) {
4034
4035 DP(NETIF_MSG_TX_QUEUED,
4036 "TSO packet len %d hlen %d total len %d tso size %d\n",
4037 skb->len, hlen, skb_headlen(skb),
4038 skb_shinfo(skb)->gso_size);
4039
4040 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
4041
4042 if (unlikely(skb_headlen(skb) > hlen)) {
4043 nbd++;
4044 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
4045 &tx_start_bd, hlen,
4046 bd_prod);
4047 }
4048 if (!CHIP_IS_E1x(bp))
4049 pbd_e2_parsing_data |=
4050 (skb_shinfo(skb)->gso_size <<
4051 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
4052 ETH_TX_PARSE_BD_E2_LSO_MSS;
4053 else
4054 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
4055 }

	/* Set the PBD's parsing_data field if it is not zero (for chips
	 * newer than the 57711).
	 */
4060 if (pbd_e2_parsing_data)
4061 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
4062
4063 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
4064
4065
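	/* Handle the fragmented part of the skb */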
4066 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4067 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4068
4069 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
4070 skb_frag_size(frag), DMA_TO_DEVICE);
4071 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
4072 unsigned int pkts_compl = 0, bytes_compl = 0;
4073
4074 DP(NETIF_MSG_TX_QUEUED,
4075 "Unable to map page - dropping packet...\n");

			/* Unwind: set first_bd->nbd so the free routine
			 * knows how many BDs to walk, then unmap everything
			 * mapped so far for this packet and drop it.
			 */
4082 first_bd->nbd = cpu_to_le16(nbd);
4083 bnx2x_free_tx_pkt(bp, txdata,
4084 TX_BD(txdata->tx_pkt_prod),
4085 &pkts_compl, &bytes_compl);
4086 return NETDEV_TX_OK;
4087 }
4088
4089 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4090 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4091 if (total_pkt_bd == NULL)
4092 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4093
4094 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4095 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4096 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4097 le16_add_cpu(&pkt_size, skb_frag_size(frag));
4098 nbd++;
4099
4100 DP(NETIF_MSG_TX_QUEUED,
4101 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
4102 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4103 le16_to_cpu(tx_data_bd->nbytes));
4104 }
4105
4106 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4107
4108
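	/* Update with the actual number of BDs */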
4109 first_bd->nbd = cpu_to_le16(nbd);
4110
4111 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* Count the next-page BD if the packet contains or ends with it */
4116 if (TX_BD_POFF(bd_prod) < nbd)
4117 nbd++;

	/* total_pkt_bytes should be set on the first data BD if it is not
	 * an LSO packet and there is more than one data BD. We prefer to
	 * set it for LSO packets as well (although we don't have to) to
	 * save CPU cycles in the non-LSO case, which we care much more
	 * about.
	 */
4126 if (total_pkt_bd != NULL)
4127 total_pkt_bd->total_pkt_bytes = pkt_size;
4128
4129 if (pbd_e1x)
4130 DP(NETIF_MSG_TX_QUEUED,
4131 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
4132 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4133 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4134 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4135 le16_to_cpu(pbd_e1x->total_hlen_w));
4136 if (pbd_e2)
4137 DP(NETIF_MSG_TX_QUEUED,
4138 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
4139 pbd_e2,
4140 pbd_e2->data.mac_addr.dst_hi,
4141 pbd_e2->data.mac_addr.dst_mid,
4142 pbd_e2->data.mac_addr.dst_lo,
4143 pbd_e2->data.mac_addr.src_hi,
4144 pbd_e2->data.mac_addr.src_mid,
4145 pbd_e2->data.mac_addr.src_lo,
4146 pbd_e2->parsing_data);
4147 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
4148
4149 netdev_tx_sent_queue(txq, skb->len);
4150
4151 skb_tx_timestamp(skb);
4152
4153 txdata->tx_pkt_prod++;

	/* Make sure the BD data is written out before the producer update,
	 * since the FW may read the BDs right after the producer is
	 * updated. This matters on weakly ordered archs, and the FW also
	 * assumes every packet already has its BDs in place.
	 */
	wmb();
4162
4163 txdata->tx_db.data.prod += nbd;
4164
4165 wmb();
4166
4167 DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw);
4168
4169 mmiowb();
4170
4171 txdata->tx_bd_prod += nbd;
4172
4173 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
4174 netif_tx_stop_queue(txq);

		/* The paired memory barrier is in bnx2x_tx_int(); it keeps
		 * the ordering between the set_bit() in
		 * netif_tx_stop_queue() and the read of the Tx consumer.
		 */
		smp_mb();
4180
4181 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
4182 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4183 netif_tx_wake_queue(txq);
4184 }
4185 txdata->tx_pkt++;
4186
4187 return NETDEV_TX_OK;
4188}
4189
4190void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
4191{
4192 int mfw_vn = BP_FW_MB_IDX(bp);
4193 u32 tmp;
4194
4195
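	/* Without a BD multi-function configuration, use the identity
	 * priority map.
	 */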
4196 if (!IS_MF_BD(bp)) {
4197 int i;
4198
4199 for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
4200 c2s_map[i] = i;
4201 *c2s_default = 0;
4202
4203 return;
4204 }
4205
4206 tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
4207 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4208 c2s_map[0] = tmp & 0xff;
4209 c2s_map[1] = (tmp >> 8) & 0xff;
4210 c2s_map[2] = (tmp >> 16) & 0xff;
4211 c2s_map[3] = (tmp >> 24) & 0xff;
4212
4213 tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
4214 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4215 c2s_map[4] = tmp & 0xff;
4216 c2s_map[5] = (tmp >> 8) & 0xff;
4217 c2s_map[6] = (tmp >> 16) & 0xff;
4218 c2s_map[7] = (tmp >> 24) & 0xff;
4219
4220 tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
4221 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4222 *c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
4223}
4224
/**
 * bnx2x_setup_tc - configure the net_device for multiple traffic classes
 * @dev:	net device to configure
 * @num_tc:	number of traffic classes to enable
 *
 * Callback connected to the ndo_setup_tc function pointer.
 */
4233int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4234{
4235 struct bnx2x *bp = netdev_priv(dev);
4236 u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
4237 int cos, prio, count, offset;
4238
4239
4240 ASSERT_RTNL();
4241
4242
4243 if (!num_tc) {
4244 netdev_reset_tc(dev);
4245 return 0;
4246 }
4247
4248
4249 if (num_tc > bp->max_cos) {
4250 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4251 num_tc, bp->max_cos);
4252 return -EINVAL;
4253 }
4254
4255
4256 if (netdev_set_num_tc(dev, num_tc)) {
4257 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4258 return -EINVAL;
4259 }
4260
4261 bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
4262
4263
4264 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4265 int outer_prio = c2s_map[prio];
4266
4267 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
4268 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4269 "mapping priority %d to tc %d\n",
4270 outer_prio, bp->prio_to_cos[outer_prio]);
4271 }

	/* Configure the traffic-class to transmission-queue mapping: each
	 * CoS gets its own contiguous block of ETH queues.
	 */
4284 for (cos = 0; cos < bp->max_cos; cos++) {
4285 count = BNX2X_NUM_ETH_QUEUES(bp);
4286 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4287 netdev_set_tc_queue(dev, cos, count, offset);
4288 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4289 "mapping tc %d to offset %d count %d\n",
4290 cos, offset, count);
4291 }
4292
4293 return 0;
4294}
4295
4296int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type,
4297 void *type_data)
4298{
4299 struct tc_mqprio_qopt *mqprio = type_data;
4300
4301 if (type != TC_SETUP_QDISC_MQPRIO)
4302 return -EOPNOTSUPP;
4303
4304 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
4305
4306 return bnx2x_setup_tc(dev, mqprio->num_tc);
4307}
4308
4309
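/* Must be called with rtnl_lock held */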
4310int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4311{
4312 struct sockaddr *addr = p;
4313 struct bnx2x *bp = netdev_priv(dev);
4314 int rc = 0;
4315
4316 if (!is_valid_ether_addr(addr->sa_data)) {
4317 BNX2X_ERR("Requested MAC address is not valid\n");
4318 return -EINVAL;
4319 }
4320
4321 if (IS_MF_STORAGE_ONLY(bp)) {
4322 BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
4323 return -EINVAL;
4324 }
4325
4326 if (netif_running(dev)) {
4327 rc = bnx2x_set_eth_mac(bp, false);
4328 if (rc)
4329 return rc;
4330 }
4331
4332 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4333
4334 if (netif_running(dev))
4335 rc = bnx2x_set_eth_mac(bp, true);
4336
4337 if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4338 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4339
4340 return rc;
4341}
4342
4343static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4344{
4345 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4346 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4347 u8 cos;
4348
4349
4350
4351 if (IS_FCOE_IDX(fp_index)) {
4352 memset(sb, 0, sizeof(union host_hc_status_block));
4353 fp->status_blk_mapping = 0;
4354 } else {
4355
4356 if (!CHIP_IS_E1x(bp))
4357 BNX2X_PCI_FREE(sb->e2_sb,
4358 bnx2x_fp(bp, fp_index,
4359 status_blk_mapping),
4360 sizeof(struct host_hc_status_block_e2));
4361 else
4362 BNX2X_PCI_FREE(sb->e1x_sb,
4363 bnx2x_fp(bp, fp_index,
4364 status_blk_mapping),
4365 sizeof(struct host_hc_status_block_e1x));
4366 }
4367
4368
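	/* Rx */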
4369 if (!skip_rx_queue(bp, fp_index)) {
4370 bnx2x_free_rx_bds(fp);
4371
4372
4373 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4374 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4375 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4376 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4377
4378 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4379 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4380 sizeof(struct eth_fast_path_rx_cqe) *
4381 NUM_RCQ_BD);
4382
4383
4384 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4385 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4386 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4387 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4388 }
4389
4390
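	/* Tx */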
4391 if (!skip_tx_queue(bp, fp_index)) {
4392
4393 for_each_cos_in_tx_queue(fp, cos) {
4394 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4395
4396 DP(NETIF_MSG_IFDOWN,
4397 "freeing tx memory of fp %d cos %d cid %d\n",
4398 fp_index, cos, txdata->cid);
4399
4400 BNX2X_FREE(txdata->tx_buf_ring);
4401 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4402 txdata->tx_desc_mapping,
4403 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4404 }
4405 }
4406
4407}
4408
4409static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4410{
4411 int i;
4412 for_each_cnic_queue(bp, i)
4413 bnx2x_free_fp_mem_at(bp, i);
4414}
4415
4416void bnx2x_free_fp_mem(struct bnx2x *bp)
4417{
4418 int i;
4419 for_each_eth_queue(bp, i)
4420 bnx2x_free_fp_mem_at(bp, i);
4421}
4422
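/* Cache pointers to the status block index arrays for fast access */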
4423static void set_sb_shortcuts(struct bnx2x *bp, int index)
4424{
4425 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4426 if (!CHIP_IS_E1x(bp)) {
4427 bnx2x_fp(bp, index, sb_index_values) =
4428 (__le16 *)status_blk.e2_sb->sb.index_values;
4429 bnx2x_fp(bp, index, sb_running_index) =
4430 (__le16 *)status_blk.e2_sb->sb.running_index;
4431 } else {
4432 bnx2x_fp(bp, index, sb_index_values) =
4433 (__le16 *)status_blk.e1x_sb->sb.index_values;
4434 bnx2x_fp(bp, index, sb_running_index) =
4435 (__le16 *)status_blk.e1x_sb->sb.running_index;
4436 }
4437}
4438
4439
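/* Fill the Rx rings; returns the number of buffers actually allocated */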
4440static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4441 int rx_ring_size)
4442{
4443 struct bnx2x *bp = fp->bp;
4444 u16 ring_prod, cqe_ring_prod;
4445 int i, failure_cnt = 0;
4446
4447 fp->rx_comp_cons = 0;
4448 cqe_ring_prod = ring_prod = 0;
4449
4450
4451
4452
4453 for (i = 0; i < rx_ring_size; i++) {
4454 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
4455 failure_cnt++;
4456 continue;
4457 }
4458 ring_prod = NEXT_RX_IDX(ring_prod);
4459 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4460 WARN_ON(ring_prod <= (i - failure_cnt));
4461 }
4462
4463 if (failure_cnt)
4464 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4465 i - failure_cnt, fp->index);
4466
4467 fp->rx_bd_prod = ring_prod;
4468
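	/* Limit the CQE producer by the CQE ring size */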
4469 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4470 cqe_ring_prod);
4471
4472 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4473
4474 return i - failure_cnt;
4475}
4476
4477static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4478{
4479 int i;
4480
4481 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4482 struct eth_rx_cqe_next_page *nextpg;
4483
4484 nextpg = (struct eth_rx_cqe_next_page *)
4485 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4486 nextpg->addr_hi =
4487 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4488 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4489 nextpg->addr_lo =
4490 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4491 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4492 }
4493}
4494
4495static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4496{
4497 union host_hc_status_block *sb;
4498 struct bnx2x_fastpath *fp = &bp->fp[index];
4499 int ring_size = 0;
4500 u8 cos;
4501 int rx_ring_size = 0;
4502
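	/* Derive a default Rx ring size if none was configured */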
4503 if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
4504 rx_ring_size = MIN_RX_SIZE_NONTPA;
4505 bp->rx_ring_size = rx_ring_size;
4506 } else if (!bp->rx_ring_size) {
4507 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4508
4509 if (CHIP_IS_E3(bp)) {
4510 u32 cfg = SHMEM_RD(bp,
4511 dev_info.port_hw_config[BP_PORT(bp)].
4512 default_cfg);
4513
4514
4515 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4516 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4517 rx_ring_size /= 10;
4518 }
4519
4520
4521 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4522 MIN_RX_SIZE_TPA, rx_ring_size);
4523
4524 bp->rx_ring_size = rx_ring_size;
4525 } else
4526 rx_ring_size = bp->rx_ring_size;
4527
4528 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4529
4530
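	/* Status block */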
4531 sb = &bnx2x_fp(bp, index, status_blk);
4532
4533 if (!IS_FCOE_IDX(index)) {
4534
4535 if (!CHIP_IS_E1x(bp)) {
4536 sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4537 sizeof(struct host_hc_status_block_e2));
4538 if (!sb->e2_sb)
4539 goto alloc_mem_err;
4540 } else {
4541 sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4542 sizeof(struct host_hc_status_block_e1x));
4543 if (!sb->e1x_sb)
4544 goto alloc_mem_err;
4545 }
4546 }
4547
	/* The FCoE queue uses the default SB and does not ACK it, so no
	 * shortcuts need to be set for it.
	 */
4551 if (!IS_FCOE_IDX(index))
4552 set_sb_shortcuts(bp, index);
4553
4554
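	/* Tx */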
4555 if (!skip_tx_queue(bp, index)) {
4556
4557 for_each_cos_in_tx_queue(fp, cos) {
4558 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4559
4560 DP(NETIF_MSG_IFUP,
4561 "allocating tx memory of fp %d cos %d\n",
4562 index, cos);
4563
4564 txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4565 sizeof(struct sw_tx_bd),
4566 GFP_KERNEL);
4567 if (!txdata->tx_buf_ring)
4568 goto alloc_mem_err;
4569 txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4570 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4571 if (!txdata->tx_desc_ring)
4572 goto alloc_mem_err;
4573 }
4574 }
4575
4576
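	/* Rx */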
4577 if (!skip_rx_queue(bp, index)) {
4578
4579 bnx2x_fp(bp, index, rx_buf_ring) =
4580 kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4581 if (!bnx2x_fp(bp, index, rx_buf_ring))
4582 goto alloc_mem_err;
4583 bnx2x_fp(bp, index, rx_desc_ring) =
4584 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4585 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4586 if (!bnx2x_fp(bp, index, rx_desc_ring))
4587 goto alloc_mem_err;
4588
4589
4590 bnx2x_fp(bp, index, rx_comp_ring) =
4591 BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4592 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4593 if (!bnx2x_fp(bp, index, rx_comp_ring))
4594 goto alloc_mem_err;
4595
4596
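		/* SGE ring */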
4597 bnx2x_fp(bp, index, rx_page_ring) =
4598 kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4599 GFP_KERNEL);
4600 if (!bnx2x_fp(bp, index, rx_page_ring))
4601 goto alloc_mem_err;
4602 bnx2x_fp(bp, index, rx_sge_ring) =
4603 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4604 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4605 if (!bnx2x_fp(bp, index, rx_sge_ring))
4606 goto alloc_mem_err;
4607
4608 bnx2x_set_next_page_rx_bd(fp);
4609
4610
4611 bnx2x_set_next_page_rx_cq(fp);
4612
4613
4614 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4615 if (ring_size < rx_ring_size)
4616 goto alloc_mem_err;
4617 }
4618
4619 return 0;
4620
4621
4622alloc_mem_err:
4623 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4624 index, ring_size);
4625
	/* The FW drops all packets if the queue is not big enough; fail
	 * this queue if we could not reach the minimal size, which differs
	 * for TPA and non-TPA queues.
	 */
4629 if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
4630 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4631
4632 bnx2x_free_fp_mem_at(bp, index);
4633 return -ENOMEM;
4634 }
4635 return 0;
4636}
4637
4638static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4639{
4640 if (!NO_FCOE(bp))
4641
4642 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
			/* Fail the load process rather than marking
			 * NO_FCOE_FLAG.
			 */
4646 return -ENOMEM;
4647
4648 return 0;
4649}
4650
4651static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4652{
4653 int i;

	/* 1. Allocate the leading FP - fatal on error.
	 * 2. Allocate the RSS FPs - shrink the queue count on error.
	 */
4660 if (bnx2x_alloc_fp_mem_at(bp, 0))
4661 return -ENOMEM;
4662
4663
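	/* RSS queues */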
4664 for_each_nondefault_eth_queue(bp, i)
4665 if (bnx2x_alloc_fp_mem_at(bp, i))
4666 break;
4667
4668
4669 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4670 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4671
4672 WARN_ON(delta < 0);
4673 bnx2x_shrink_eth_fp(bp, delta);
4674 if (CNIC_SUPPORT(bp))
			/* Move the non-ETH FPs next to the last ETH FP; the
			 * FCoE fp is moved even when NO_FCOE_FLAG is set.
			 */
4681 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4682 bp->num_ethernet_queues -= delta;
4683 bp->num_queues = bp->num_ethernet_queues +
4684 bp->num_cnic_queues;
4685 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4686 bp->num_queues + delta, bp->num_queues);
4687 }
4688
4689 return 0;
4690}
4691
4692void bnx2x_free_mem_bp(struct bnx2x *bp)
4693{
4694 int i;
4695
4696 for (i = 0; i < bp->fp_array_size; i++)
4697 kfree(bp->fp[i].tpa_info);
4698 kfree(bp->fp);
4699 kfree(bp->sp_objs);
4700 kfree(bp->fp_stats);
4701 kfree(bp->bnx2x_txq);
4702 kfree(bp->msix_table);
4703 kfree(bp->ilt);
4704}
4705
4706int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4707{
4708 struct bnx2x_fastpath *fp;
4709 struct msix_entry *tbl;
4710 struct bnx2x_ilt *ilt;
4711 int msix_table_size = 0;
4712 int fp_array_size, txq_array_size;
4713 int i;
4714
	/* The biggest MSI-X table we might need is the maximum number of
	 * fastpath IGU SBs plus the default SB (PF only).
	 */
4719 msix_table_size = bp->igu_sb_cnt;
4720 if (IS_PF(bp))
4721 msix_table_size++;
4722 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
4723
4724
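	/* fp array: RSS queues plus the CNIC-related L2 queues */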
4725 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
4726 bp->fp_array_size = fp_array_size;
4727 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
4728
4729 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
4730 if (!fp)
4731 goto alloc_err;
4732 for (i = 0; i < bp->fp_array_size; i++) {
4733 fp[i].tpa_info =
4734 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4735 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4736 if (!(fp[i].tpa_info))
4737 goto alloc_err;
4738 }
4739
4740 bp->fp = fp;
4741
4742
4743 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
4744 GFP_KERNEL);
4745 if (!bp->sp_objs)
4746 goto alloc_err;
4747
4748
4749 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
4750 GFP_KERNEL);
4751 if (!bp->fp_stats)
4752 goto alloc_err;
4753
4754
4755 txq_array_size =
4756 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4757 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4758
4759 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4760 GFP_KERNEL);
4761 if (!bp->bnx2x_txq)
4762 goto alloc_err;
4763
4764
4765 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
4766 if (!tbl)
4767 goto alloc_err;
4768 bp->msix_table = tbl;
4769
4770
4771 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4772 if (!ilt)
4773 goto alloc_err;
4774 bp->ilt = ilt;
4775
4776 return 0;
4777alloc_err:
4778 bnx2x_free_mem_bp(bp);
4779 return -ENOMEM;
4780}
4781
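/* Take the NIC through an unload/load cycle so that changed settings take
 * effect; a no-op if the interface is down. Called under rtnl_lock.
 */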
int bnx2x_reload_if_running(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (unlikely(!netif_running(dev)))
		return 0;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
	return bnx2x_nic_load(bp, LOAD_NORMAL);
}

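/* Return the index of the currently active PHY: the internal PHY when only
 * one PHY is populated, otherwise the external PHY chosen from link status
 * or from the configured PHY selection priority.
 */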
int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;

	if (bp->link_params.num_phys <= 1)
		return INT_PHY;

	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;
		/* In case link is SERDES, check if the EXT_PHY2 is the one */
		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {
		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY2;
			break;
		}
	}

	return sel_phy_idx;
}
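
/* Map the active PHY index to its link configuration index, undoing the
 * swap when PHY swapping is enabled in the hardware configuration.
 */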
int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);

	/* The selected activated PHY is always after swapping (in case PHY
	 * swapping is enabled). So when swapping is enabled, we need to
	 * reverse the configuration.
	 */
	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}
	return LINK_CONFIG_IDX(sel_phy_idx);
}

#ifdef NETDEV_FCOE_WWNN
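/* Report the FCoE World Wide Node or Port Name exposed by the CNIC
 * firmware, selected by @type.
 */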
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
				cp->fcoe_wwn_node_name_lo);
		break;
	case NETDEV_FCOE_WWPN:
		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
				cp->fcoe_wwn_port_name_lo);
		break;
	default:
		BNX2X_ERR("Wrong WWN type requested - %d\n", type);
		return -EINVAL;
	}

	return 0;
}
#endif

/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (pci_num_vf(bp->pdev)) {
		DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
		return -EPERM;
	}

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Can't perform change MTU during parity recovery\n");
		return -EAGAIN;
	}

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load.
	 */
	dev->mtu = new_mtu;

	if (!bnx2x_mtu_allows_gro(new_mtu))
		dev->features &= ~NETIF_F_GRO_HW;

	if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
		SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);

	return bnx2x_reload_if_running(dev);
}

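/* ndo_fix_features hook: sanitize a requested feature set against device
 * constraints before the core commits it.
 */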
netdev_features_t bnx2x_fix_features(struct net_device *dev,
				     netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (pci_num_vf(bp->pdev)) {
		netdev_features_t changed = dev->features ^ features;

		/* Revert the requested changes in features if they
		 * would require internal reload of PF in bnx2x_set_features().
		 */
		if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
			features &= ~NETIF_F_RXCSUM;
			features |= dev->features & NETIF_F_RXCSUM;
		}

		if (changed & NETIF_F_LOOPBACK) {
			features &= ~NETIF_F_LOOPBACK;
			features |= dev->features & NETIF_F_LOOPBACK;
		}
	}

	/* TPA requires Rx CSUM offloading */
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_LRO;

	if (!(features & NETIF_F_GRO) || !bnx2x_mtu_allows_gro(dev->mtu))
		features &= ~NETIF_F_GRO_HW;
	if (features & NETIF_F_GRO_HW)
		features &= ~NETIF_F_LRO;

	return features;
}

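/* ndo_set_features hook: apply a sanitized feature set. Returns 1 after a
 * reload (the driver committed dev->features itself), 0 otherwise, or a
 * negative errno from the reload.
 */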
int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);
	netdev_features_t changes = features ^ dev->features;
	bool bnx2x_reload = false;
	int rc;

	/* VFs or non SRIOV PFs should be able to change loopback feature */
	if (!pci_num_vf(bp->pdev)) {
		if (features & NETIF_F_LOOPBACK) {
			if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
				bp->link_params.loopback_mode = LOOPBACK_BMAC;
				bnx2x_reload = true;
			}
		} else {
			if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
				bp->link_params.loopback_mode = LOOPBACK_NONE;
				bnx2x_reload = true;
			}
		}
	}

	/* Don't care about GRO changes */
	changes &= ~NETIF_F_GRO;

	if (changes)
		bnx2x_reload = true;

	if (bnx2x_reload) {
		if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
			dev->features = features;
			rc = bnx2x_reload_if_running(dev);
			return rc ? rc : 1;
		}
		/* else: the new features are picked up by the next
		 * bnx2x_nic_load(), once recovery completes
		 */
	}

	return 0;
}

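/* ndo_tx_timeout hook: log a state dump for debugging and schedule an
 * asynchronous reset from the sp_rtnl task.
 */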
void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* We want the information of the dump logged,
	 * but calling bnx2x_panic() would kill all chances of recovery.
	 */
	if (!bp->panic)
#ifndef BNX2X_STOP_ON_ERROR
		bnx2x_panic_dump(bp, false);
#else
		bnx2x_panic();
#endif

	/* This allows the netif to be shutdown gracefully before resetting */
	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
}

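/* PCI suspend handler: save PCI state, detach the netdev, unload the NIC
 * and enter the requested power state. PCI state is saved even when the
 * interface is down so resume can always restore it.
 */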
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

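/* PCI resume handler: restore PCI state, return to D0, reattach the netdev
 * and reload the NIC. Refused while parity error recovery is in progress.
 */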
int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

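/* Write the CDU validation values into the ustorm and xstorm regions of a
 * connection context so that the hardware will accept this CID.
 */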
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid)
{
	if (!cxt) {
		BNX2X_ERR("bad context pointer %p\n", cxt);
		return;
	}

	/* ustorm context validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);

	/* xstorm context validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
}

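/* Program the host-coalescing timeout, in BNX2X_BTR ticks, for one status
 * block index via the CSTORM internal memory window.
 */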
static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
				    u8 fw_sb_id, u8 sb_index,
				    u8 ticks)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);

	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d ticks %d\n",
	   port, fw_sb_id, sb_index, ticks);
}

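/* Set or clear the HC_ENABLED flag of one status block index, turning
 * interrupt coalescing for that index on or off.
 */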
static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
				    u16 fw_sb_id, u8 sb_index,
				    u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
	u8 flags = REG_RD8(bp, addr);

	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR8(bp, addr, flags);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d disable %d\n",
	   port, fw_sb_id, sb_index, disable);
}

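/* Update the coalescing parameters of one status block index: convert the
 * requested interval from microseconds to BNX2X_BTR ticks, and disable
 * coalescing when explicitly requested or when the interval is zero.
 */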
void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec)
{
	int port = BP_PORT(bp);
	u8 ticks = usec / BNX2X_BTR;

	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);

	disable = disable ? 1 : (usec ? 0 : 1);
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}

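/* Set a flag in sp_rtnl_state and kick the slowpath rtnl task. The memory
 * barriers around set_bit() ensure the flag update is visible before the
 * work item runs.
 */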
void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
			    u32 verbose)
{
	smp_mb__before_atomic();
	set_bit(flag, &bp->sp_rtnl_state);
	smp_mb__after_atomic();
	DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
	   flag);
	schedule_delayed_work(&bp->sp_rtnl_task, 0);
}
