/* bnx2x_cmn.c: Broadcom/QLogic Everest network driver -- common functions. */
20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22#include <linux/etherdevice.h>
23#include <linux/if_vlan.h>
24#include <linux/interrupt.h>
25#include <linux/ip.h>
26#include <linux/crash_dump.h>
27#include <net/tcp.h>
28#include <net/ipv6.h>
29#include <net/ip6_checksum.h>
30#include <net/busy_poll.h>
31#include <linux/prefetch.h>
32#include "bnx2x_cmn.h"
33#include "bnx2x_init.h"
34#include "bnx2x_sp.h"
35
36static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
37static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
38static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
39static int bnx2x_poll(struct napi_struct *napi, int budget);
40
41static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
42{
43 int i;
44
45
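	/* Add NAPI objects */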
46 for_each_rx_queue_cnic(bp, i) {
47 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
48 bnx2x_poll, NAPI_POLL_WEIGHT);
49 }
50}
51
52static void bnx2x_add_all_napi(struct bnx2x *bp)
53{
54 int i;
55
56
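	/* Add NAPI objects */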
57 for_each_eth_queue(bp, i) {
58 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
59 bnx2x_poll, NAPI_POLL_WEIGHT);
60 }
61}
62
63static int bnx2x_calc_num_queues(struct bnx2x *bp)
64{
65 int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
66
67
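	/* Reduce memory usage in a kdump environment by using a single queue */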
68 if (is_kdump_kernel())
69 nq = 1;
70
71 nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
72 return nq;
73}
74
/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of the bp->fp[to].napi is kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
88static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
89{
90 struct bnx2x_fastpath *from_fp = &bp->fp[from];
91 struct bnx2x_fastpath *to_fp = &bp->fp[to];
92 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
93 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
94 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
95 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
96 int old_max_eth_txqs, new_max_eth_txqs;
97 int old_txdata_index = 0, new_txdata_index = 0;
98 struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
99
	/* Copy the NAPI object as it has been already initialized */
101 from_fp->napi = to_fp->napi;
102
103
104 memcpy(to_fp, from_fp, sizeof(*to_fp));
105 to_fp->index = to;

	/* Retain the tpa_info of the original `to' version as we don't want
	 * 2 FPs to contain the same tpa_info pointer.
	 */
110 to_fp->tpa_info = old_tpa_info;
111
112
113 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
114
115
116 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */
123 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
124 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
125 (bp)->max_cos;
126 if (from == FCOE_IDX(bp)) {
127 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
128 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
129 }
130
131 memcpy(&bp->bnx2x_txq[new_txdata_index],
132 &bp->bnx2x_txq[old_txdata_index],
133 sizeof(struct bnx2x_fp_txdata));
134 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
135}
136
/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp:      driver handle
 * @buf:     character buffer to fill with the fw name
 * @buf_len: length of the above buffer
 *
 */
145void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
146{
147 if (IS_PF(bp)) {
148 u8 phy_fw_ver[PHY_FW_VER_LEN];
149
150 phy_fw_ver[0] = '\0';
151 bnx2x_get_ext_phy_fw_version(&bp->link_params,
152 phy_fw_ver, PHY_FW_VER_LEN);
153 strlcpy(buf, bp->fw_ver, buf_len);
154 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
155 "bc %d.%d.%d%s%s",
156 (bp->common.bc_ver & 0xff0000) >> 16,
157 (bp->common.bc_ver & 0xff00) >> 8,
158 (bp->common.bc_ver & 0xff),
159 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
160 } else {
161 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
162 }
163}
164
/**
 * bnx2x_shrink_eth_fp - edit fp->txdata_ptr[] when reducing the count of eth fps.
 *
 * @bp:		driver handle
 * @delta:	number of eth queues which were not allocated
 */
171static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
172{
173 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);

	/* Queue pointer cannot be re-set on an fp-basis, as moving pointer
	 * backward along the array could cause memory to be overridden
	 */
178 for (cos = 1; cos < bp->max_cos; cos++) {
179 for (i = 0; i < old_eth_num - delta; i++) {
180 struct bnx2x_fastpath *fp = &bp->fp[i];
181 int new_idx = cos * (old_eth_num - delta) + i;
182
183 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
184 sizeof(struct bnx2x_fp_txdata));
185 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
186 }
187 }
188}
189
int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
191
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
195static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
196 u16 idx, unsigned int *pkts_compl,
197 unsigned int *bytes_compl)
198{
199 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
200 struct eth_tx_start_bd *tx_start_bd;
201 struct eth_tx_bd *tx_data_bd;
202 struct sk_buff *skb = tx_buf->skb;
203 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
204 int nbd;
205 u16 split_bd_len = 0;
206
207
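	/* prefetch skb end pointer to speedup dev_kfree_skb() */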
208 prefetch(&skb->end);
209
210 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
211 txdata->txq_index, idx, tx_buf, skb);
212
213 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
214
215 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
216#ifdef BNX2X_STOP_ON_ERROR
217 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
218 BNX2X_ERR("BAD nbd!\n");
219 bnx2x_panic();
220 }
221#endif
222 new_cons = nbd + tx_buf->first_bd;
223
224
225 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
226
227
228 --nbd;
229 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
230
231 if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
232
233 --nbd;
234 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
235 }
236
237
238 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
239 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
240 split_bd_len = BD_UNMAP_LEN(tx_data_bd);
241 --nbd;
242 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
243 }
244
245
246 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
247 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
248 DMA_TO_DEVICE);
249
250
251 while (nbd > 0) {
252
253 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
254 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
255 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
256 if (--nbd)
257 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
258 }
259
260
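	/* release skb */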
261 WARN_ON(!skb);
262 if (likely(skb)) {
263 (*pkts_compl)++;
264 (*bytes_compl) += skb->len;
265 dev_kfree_skb_any(skb);
266 }
267
268 tx_buf->first_bd = 0;
269 tx_buf->skb = NULL;
270
271 return new_cons;
272}
273
274int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
275{
276 struct netdev_queue *txq;
277 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
278 unsigned int pkts_compl = 0, bytes_compl = 0;
279
280#ifdef BNX2X_STOP_ON_ERROR
281 if (unlikely(bp->panic))
282 return -1;
283#endif
284
285 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
286 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
287 sw_cons = txdata->tx_pkt_cons;
288
289 while (sw_cons != hw_cons) {
290 u16 pkt_cons;
291
292 pkt_cons = TX_BD(sw_cons);
293
294 DP(NETIF_MSG_TX_DONE,
295 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
296 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
297
298 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
299 &pkts_compl, &bytes_compl);
300
301 sw_cons++;
302 }
303
304 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
305
306 txdata->tx_pkt_cons = sw_cons;
307 txdata->tx_bd_cons = bd_cons;
308
	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
318 smp_mb();
319
320 if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could have happened if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */
331 __netif_tx_lock(txq, smp_processor_id());
332
333 if ((netif_tx_queue_stopped(txq)) &&
334 (bp->state == BNX2X_STATE_OPEN) &&
335 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
336 netif_tx_wake_queue(txq);
337
338 __netif_tx_unlock(txq);
339 }
340 return 0;
341}
342
343static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
344 u16 idx)
345{
346 u16 last_max = fp->last_max_sge;
347
348 if (SUB_S16(idx, last_max) > 0)
349 fp->last_max_sge = idx;
350}
351
352static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
353 u16 sge_len,
354 struct eth_end_agg_rx_cqe *cqe)
355{
356 struct bnx2x *bp = fp->bp;
357 u16 last_max, last_elem, first_elem;
358 u16 delta = 0;
359 u16 i;
360
361 if (!sge_len)
362 return;
363
364
365 for (i = 0; i < sge_len; i++)
366 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
367 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
368
369 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
370 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
371
372
373 prefetch((void *)(fp->sge_mask));
374 bnx2x_update_last_max_sge(fp,
375 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
376
377 last_max = RX_SGE(fp->last_max_sge);
378 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
379 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
380
381
382 if (last_elem + 1 != first_elem)
383 last_elem++;
384
385
386 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
387 if (likely(fp->sge_mask[i]))
388 break;
389
390 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
391 delta += BIT_VEC64_ELEM_SZ;
392 }
393
394 if (delta > 0) {
395 fp->rx_sge_prod += delta;
396
397 bnx2x_clear_sge_mask_next_elems(fp);
398 }
399
400 DP(NETIF_MSG_RX_STATUS,
401 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
402 fp->last_max_sge, fp->rx_sge_prod);
403}
404
405
406
407
408static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
409 const struct eth_fast_path_rx_cqe *cqe,
410 enum pkt_hash_types *rxhash_type)
411{
412
413 if ((bp->dev->features & NETIF_F_RXHASH) &&
414 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
415 enum eth_rss_hash_type htype;
416
417 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
418 *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
419 (htype == TCP_IPV6_HASH_TYPE)) ?
420 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
421
422 return le32_to_cpu(cqe->rss_hash_result);
423 }
424 *rxhash_type = PKT_HASH_TYPE_NONE;
425 return 0;
426}
427
428static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
429 u16 cons, u16 prod,
430 struct eth_fast_path_rx_cqe *cqe)
431{
432 struct bnx2x *bp = fp->bp;
433 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
434 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
435 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
436 dma_addr_t mapping;
437 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
438 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
439
440
441 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
442 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
443
444
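	/* Try to map an empty data buffer from the aggregation info */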
445 mapping = dma_map_single(&bp->pdev->dev,
446 first_buf->data + NET_SKB_PAD,
447 fp->rx_buf_size, DMA_FROM_DEVICE);
448
449
450
451
452
453
454 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
455
456 bnx2x_reuse_rx_data(fp, cons, prod);
457 tpa_info->tpa_state = BNX2X_TPA_ERROR;
458 return;
459 }
460
461
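	/* move empty data from pool to prod */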
462 prod_rx_buf->data = first_buf->data;
463 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
464
465 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
466 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
467
468
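	/* move partial skb from cons to pool (don't unmap yet) */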
469 *first_buf = *cons_rx_buf;
470
471
472 tpa_info->parsing_flags =
473 le16_to_cpu(cqe->pars_flags.flags);
474 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
475 tpa_info->tpa_state = BNX2X_TPA_START;
476 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
477 tpa_info->placement_offset = cqe->placement_offset;
478 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
479 if (fp->mode == TPA_MODE_GRO) {
480 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
481 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
482 tpa_info->gro_size = gro_size;
483 }
484
485#ifdef BNX2X_STOP_ON_ERROR
486 fp->tpa_queue_used |= (1 << queue);
487 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
488 fp->tpa_queue_used);
489#endif
490}
491
/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
496#define TPA_TSTAMP_OPT_LEN 12
/**
 * bnx2x_set_gro_params - compute GRO values
 *
 * @skb:		packet skb
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 * @pkt_len:		length of all segments
 * @num_of_coalesced_segs: count of coalesced segments
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 * Compute number of aggregated segments, and gso_type.
 */
510static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
511 u16 len_on_bd, unsigned int pkt_len,
512 u16 num_of_coalesced_segs)
513{
	/* TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
517 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
518
519 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
520 PRS_FLAG_OVERETH_IPV6) {
521 hdrs_len += sizeof(struct ipv6hdr);
522 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
523 } else {
524 hdrs_len += sizeof(struct iphdr);
525 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
526 }
527
	/* Check if there was a TCP timestamp; if there is, it will always
	 * be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
533 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
534 hdrs_len += TPA_TSTAMP_OPT_LEN;
535
536 skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
537
538
539
540
541 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
542}
543
544static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
545 u16 index, gfp_t gfp_mask)
546{
547 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
548 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
549 struct bnx2x_alloc_pool *pool = &fp->page_pool;
550 dma_addr_t mapping;
551
552 if (!pool->page) {
553 pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
554 if (unlikely(!pool->page))
555 return -ENOMEM;
556
557 pool->offset = 0;
558 }
559
560 mapping = dma_map_page(&bp->pdev->dev, pool->page,
561 pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
562 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
563 BNX2X_ERR("Can't map sge\n");
564 return -ENOMEM;
565 }
566
567 sw_buf->page = pool->page;
568 sw_buf->offset = pool->offset;
569
570 dma_unmap_addr_set(sw_buf, mapping, mapping);
571
572 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
573 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
574
575 pool->offset += SGE_PAGE_SIZE;
576 if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE)
577 get_page(pool->page);
578 else
579 pool->page = NULL;
580 return 0;
581}
582
583static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
584 struct bnx2x_agg_info *tpa_info,
585 u16 pages,
586 struct sk_buff *skb,
587 struct eth_end_agg_rx_cqe *cqe,
588 u16 cqe_idx)
589{
590 struct sw_rx_page *rx_pg, old_rx_pg;
591 u32 i, frag_len, frag_size;
592 int err, j, frag_id = 0;
593 u16 len_on_bd = tpa_info->len_on_bd;
594 u16 full_page = 0, gro_size = 0;
595
596 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
597
598 if (fp->mode == TPA_MODE_GRO) {
599 gro_size = tpa_info->gro_size;
600 full_page = tpa_info->full_page;
601 }
602
603
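	/* Compute GRO parameters for the aggregated packet, so the stack can
	 * resegment it (e.g. when forwarding).
	 */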
604 if (frag_size)
605 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
606 le16_to_cpu(cqe->pkt_len),
607 le16_to_cpu(cqe->num_of_coalesced_segs));
608
609#ifdef BNX2X_STOP_ON_ERROR
610 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
611 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
612 pages, cqe_idx);
613 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
614 bnx2x_panic();
615 return -EINVAL;
616 }
617#endif
618
619
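	/* Run through the SGL and compose the fragmented skb */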
620 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
621 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
622
623
624
625 if (fp->mode == TPA_MODE_GRO)
626 frag_len = min_t(u32, frag_size, (u32)full_page);
627 else
628 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
629
630 rx_pg = &fp->rx_page_ring[sge_idx];
631 old_rx_pg = *rx_pg;
632
633
634
635 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
636 if (unlikely(err)) {
637 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
638 return err;
639 }
640
641 dma_unmap_page(&bp->pdev->dev,
642 dma_unmap_addr(&old_rx_pg, mapping),
643 SGE_PAGE_SIZE, DMA_FROM_DEVICE);
644
645 if (fp->mode == TPA_MODE_LRO)
646 skb_fill_page_desc(skb, j, old_rx_pg.page,
647 old_rx_pg.offset, frag_len);
648 else {
649 int rem;
650 int offset = 0;
651 for (rem = frag_len; rem > 0; rem -= gro_size) {
652 int len = rem > gro_size ? gro_size : rem;
653 skb_fill_page_desc(skb, frag_id++,
654 old_rx_pg.page,
655 old_rx_pg.offset + offset,
656 len);
657 if (offset)
658 get_page(old_rx_pg.page);
659 offset += len;
660 }
661 }
662
663 skb->data_len += frag_len;
664 skb->truesize += SGE_PAGES;
665 skb->len += frag_len;
666
667 frag_size -= frag_len;
668 }
669
670 return 0;
671}
672
673static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
674{
675 if (fp->rx_frag_size)
676 skb_free_frag(data);
677 else
678 kfree(data);
679}
680
681static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
682{
683 if (fp->rx_frag_size) {
684
685 if (unlikely(gfpflags_allow_blocking(gfp_mask)))
686 return (void *)__get_free_page(gfp_mask);
687
688 return netdev_alloc_frag(fp->rx_frag_size);
689 }
690
691 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
692}
693
694#ifdef CONFIG_INET
695static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
696{
697 const struct iphdr *iph = ip_hdr(skb);
698 struct tcphdr *th;
699
700 skb_set_transport_header(skb, sizeof(struct iphdr));
701 th = tcp_hdr(skb);
702
703 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
704 iph->saddr, iph->daddr, 0);
705}
706
707static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
708{
709 struct ipv6hdr *iph = ipv6_hdr(skb);
710 struct tcphdr *th;
711
712 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
713 th = tcp_hdr(skb);
714
715 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
716 &iph->saddr, &iph->daddr, 0);
717}
718
719static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
720 void (*gro_func)(struct bnx2x*, struct sk_buff*))
721{
722 skb_reset_network_header(skb);
723 gro_func(bp, skb);
724 tcp_gro_complete(skb);
725}
726#endif
727
728static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
729 struct sk_buff *skb)
730{
731#ifdef CONFIG_INET
732 if (skb_shinfo(skb)->gso_size) {
733 switch (be16_to_cpu(skb->protocol)) {
734 case ETH_P_IP:
735 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
736 break;
737 case ETH_P_IPV6:
738 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
739 break;
740 default:
741 WARN_ONCE(1, "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
742 be16_to_cpu(skb->protocol));
743 }
744 }
745#endif
746 skb_record_rx_queue(skb, fp->rx_queue);
747 napi_gro_receive(&fp->napi, skb);
748}
749
750static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
751 struct bnx2x_agg_info *tpa_info,
752 u16 pages,
753 struct eth_end_agg_rx_cqe *cqe,
754 u16 cqe_idx)
755{
756 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
757 u8 pad = tpa_info->placement_offset;
758 u16 len = tpa_info->len_on_bd;
759 struct sk_buff *skb = NULL;
760 u8 *new_data, *data = rx_buf->data;
761 u8 old_tpa_state = tpa_info->tpa_state;
762
763 tpa_info->tpa_state = BNX2X_TPA_STOP;
764
765
766
767
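	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */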
768 if (old_tpa_state == BNX2X_TPA_ERROR)
769 goto drop;
770
771
772 new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
773
774
775
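	/* Unmap skb in the pool anyway, as we are going to change
	 * pool entry status to BNX2X_TPA_STOP even if new skb allocation
	 * fails.
	 */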
776 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
777 fp->rx_buf_size, DMA_FROM_DEVICE);
778 if (likely(new_data))
779 skb = build_skb(data, fp->rx_frag_size);
780
781 if (likely(skb)) {
782#ifdef BNX2X_STOP_ON_ERROR
783 if (pad + len > fp->rx_buf_size) {
784 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
785 pad, len, fp->rx_buf_size);
786 bnx2x_panic();
787 return;
788 }
789#endif
790
791 skb_reserve(skb, pad + NET_SKB_PAD);
792 skb_put(skb, len);
793 skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
794
795 skb->protocol = eth_type_trans(skb, bp->dev);
796 skb->ip_summed = CHECKSUM_UNNECESSARY;
797
798 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
799 skb, cqe, cqe_idx)) {
800 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
801 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
802 bnx2x_gro_receive(bp, fp, skb);
803 } else {
804 DP(NETIF_MSG_RX_STATUS,
805 "Failed to allocate new pages - dropping packet!\n");
806 dev_kfree_skb_any(skb);
807 }
808
809
810 rx_buf->data = new_data;
811
812 return;
813 }
814 if (new_data)
815 bnx2x_frag_free(fp, new_data);
816drop:
817
818 DP(NETIF_MSG_RX_STATUS,
819 "Failed to allocate or map a new skb - dropping packet!\n");
820 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
821}
822
823static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
824 u16 index, gfp_t gfp_mask)
825{
826 u8 *data;
827 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
828 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
829 dma_addr_t mapping;
830
831 data = bnx2x_frag_alloc(fp, gfp_mask);
832 if (unlikely(data == NULL))
833 return -ENOMEM;
834
835 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
836 fp->rx_buf_size,
837 DMA_FROM_DEVICE);
838 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
839 bnx2x_frag_free(fp, data);
840 BNX2X_ERR("Can't map rx data\n");
841 return -ENOMEM;
842 }
843
844 rx_buf->data = data;
845 dma_unmap_addr_set(rx_buf, mapping, mapping);
846
847 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
848 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
849
850 return 0;
851}
852
853static
854void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
855 struct bnx2x_fastpath *fp,
856 struct bnx2x_eth_q_stats *qstats)
857{
858
859
860
861
862
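	/* Do nothing if no L4 csum validation was done.
	 * We do not check whether IP csum was validated. For IPv4 we assume
	 * that if the card got as far as validating the L4 csum, it also
	 * validated the IP csum. IPv6 has no IP csum.
	 */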
863 if (cqe->fast_path_cqe.status_flags &
864 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
865 return;
866
867
868
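	/* If both IP/L4 validation were done, check if an error was found. */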
869 if (cqe->fast_path_cqe.type_error_flags &
870 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
871 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
872 qstats->hw_csum_err++;
873 else
874 skb->ip_summed = CHECKSUM_UNNECESSARY;
875}
876
877static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
878{
879 struct bnx2x *bp = fp->bp;
880 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
881 u16 sw_comp_cons, sw_comp_prod;
882 int rx_pkt = 0;
883 union eth_rx_cqe *cqe;
884 struct eth_fast_path_rx_cqe *cqe_fp;
885
886#ifdef BNX2X_STOP_ON_ERROR
887 if (unlikely(bp->panic))
888 return 0;
889#endif
890 if (budget <= 0)
891 return rx_pkt;
892
893 bd_cons = fp->rx_bd_cons;
894 bd_prod = fp->rx_bd_prod;
895 bd_prod_fw = bd_prod;
896 sw_comp_cons = fp->rx_comp_cons;
897 sw_comp_prod = fp->rx_comp_prod;
898
899 comp_ring_cons = RCQ_BD(sw_comp_cons);
900 cqe = &fp->rx_comp_ring[comp_ring_cons];
901 cqe_fp = &cqe->fast_path_cqe;
902
903 DP(NETIF_MSG_RX_STATUS,
904 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
905
906 while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
907 struct sw_rx_bd *rx_buf = NULL;
908 struct sk_buff *skb;
909 u8 cqe_fp_flags;
910 enum eth_rx_cqe_type cqe_fp_type;
911 u16 len, pad, queue;
912 u8 *data;
913 u32 rxhash;
914 enum pkt_hash_types rxhash_type;
915
916#ifdef BNX2X_STOP_ON_ERROR
917 if (unlikely(bp->panic))
918 return 0;
919#endif
920
921 bd_prod = RX_BD(bd_prod);
922 bd_cons = RX_BD(bd_cons);
923
		/* A rmb() is required to ensure that the CQE is not read
		 * before it is written by the adapter DMA.  PCI ordering
		 * rules will make sure the other fields are written before
		 * the marker at the end of struct eth_fast_path_rx_cqe
		 * but without rmb() a weakly ordered processor can process
		 * stale data.  Without the barrier TPA state-machine might
		 * enter inconsistent state and kernel stack might be
		 * provided with incorrect packet description - these lead
		 * to various kernel crashes.
		 */
934 rmb();
935
936 cqe_fp_flags = cqe_fp->type_error_flags;
937 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
938
939 DP(NETIF_MSG_RX_STATUS,
940 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
941 CQE_TYPE(cqe_fp_flags),
942 cqe_fp_flags, cqe_fp->status_flags,
943 le32_to_cpu(cqe_fp->rss_hash_result),
944 le16_to_cpu(cqe_fp->vlan_tag),
945 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
946
947
948 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
949 bnx2x_sp_event(fp, cqe);
950 goto next_cqe;
951 }
952
953 rx_buf = &fp->rx_buf_ring[bd_cons];
954 data = rx_buf->data;
955
956 if (!CQE_TYPE_FAST(cqe_fp_type)) {
957 struct bnx2x_agg_info *tpa_info;
958 u16 frag_size, pages;
959#ifdef BNX2X_STOP_ON_ERROR
960
961 if (fp->mode == TPA_MODE_DISABLED &&
962 (CQE_TYPE_START(cqe_fp_type) ||
963 CQE_TYPE_STOP(cqe_fp_type)))
964 BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
965 CQE_TYPE(cqe_fp_type));
966#endif
967
968 if (CQE_TYPE_START(cqe_fp_type)) {
969 u16 queue = cqe_fp->queue_index;
970 DP(NETIF_MSG_RX_STATUS,
971 "calling tpa_start on queue %d\n",
972 queue);
973
974 bnx2x_tpa_start(fp, queue,
975 bd_cons, bd_prod,
976 cqe_fp);
977
978 goto next_rx;
979 }
980 queue = cqe->end_agg_cqe.queue_index;
981 tpa_info = &fp->tpa_info[queue];
982 DP(NETIF_MSG_RX_STATUS,
983 "calling tpa_stop on queue %d\n",
984 queue);
985
986 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
987 tpa_info->len_on_bd;
988
989 if (fp->mode == TPA_MODE_GRO)
990 pages = (frag_size + tpa_info->full_page - 1) /
991 tpa_info->full_page;
992 else
993 pages = SGE_PAGE_ALIGN(frag_size) >>
994 SGE_PAGE_SHIFT;
995
996 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
997 &cqe->end_agg_cqe, comp_ring_cons);
998#ifdef BNX2X_STOP_ON_ERROR
999 if (bp->panic)
1000 return 0;
1001#endif
1002
1003 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
1004 goto next_cqe;
1005 }
1006
1007 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
1008 pad = cqe_fp->placement_offset;
1009 dma_sync_single_for_cpu(&bp->pdev->dev,
1010 dma_unmap_addr(rx_buf, mapping),
1011 pad + RX_COPY_THRESH,
1012 DMA_FROM_DEVICE);
1013 pad += NET_SKB_PAD;
1014 prefetch(data + pad);
1015
1016 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1017 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1018 "ERROR flags %x rx packet %u\n",
1019 cqe_fp_flags, sw_comp_cons);
1020 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
1021 goto reuse_rx;
1022 }
1023
1024
1025
1026
1027 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1028 (len <= RX_COPY_THRESH)) {
1029 skb = napi_alloc_skb(&fp->napi, len);
1030 if (skb == NULL) {
1031 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1032 "ERROR packet dropped because of alloc failure\n");
1033 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1034 goto reuse_rx;
1035 }
1036 memcpy(skb->data, data + pad, len);
1037 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1038 } else {
1039 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1040 GFP_ATOMIC) == 0)) {
1041 dma_unmap_single(&bp->pdev->dev,
1042 dma_unmap_addr(rx_buf, mapping),
1043 fp->rx_buf_size,
1044 DMA_FROM_DEVICE);
1045 skb = build_skb(data, fp->rx_frag_size);
1046 if (unlikely(!skb)) {
1047 bnx2x_frag_free(fp, data);
1048 bnx2x_fp_qstats(bp, fp)->
1049 rx_skb_alloc_failed++;
1050 goto next_rx;
1051 }
1052 skb_reserve(skb, pad);
1053 } else {
1054 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1055 "ERROR packet dropped because of alloc failure\n");
1056 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1057reuse_rx:
1058 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1059 goto next_rx;
1060 }
1061 }
1062
1063 skb_put(skb, len);
1064 skb->protocol = eth_type_trans(skb, bp->dev);
1065
1066
1067 rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1068 skb_set_hash(skb, rxhash, rxhash_type);
1069
1070 skb_checksum_none_assert(skb);
1071
1072 if (bp->dev->features & NETIF_F_RXCSUM)
1073 bnx2x_csum_validate(skb, cqe, fp,
1074 bnx2x_fp_qstats(bp, fp));
1075
1076 skb_record_rx_queue(skb, fp->rx_queue);
1077
1078
1079 if (unlikely(cqe->fast_path_cqe.type_error_flags &
1080 (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
1081 bnx2x_set_rx_ts(bp, skb);
1082
1083 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1084 PARSING_FLAGS_VLAN)
1085 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1086 le16_to_cpu(cqe_fp->vlan_tag));
1087
1088 napi_gro_receive(&fp->napi, skb);
1089next_rx:
1090 rx_buf->data = NULL;
1091
1092 bd_cons = NEXT_RX_IDX(bd_cons);
1093 bd_prod = NEXT_RX_IDX(bd_prod);
1094 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1095 rx_pkt++;
1096next_cqe:
1097 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1098 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1099
1100
1101 BNX2X_SEED_CQE(cqe_fp);
1102
1103 if (rx_pkt == budget)
1104 break;
1105
1106 comp_ring_cons = RCQ_BD(sw_comp_cons);
1107 cqe = &fp->rx_comp_ring[comp_ring_cons];
1108 cqe_fp = &cqe->fast_path_cqe;
1109 }
1110
1111 fp->rx_bd_cons = bd_cons;
1112 fp->rx_bd_prod = bd_prod_fw;
1113 fp->rx_comp_cons = sw_comp_cons;
1114 fp->rx_comp_prod = sw_comp_prod;
1115
1116
1117 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1118 fp->rx_sge_prod);
1119
1120 return rx_pkt;
1121}
1122
1123static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1124{
1125 struct bnx2x_fastpath *fp = fp_cookie;
1126 struct bnx2x *bp = fp->bp;
1127 u8 cos;
1128
1129 DP(NETIF_MSG_INTR,
1130 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1131 fp->index, fp->fw_sb_id, fp->igu_sb_id);
1132
1133 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1134
1135#ifdef BNX2X_STOP_ON_ERROR
1136 if (unlikely(bp->panic))
1137 return IRQ_HANDLED;
1138#endif
1139
1140
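	/* Handle Rx and Tx according to MSI-X vector */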
1141 for_each_cos_in_tx_queue(fp, cos)
1142 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1143
1144 prefetch(&fp->sb_running_index[SM_RX_ID]);
1145 napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
1146
1147 return IRQ_HANDLED;
1148}
1149
1150
1151void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1152{
1153 mutex_lock(&bp->port.phy_mutex);
1154
1155 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1156}
1157
1158void bnx2x_release_phy_lock(struct bnx2x *bp)
1159{
1160 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1161
1162 mutex_unlock(&bp->port.phy_mutex);
1163}

/* Calculate the effective line speed according to the multi-function (MF)
 * bandwidth configuration.
 */
1166u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1167{
1168 u16 line_speed = bp->link_vars.line_speed;
1169 if (IS_MF(bp)) {
1170 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1171 bp->mf_config[BP_VN(bp)]);
1172
1173
1174
1175
1176 if (IS_MF_PERCENT_BW(bp))
1177 line_speed = (line_speed * maxCfg) / 100;
1178 else {
1179 u16 vn_max_rate = maxCfg * 100;
1180
1181 if (vn_max_rate < line_speed)
1182 line_speed = vn_max_rate;
1183 }
1184 }
1185
1186 return line_speed;
1187}
1188
/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * It uses a non-atomic way of accessing link parameters in SHMEM2.
 */
1197static void bnx2x_fill_report_data(struct bnx2x *bp,
1198 struct bnx2x_link_report_data *data)
1199{
1200 memset(data, 0, sizeof(*data));
1201
1202 if (IS_PF(bp)) {
1203
1204 data->line_speed = bnx2x_get_mf_speed(bp);
1205
1206
1207 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1208 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1209 &data->link_report_flags);
1210
1211 if (!BNX2X_NUM_ETH_QUEUES(bp))
1212 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1213 &data->link_report_flags);
1214
1215
1216 if (bp->link_vars.duplex == DUPLEX_FULL)
1217 __set_bit(BNX2X_LINK_REPORT_FD,
1218 &data->link_report_flags);
1219
1220
1221 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1222 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1223 &data->link_report_flags);
1224
1225
1226 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1227 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1228 &data->link_report_flags);
1229 } else {
1230 *data = bp->vf_link_vars;
1231 }
1232}
1233
/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls the __bnx2x_link_report() under the same locking scheme
 * as a link/PHY state managing code to ensure a consistent link
 * reporting.
 */
1244void bnx2x_link_report(struct bnx2x *bp)
1245{
1246 bnx2x_acquire_phy_lock(bp);
1247 __bnx2x_link_report(bp);
1248 bnx2x_release_phy_lock(bp);
1249}
1250
/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
1259void __bnx2x_link_report(struct bnx2x *bp)
1260{
1261 struct bnx2x_link_report_data cur_data;
1262
1263
1264 if (IS_PF(bp) && !CHIP_IS_E1(bp))
1265 bnx2x_read_mf_cfg(bp);
1266
1267
1268 bnx2x_fill_report_data(bp, &cur_data);
1269
1270
1271 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1272 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1273 &bp->last_reported_link.link_report_flags) &&
1274 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1275 &cur_data.link_report_flags)))
1276 return;
1277
1278 bp->link_cnt++;
1279
1280
1281
1282
1283 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1284
1285
1286 if (IS_PF(bp))
1287 bnx2x_iov_link_update(bp);
1288
1289 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1290 &cur_data.link_report_flags)) {
1291 netif_carrier_off(bp->dev);
1292 netdev_err(bp->dev, "NIC Link is Down\n");
1293 return;
1294 } else {
1295 const char *duplex;
1296 const char *flow;
1297
1298 netif_carrier_on(bp->dev);
1299
1300 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1301 &cur_data.link_report_flags))
1302 duplex = "full";
1303 else
1304 duplex = "half";
1305
1306
1307
1308
1309
1310 if (cur_data.link_report_flags) {
1311 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1312 &cur_data.link_report_flags)) {
1313 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1314 &cur_data.link_report_flags))
1315 flow = "ON - receive & transmit";
1316 else
1317 flow = "ON - receive";
1318 } else {
1319 flow = "ON - transmit";
1320 }
1321 } else {
1322 flow = "none";
1323 }
1324 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1325 cur_data.line_speed, duplex, flow);
1326 }
1327}
1328
1329static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1330{
1331 int i;
1332
1333 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1334 struct eth_rx_sge *sge;
1335
1336 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1337 sge->addr_hi =
1338 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1339 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1340
1341 sge->addr_lo =
1342 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1343 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1344 }
1345}
1346
1347static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1348 struct bnx2x_fastpath *fp, int last)
1349{
1350 int i;
1351
1352 for (i = 0; i < last; i++) {
1353 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1354 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1355 u8 *data = first_buf->data;
1356
1357 if (data == NULL) {
1358 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1359 continue;
1360 }
1361 if (tpa_info->tpa_state == BNX2X_TPA_START)
1362 dma_unmap_single(&bp->pdev->dev,
1363 dma_unmap_addr(first_buf, mapping),
1364 fp->rx_buf_size, DMA_FROM_DEVICE);
1365 bnx2x_frag_free(fp, data);
1366 first_buf->data = NULL;
1367 }
1368}
1369
1370void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1371{
1372 int j;
1373
1374 for_each_rx_queue_cnic(bp, j) {
1375 struct bnx2x_fastpath *fp = &bp->fp[j];
1376
1377 fp->rx_bd_cons = 0;
1378
1379
1380
1381
1382
1383
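		/* Activate the BD ring.
		 * Warning! this will generate an interrupt (to the TSTORM);
		 * must only be done after the chip is initialized.
		 */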
1384 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1385 fp->rx_sge_prod);
1386 }
1387}
1388
1389void bnx2x_init_rx_rings(struct bnx2x *bp)
1390{
1391 int func = BP_FUNC(bp);
1392 u16 ring_prod;
1393 int i, j;
1394
1395
1396 for_each_eth_queue(bp, j) {
1397 struct bnx2x_fastpath *fp = &bp->fp[j];
1398
1399 DP(NETIF_MSG_IFUP,
1400 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1401
1402 if (fp->mode != TPA_MODE_DISABLED) {
1403
1404 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1405 struct bnx2x_agg_info *tpa_info =
1406 &fp->tpa_info[i];
1407 struct sw_rx_bd *first_buf =
1408 &tpa_info->first_buf;
1409
1410 first_buf->data =
1411 bnx2x_frag_alloc(fp, GFP_KERNEL);
1412 if (!first_buf->data) {
1413 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1414 j);
1415 bnx2x_free_tpa_pool(bp, fp, i);
1416 fp->mode = TPA_MODE_DISABLED;
1417 break;
1418 }
1419 dma_unmap_addr_set(first_buf, mapping, 0);
1420 tpa_info->tpa_state = BNX2X_TPA_STOP;
1421 }
1422
1423
1424 bnx2x_set_next_page_sgl(fp);
1425
1426
1427 bnx2x_init_sge_ring_bit_mask(fp);
1428
1429
1430 for (i = 0, ring_prod = 0;
1431 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1432
1433 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1434 GFP_KERNEL) < 0) {
1435 BNX2X_ERR("was only able to allocate %d rx sges\n",
1436 i);
1437 BNX2X_ERR("disabling TPA for queue[%d]\n",
1438 j);
1439
1440 bnx2x_free_rx_sge_range(bp, fp,
1441 ring_prod);
1442 bnx2x_free_tpa_pool(bp, fp,
1443 MAX_AGG_QS(bp));
1444 fp->mode = TPA_MODE_DISABLED;
1445 ring_prod = 0;
1446 break;
1447 }
1448 ring_prod = NEXT_SGE_IDX(ring_prod);
1449 }
1450
1451 fp->rx_sge_prod = ring_prod;
1452 }
1453 }
1454
1455 for_each_eth_queue(bp, j) {
1456 struct bnx2x_fastpath *fp = &bp->fp[j];
1457
1458 fp->rx_bd_cons = 0;
1459
1460
1461
1462
1463
1464
1465 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1466 fp->rx_sge_prod);
1467
1468 if (j != 0)
1469 continue;
1470
1471 if (CHIP_IS_E1(bp)) {
1472 REG_WR(bp, BAR_USTRORM_INTMEM +
1473 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1474 U64_LO(fp->rx_comp_mapping));
1475 REG_WR(bp, BAR_USTRORM_INTMEM +
1476 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1477 U64_HI(fp->rx_comp_mapping));
1478 }
1479 }
1480}
1481
1482static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1483{
1484 u8 cos;
1485 struct bnx2x *bp = fp->bp;
1486
1487 for_each_cos_in_tx_queue(fp, cos) {
1488 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1489 unsigned pkts_compl = 0, bytes_compl = 0;
1490
1491 u16 sw_prod = txdata->tx_pkt_prod;
1492 u16 sw_cons = txdata->tx_pkt_cons;
1493
1494 while (sw_cons != sw_prod) {
1495 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1496 &pkts_compl, &bytes_compl);
1497 sw_cons++;
1498 }
1499
1500 netdev_tx_reset_queue(
1501 netdev_get_tx_queue(bp->dev,
1502 txdata->txq_index));
1503 }
1504}
1505
1506static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1507{
1508 int i;
1509
1510 for_each_tx_queue_cnic(bp, i) {
1511 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1512 }
1513}
1514
1515static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1516{
1517 int i;
1518
1519 for_each_eth_queue(bp, i) {
1520 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1521 }
1522}
1523
1524static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1525{
1526 struct bnx2x *bp = fp->bp;
1527 int i;
1528
1529
1530 if (fp->rx_buf_ring == NULL)
1531 return;
1532
1533 for (i = 0; i < NUM_RX_BD; i++) {
1534 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1535 u8 *data = rx_buf->data;
1536
1537 if (data == NULL)
1538 continue;
1539 dma_unmap_single(&bp->pdev->dev,
1540 dma_unmap_addr(rx_buf, mapping),
1541 fp->rx_buf_size, DMA_FROM_DEVICE);
1542
1543 rx_buf->data = NULL;
1544 bnx2x_frag_free(fp, data);
1545 }
1546}
1547
1548static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1549{
1550 int j;
1551
1552 for_each_rx_queue_cnic(bp, j) {
1553 bnx2x_free_rx_bds(&bp->fp[j]);
1554 }
1555}
1556
1557static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1558{
1559 int j;
1560
1561 for_each_eth_queue(bp, j) {
1562 struct bnx2x_fastpath *fp = &bp->fp[j];
1563
1564 bnx2x_free_rx_bds(fp);
1565
1566 if (fp->mode != TPA_MODE_DISABLED)
1567 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1568 }
1569}
1570
1571static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1572{
1573 bnx2x_free_tx_skbs_cnic(bp);
1574 bnx2x_free_rx_skbs_cnic(bp);
1575}
1576
1577void bnx2x_free_skbs(struct bnx2x *bp)
1578{
1579 bnx2x_free_tx_skbs(bp);
1580 bnx2x_free_rx_skbs(bp);
1581}
1582
1583void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1584{
1585
1586 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1587
1588 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1589
1590 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1591
1592
1593 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1594 & FUNC_MF_CFG_MAX_BW_MASK;
1595
1596 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1597 }
1598}
1599
/**
 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
 *
 * @bp:		driver handle
 * @nvecs:	number of vectors to be released
 */
1606static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1607{
1608 int i, offset = 0;
1609
1610 if (nvecs == offset)
1611 return;
1612
1613
1614 if (IS_PF(bp)) {
1615 free_irq(bp->msix_table[offset].vector, bp->dev);
1616 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1617 bp->msix_table[offset].vector);
1618 offset++;
1619 }
1620
1621 if (CNIC_SUPPORT(bp)) {
1622 if (nvecs == offset)
1623 return;
1624 offset++;
1625 }
1626
1627 for_each_eth_queue(bp, i) {
1628 if (nvecs == offset)
1629 return;
1630 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1631 i, bp->msix_table[offset].vector);
1632
1633 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1634 }
1635}
1636
1637void bnx2x_free_irq(struct bnx2x *bp)
1638{
1639 if (bp->flags & USING_MSIX_FLAG &&
1640 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1641 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1642
1643
1644 if (IS_PF(bp))
1645 nvecs++;
1646
1647 bnx2x_free_msix_irqs(bp, nvecs);
1648 } else {
1649 free_irq(bp->dev->irq, bp->dev);
1650 }
1651}
1652
1653int bnx2x_enable_msix(struct bnx2x *bp)
1654{
1655 int msix_vec = 0, i, rc;
1656
1657
1658 if (IS_PF(bp)) {
1659 bp->msix_table[msix_vec].entry = msix_vec;
1660 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1661 bp->msix_table[0].entry);
1662 msix_vec++;
1663 }
1664
1665
1666 if (CNIC_SUPPORT(bp)) {
1667 bp->msix_table[msix_vec].entry = msix_vec;
1668 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1669 msix_vec, bp->msix_table[msix_vec].entry);
1670 msix_vec++;
1671 }
1672
1673
1674 for_each_eth_queue(bp, i) {
1675 bp->msix_table[msix_vec].entry = msix_vec;
1676 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1677 msix_vec, msix_vec, i);
1678 msix_vec++;
1679 }
1680
1681 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1682 msix_vec);
1683
1684 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1685 BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
1686
1687
1688
1689
1690 if (rc == -ENOSPC) {
1691
1692 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1693 if (rc < 0) {
1694 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1695 rc);
1696 goto no_msix;
1697 }
1698
1699 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1700 bp->flags |= USING_SINGLE_MSIX_FLAG;
1701
1702 BNX2X_DEV_INFO("set number of queues to 1\n");
1703 bp->num_ethernet_queues = 1;
1704 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1705 } else if (rc < 0) {
1706 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1707 goto no_msix;
1708 } else if (rc < msix_vec) {
1709
1710 int diff = msix_vec - rc;
1711
1712 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1713
1714
1715
1716
1717 bp->num_ethernet_queues -= diff;
1718 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1719
1720 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1721 bp->num_queues);
1722 }
1723
1724 bp->flags |= USING_MSIX_FLAG;
1725
1726 return 0;
1727
1728no_msix:
1729
1730 if (rc == -ENOMEM)
1731 bp->flags |= DISABLE_MSI_FLAG;
1732
1733 return rc;
1734}
1735
1736static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1737{
1738 int i, rc, offset = 0;
1739
1740
1741 if (IS_PF(bp)) {
1742 rc = request_irq(bp->msix_table[offset++].vector,
1743 bnx2x_msix_sp_int, 0,
1744 bp->dev->name, bp->dev);
1745 if (rc) {
1746 BNX2X_ERR("request sp irq failed\n");
1747 return -EBUSY;
1748 }
1749 }
1750
1751 if (CNIC_SUPPORT(bp))
1752 offset++;
1753
1754 for_each_eth_queue(bp, i) {
1755 struct bnx2x_fastpath *fp = &bp->fp[i];
1756 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1757 bp->dev->name, i);
1758
1759 rc = request_irq(bp->msix_table[offset].vector,
1760 bnx2x_msix_fp_int, 0, fp->name, fp);
1761 if (rc) {
1762 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1763 bp->msix_table[offset].vector, rc);
1764 bnx2x_free_msix_irqs(bp, offset);
1765 return -EBUSY;
1766 }
1767
1768 offset++;
1769 }
1770
1771 i = BNX2X_NUM_ETH_QUEUES(bp);
1772 if (IS_PF(bp)) {
1773 offset = 1 + CNIC_SUPPORT(bp);
1774 netdev_info(bp->dev,
1775 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1776 bp->msix_table[0].vector,
1777 0, bp->msix_table[offset].vector,
1778 i - 1, bp->msix_table[offset + i - 1].vector);
1779 } else {
1780 offset = CNIC_SUPPORT(bp);
1781 netdev_info(bp->dev,
1782 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1783 0, bp->msix_table[offset].vector,
1784 i - 1, bp->msix_table[offset + i - 1].vector);
1785 }
1786 return 0;
1787}
1788
1789int bnx2x_enable_msi(struct bnx2x *bp)
1790{
1791 int rc;
1792
1793 rc = pci_enable_msi(bp->pdev);
1794 if (rc) {
1795 BNX2X_DEV_INFO("MSI is not attainable\n");
1796 return -1;
1797 }
1798 bp->flags |= USING_MSI_FLAG;
1799
1800 return 0;
1801}
1802
1803static int bnx2x_req_irq(struct bnx2x *bp)
1804{
1805 unsigned long flags;
1806 unsigned int irq;
1807
1808 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1809 flags = 0;
1810 else
1811 flags = IRQF_SHARED;
1812
1813 if (bp->flags & USING_MSIX_FLAG)
1814 irq = bp->msix_table[0].vector;
1815 else
1816 irq = bp->pdev->irq;
1817
1818 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1819}
1820
1821static int bnx2x_setup_irqs(struct bnx2x *bp)
1822{
1823 int rc = 0;
1824 if (bp->flags & USING_MSIX_FLAG &&
1825 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1826 rc = bnx2x_req_msix_irqs(bp);
1827 if (rc)
1828 return rc;
1829 } else {
1830 rc = bnx2x_req_irq(bp);
1831 if (rc) {
1832 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1833 return rc;
1834 }
1835 if (bp->flags & USING_MSI_FLAG) {
1836 bp->dev->irq = bp->pdev->irq;
1837 netdev_info(bp->dev, "using MSI IRQ %d\n",
1838 bp->dev->irq);
1839 }
1840 if (bp->flags & USING_MSIX_FLAG) {
1841 bp->dev->irq = bp->msix_table[0].vector;
1842 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1843 bp->dev->irq);
1844 }
1845 }
1846
1847 return 0;
1848}
1849
1850static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1851{
1852 int i;
1853
1854 for_each_rx_queue_cnic(bp, i) {
1855 napi_enable(&bnx2x_fp(bp, i, napi));
1856 }
1857}
1858
1859static void bnx2x_napi_enable(struct bnx2x *bp)
1860{
1861 int i;
1862
1863 for_each_eth_queue(bp, i) {
1864 napi_enable(&bnx2x_fp(bp, i, napi));
1865 }
1866}
1867
1868static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1869{
1870 int i;
1871
1872 for_each_rx_queue_cnic(bp, i) {
1873 napi_disable(&bnx2x_fp(bp, i, napi));
1874 }
1875}
1876
1877static void bnx2x_napi_disable(struct bnx2x *bp)
1878{
1879 int i;
1880
1881 for_each_eth_queue(bp, i) {
1882 napi_disable(&bnx2x_fp(bp, i, napi));
1883 }
1884}
1885
1886void bnx2x_netif_start(struct bnx2x *bp)
1887{
1888 if (netif_running(bp->dev)) {
1889 bnx2x_napi_enable(bp);
1890 if (CNIC_LOADED(bp))
1891 bnx2x_napi_enable_cnic(bp);
1892 bnx2x_int_enable(bp);
1893 if (bp->state == BNX2X_STATE_OPEN)
1894 netif_tx_wake_all_queues(bp->dev);
1895 }
1896}
1897
1898void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1899{
1900 bnx2x_int_disable_sync(bp, disable_hw);
1901 bnx2x_napi_disable(bp);
1902 if (CNIC_LOADED(bp))
1903 bnx2x_napi_disable_cnic(bp);
1904}
1905
1906u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1907 void *accel_priv, select_queue_fallback_t fallback)
1908{
1909 struct bnx2x *bp = netdev_priv(dev);
1910
1911 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1912 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1913 u16 ether_type = ntohs(hdr->h_proto);
1914
1915
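		/* Skip VLAN tag if present */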
1916 if (ether_type == ETH_P_8021Q) {
1917 struct vlan_ethhdr *vhdr =
1918 (struct vlan_ethhdr *)skb->data;
1919
1920 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1921 }
1922
1923
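		/* If ethertype is FCoE or FIP - use FCoE ring */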
1924 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1925 return bnx2x_fcoe_tx(bp, txq_index);
1926 }
1927
1928
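	/* select a non-FCoE queue */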
1929 return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
1930}
1931
1932void bnx2x_set_num_queues(struct bnx2x *bp)
1933{
1934
1935 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1936
1937
1938 if (IS_MF_STORAGE_ONLY(bp))
1939 bp->num_ethernet_queues = 1;
1940
1941
1942 bp->num_cnic_queues = CNIC_SUPPORT(bp);
1943 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1944
1945 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1946}
1947
/**
 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
 *
 * @bp:			driver handle
 * @include_cnic:	also account for the CNIC (FCoE) L2 queue
 *
 * The number of Tx queues exposed to the stack is
 * BNX2X_NUM_ETH_QUEUES(bp) * max_cos (one ring per CoS per ETH queue);
 * the number of Rx queues is BNX2X_NUM_ETH_QUEUES(bp).  If FCoE offload
 * is active, one extra Rx and Tx queue is added for the FCoE L2 ring.
 *
 * The proper configuration of skb->queue_mapping is handled by
 * bnx2x_select_queue() and __skb_tx_hash().
 */
1970static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1971{
1972 int rc, tx, rx;
1973
1974 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1975 rx = BNX2X_NUM_ETH_QUEUES(bp);
1976
1977
1978 if (include_cnic && !NO_FCOE(bp)) {
1979 rx++;
1980 tx++;
1981 }
1982
1983 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1984 if (rc) {
1985 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1986 return rc;
1987 }
1988 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1989 if (rc) {
1990 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1991 return rc;
1992 }
1993
1994 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
1995 tx, rx);
1996
1997 return rc;
1998}
1999
2000static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
2001{
2002 int i;
2003
2004 for_each_queue(bp, i) {
2005 struct bnx2x_fastpath *fp = &bp->fp[i];
2006 u32 mtu;
2007
2008
2009 if (IS_FCOE_IDX(i))
2010
2011
2012
2013
2014
2015
2016 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2017 else
2018 mtu = bp->dev->mtu;
2019 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2020 IP_HEADER_ALIGNMENT_PADDING +
2021 ETH_OVERHEAD +
2022 mtu +
2023 BNX2X_FW_RX_ALIGN_END;
2024 fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size);
2025
2026 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2027 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2028 else
2029 fp->rx_frag_size = 0;
2030 }
2031}
2032
2033static int bnx2x_init_rss(struct bnx2x *bp)
2034{
2035 int i;
2036 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2037
2038
2039
2040
2041 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2042 bp->rss_conf_obj.ind_table[i] =
2043 bp->fp->cl_id +
2044 ethtool_rxfh_indir_default(i, num_eth_queues);
2045
	/* For 57710 and 57711 SEARCHER configuration (rss_keys) is
	 * per-port, so if explicit configuration is needed, do it only
	 * for a PMF.
	 *
	 * For 57712 and newer on the other hand it's a per-function
	 * configuration.
	 */
2054 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
2055}
2056
2057int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2058 bool config_hash, bool enable)
2059{
2060 struct bnx2x_config_rss_params params = {NULL};
2061
	/* Although RSS is meaningless when there is a single HW queue we
	 * still need it enabled in order to have HW Rx hash generated.
	 */
2069 params.rss_obj = rss_obj;
2070
	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2072
2073 if (enable) {
		__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);

		/* RSS configuration */
		__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
		if (rss_obj->udp_rss_v4)
			__set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
		if (rss_obj->udp_rss_v6)
			__set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
2085
		if (!CHIP_IS_E1x(bp)) {
			/* VXLAN/tunnel RSS is supported only by E2/E3 (57712 and newer) */
			__set_bit(BNX2X_RSS_IPV4_VXLAN, &params.rss_flags);
			__set_bit(BNX2X_RSS_IPV6_VXLAN, &params.rss_flags);

			/* hash tunnelled packets on their inner headers as well */
			__set_bit(BNX2X_RSS_TUNN_INNER_HDRS, &params.rss_flags);
2093 }
2094 } else {
		__set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2096 }
2097
2098
2099 params.rss_result_mask = MULTI_MASK;
2100
2101 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
2102
2103 if (config_hash) {
2104
2105 netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
		__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
2107 }
2108
	if (IS_PF(bp))
		return bnx2x_config_rss(bp, &params);
	else
		return bnx2x_vfpf_config_rss(bp, &params);
2113}
2114
2115static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2116{
2117 struct bnx2x_func_state_params func_params = {NULL};
2118
2119
2120 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2121
2122 func_params.f_obj = &bp->func_obj;
2123 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2124
2125 func_params.params.hw_init.load_phase = load_code;
2126
2127 return bnx2x_func_state_change(bp, &func_params);
2128}
2129
/* Squeeze (clean up) the stateful objects before unloading: remove all
 * configured ETH and UC-list MACs and flush pending multicast commands,
 * using DRV_CLR_ONLY (no ramrods are sent to the FW).
 */
2134void bnx2x_squeeze_objects(struct bnx2x *bp)
2135{
2136 int rc;
2137 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2138 struct bnx2x_mcast_ramrod_params rparam = {NULL};
2139 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2140
2141
2142
2143
2144 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2145
2146 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2147
2148
2149 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2150 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2151 &ramrod_flags);
2152 if (rc != 0)
2153 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2154
2155
2156 vlan_mac_flags = 0;
2157 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2158 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2159 &ramrod_flags);
2160 if (rc != 0)
2161 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2162
2163
2164 rparam.mcast_obj = &bp->mcast_obj;
2165 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2166
2167
2168
2169
2170
2171 netif_addr_lock_bh(bp->dev);
2172 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2173 if (rc < 0)
2174 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2175 rc);
2176
2177
2178 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2179 while (rc != 0) {
2180 if (rc < 0) {
2181 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2182 rc);
2183 netif_addr_unlock_bh(bp->dev);
2184 return;
2185 }
2186
2187 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2188 }
2189 netif_addr_unlock_bh(bp->dev);
2190}
2191
2192#ifndef BNX2X_STOP_ON_ERROR
2193#define LOAD_ERROR_EXIT(bp, label) \
2194 do { \
2195 (bp)->state = BNX2X_STATE_ERROR; \
2196 goto label; \
2197 } while (0)
2198
2199#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2200 do { \
2201 bp->cnic_loaded = false; \
2202 goto label; \
2203 } while (0)
2204#else
2205#define LOAD_ERROR_EXIT(bp, label) \
2206 do { \
2207 (bp)->state = BNX2X_STATE_ERROR; \
2208 (bp)->panic = 1; \
2209 return -EBUSY; \
2210 } while (0)
2211#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2212 do { \
2213 bp->cnic_loaded = false; \
2214 (bp)->panic = 1; \
2215 return -EBUSY; \
2216 } while (0)
2217#endif
2218
2219static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2220{
2221 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2222 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2223 return;
2224}
2225
2226static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2227{
2228 int num_groups, vf_headroom = 0;
2229 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2230
2231
2232 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2233
2234
2235
2236
2237
2238
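	/* Total number of FW statistics requests =
	 * 1 for port stats + 1 for PF stats + 1 for FCoE stats (if present) +
	 * num of queues (which already includes the FCoE L2 queue if present)
	 */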
2239 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2240
2241
2242
2243
2244
2245
2246 if (IS_SRIOV(bp))
2247 vf_headroom = bnx2x_vf_headroom(bp);
2248
2249
2250
2251
2252
2253
2254 num_groups =
2255 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2256 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2257 1 : 0));
2258
2259 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2260 bp->fw_stats_num, vf_headroom, num_groups);
2261 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2262 num_groups * sizeof(struct stats_query_cmd_group);
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2273 sizeof(struct per_pf_stats) +
2274 sizeof(struct fcoe_statistics_params) +
2275 sizeof(struct per_queue_stats) * num_queue_stats +
2276 sizeof(struct stats_counter);
2277
2278 bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2279 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2280 if (!bp->fw_stats)
2281 goto alloc_mem_err;
2282
2283
2284 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2285 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2286 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2287 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2288 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2289 bp->fw_stats_req_sz;
2290
2291 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2292 U64_HI(bp->fw_stats_req_mapping),
2293 U64_LO(bp->fw_stats_req_mapping));
2294 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2295 U64_HI(bp->fw_stats_data_mapping),
2296 U64_LO(bp->fw_stats_data_mapping));
2297 return 0;
2298
2299alloc_mem_err:
2300 bnx2x_free_fw_stats_mem(bp);
2301 BNX2X_ERR("Can't allocate FW stats memory\n");
2302 return -ENOMEM;
2303}
2304
2305
2306static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2307{
2308 u32 param;
2309
2310
2311 bp->fw_seq =
2312 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2313 DRV_MSG_SEQ_NUMBER_MASK);
2314 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2315
2316
2317 bp->fw_drv_pulse_wr_seq =
2318 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2319 DRV_PULSE_SEQ_MASK);
2320 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2321
2322 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2323
2324 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2325 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2326
2327
2328 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2329
2330
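	/* if the MCP fails to respond we must abort */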
2331 if (!(*load_code)) {
2332 BNX2X_ERR("MCP response failure, aborting\n");
2333 return -EBUSY;
2334 }
2335
2336
2337
2338
2339 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2340 BNX2X_ERR("MCP refused load request, aborting\n");
2341 return -EBUSY;
2342 }
2343 return 0;
2344}
2345
/* Check whether another PF has already loaded FW to the chip (e.g. in a
 * virtualized environment or after an MF UNDI flow), and make sure its
 * version matches ours before proceeding.
 */
2350int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
2351{
2352
2353 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2354 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2355
2356 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2357 (BCM_5710_FW_MINOR_VERSION << 8) +
2358 (BCM_5710_FW_REVISION_VERSION << 16) +
2359 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2360
2361
2362 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2363
2364 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2365 loaded_fw, my_fw);
2366
2367
2368 if (my_fw != loaded_fw) {
2369 if (print_err)
2370 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2371 loaded_fw, my_fw);
2372 else
2373 BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2374 loaded_fw, my_fw);
2375 return -EBUSY;
2376 }
2377 }
2378 return 0;
2379}
2380
2381
2382static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2383{
2384 int path = BP_PATH(bp);
2385
2386 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2387 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2388 bnx2x_load_count[path][2]);
2389 bnx2x_load_count[path][0]++;
2390 bnx2x_load_count[path][1 + port]++;
2391 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2392 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2393 bnx2x_load_count[path][2]);
2394 if (bnx2x_load_count[path][0] == 1)
2395 return FW_MSG_CODE_DRV_LOAD_COMMON;
2396 else if (bnx2x_load_count[path][1 + port] == 1)
2397 return FW_MSG_CODE_DRV_LOAD_PORT;
2398 else
2399 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2400}
2401
2402
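/* Mark this function as the port management function (PMF) when the MCP
 * load response indicates it is the first to load on the port/chip.
 */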
2403static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2404{
2405 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2406 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2407 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2408 bp->port.pmf = 1;
2409
2410
2411
2412
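/* Order the write to bp->port.pmf before it is read by other contexts,
 * such as the periodic task.
 */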
2413 smp_mb();
2414 } else {
2415 bp->port.pmf = 0;
2416 }
2417
2418 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2419}
2420
2421static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2422{
2423 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2424 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2425 (bp->common.shmem2_base)) {
2426 if (SHMEM2_HAS(bp, dcc_support))
2427 SHMEM2_WR(bp, dcc_support,
2428 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2429 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2430 if (SHMEM2_HAS(bp, afex_driver_support))
2431 SHMEM2_WR(bp, afex_driver_support,
2432 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2433 }
2434
2435
2436 bp->afex_def_vlan_tag = -1;
2437}
2447
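/* bnx2x_bz_fp - zero the content of the fastpath structure at @index
 * while preserving the fields that are allocated only once (the napi
 * object and the tpa_info array), then restore the basic invariants:
 * the bp/index back-pointers, max_cos, the txdata pointers and the
 * TPA mode.
 */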
2448static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2449{
2450 struct bnx2x_fastpath *fp = &bp->fp[index];
2451 int cos;
2452 struct napi_struct orig_napi = fp->napi;
2453 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2454
2455
2456 if (fp->tpa_info)
2457 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2458 sizeof(struct bnx2x_agg_info));
2459 memset(fp, 0, sizeof(*fp));
2460
2461
2462 fp->napi = orig_napi;
2463 fp->tpa_info = orig_tpa_info;
2464 fp->bp = bp;
2465 fp->index = index;
2466 if (IS_ETH_FP(fp))
2467 fp->max_cos = bp->max_cos;
2468 else
2469
2470 fp->max_cos = 1;
2471
2472
2473 if (IS_FCOE_FP(fp))
2474 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2475 if (IS_ETH_FP(fp))
2476 for_each_cos_in_tx_queue(fp, cos)
2477 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2478 BNX2X_NUM_ETH_QUEUES(bp) + index];
2479
2480
2481
2482
2483 if (bp->dev->features & NETIF_F_LRO)
2484 fp->mode = TPA_MODE_LRO;
2485 else if (bp->dev->features & NETIF_F_GRO &&
2486 bnx2x_mtu_allows_gro(bp->dev->mtu))
2487 fp->mode = TPA_MODE_GRO;
2488 else
2489 fp->mode = TPA_MODE_DISABLED;
2490
2491
2492
2493
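/* TPA is never used when it is administratively disabled or on the
 * FCoE L2 ring.
 */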
2494 if (bp->disable_tpa || IS_FCOE_FP(fp))
2495 fp->mode = TPA_MODE_DISABLED;
2496}
2497
2498void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
2499{
2500 u32 cur;
2501
2502 if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
2503 return;
2504
2505 cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
2506 DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
2507 cur, state);
2508
2509 SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
2510}
2511
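/* Load the CNIC-related (iSCSI/FCoE) resources and queues on top of an
 * already loaded L2 driver.
 */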
2512int bnx2x_load_cnic(struct bnx2x *bp)
2513{
2514 int i, rc, port = BP_PORT(bp);
2515
2516 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2517
2518 mutex_init(&bp->cnic_mutex);
2519
2520 if (IS_PF(bp)) {
2521 rc = bnx2x_alloc_mem_cnic(bp);
2522 if (rc) {
2523 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2524 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2525 }
2526 }
2527
2528 rc = bnx2x_alloc_fp_mem_cnic(bp);
2529 if (rc) {
2530 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2531 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2532 }
2533
2534
2535 rc = bnx2x_set_real_num_queues(bp, 1);
2536 if (rc) {
2537 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2538 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2539 }
2540
2541
2542 bnx2x_add_all_napi_cnic(bp);
2543 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2544 bnx2x_napi_enable_cnic(bp);
2545
2546 rc = bnx2x_init_hw_func_cnic(bp);
2547 if (rc)
2548 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2549
2550 bnx2x_nic_init_cnic(bp);
2551
2552 if (IS_PF(bp)) {
2553
2554 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2555
2556
2557 for_each_cnic_queue(bp, i) {
2558 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2559 if (rc) {
2560 BNX2X_ERR("Queue setup failed\n");
2561 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2562 }
2563 }
2564 }
2565
2566
2567 bnx2x_set_rx_mode_inner(bp);
2568
2569
2570 bnx2x_get_iscsi_info(bp);
2571 bnx2x_setup_cnic_irq_info(bp);
2572 bnx2x_setup_cnic_info(bp);
2573 bp->cnic_loaded = true;
2574 if (bp->state == BNX2X_STATE_OPEN)
2575 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2576
2577 DP(NETIF_MSG_IFUP, "CNIC-related load completed successfully\n");
2578
2579 return 0;
2580
2581#ifndef BNX2X_STOP_ON_ERROR
2582load_error_cnic2:
2583
2584 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2585
2586load_error_cnic1:
2587 bnx2x_napi_disable_cnic(bp);
2588
2589 if (bnx2x_set_real_num_queues(bp, 0))
2590 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2591load_error_cnic0:
2592 BNX2X_ERR("CNIC-related load failed\n");
2593 bnx2x_free_fp_mem_cnic(bp);
2594 bnx2x_free_mem_cnic(bp);
2595 return rc;
2596#endif
2597}
2598
2599
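/* bnx2x_nic_load - bring the NIC up. Must be called with the rtnl lock
 * held.
 */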
2600int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2601{
2602 int port = BP_PORT(bp);
2603 int i, rc = 0, load_code = 0;
2604
2605 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2606 DP(NETIF_MSG_IFUP,
2607 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2608
2609#ifdef BNX2X_STOP_ON_ERROR
2610 if (unlikely(bp->panic)) {
2611 BNX2X_ERR("Can't load NIC when there is panic\n");
2612 return -EPERM;
2613 }
2614#endif
2615
2616 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2617
2618
2619 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2620 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2621 &bp->last_reported_link.link_report_flags);
2622
2623 if (IS_PF(bp))
2624
2625 bnx2x_ilt_set_info(bp);
2626
2627
2628
2629
2630
2631
2632 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2633 for_each_queue(bp, i)
2634 bnx2x_bz_fp(bp, i);
2635 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2636 bp->num_cnic_queues) *
2637 sizeof(struct bnx2x_fp_txdata));
2638
2639 bp->fcoe_init = false;
2640
2641
2642 bnx2x_set_rx_buf_size(bp);
2643
2644 if (IS_PF(bp)) {
2645 rc = bnx2x_alloc_mem(bp);
2646 if (rc) {
2647 BNX2X_ERR("Unable to allocate bp memory\n");
2648 return rc;
2649 }
2650 }
2651
2652
2653
2654
2655 rc = bnx2x_alloc_fp_mem(bp);
2656 if (rc) {
2657 BNX2X_ERR("Unable to allocate memory for fps\n");
2658 LOAD_ERROR_EXIT(bp, load_error0);
2659 }
2660
2661
2662 if (bnx2x_alloc_fw_stats_mem(bp))
2663 LOAD_ERROR_EXIT(bp, load_error0);
2664
2665
2666 if (IS_VF(bp)) {
2667 rc = bnx2x_vfpf_init(bp);
2668 if (rc)
2669 LOAD_ERROR_EXIT(bp, load_error0);
2670 }
2671
2672
2673
2674
2675
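/* bnx2x_alloc_fp_mem() may have reduced bp->num_queues, so only now
 * tell the stack the real number of queues (CNIC queues are not
 * included yet).
 */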
2676 rc = bnx2x_set_real_num_queues(bp, 0);
2677 if (rc) {
2678 BNX2X_ERR("Unable to set real_num_queues\n");
2679 LOAD_ERROR_EXIT(bp, load_error0);
2680 }
2681
2682
2683
2684
2685
2686 bnx2x_setup_tc(bp->dev, bp->max_cos);
2687
2688
2689 bnx2x_add_all_napi(bp);
2690 DP(NETIF_MSG_IFUP, "napi added\n");
2691 bnx2x_napi_enable(bp);
2692
2693 if (IS_PF(bp)) {
2694
2695 bnx2x_set_pf_load(bp);
2696
2697
2698 if (!BP_NOMCP(bp)) {
2699
2700 rc = bnx2x_nic_load_request(bp, &load_code);
2701 if (rc)
2702 LOAD_ERROR_EXIT(bp, load_error1);
2703
2704
2705 rc = bnx2x_compare_fw_ver(bp, load_code, true);
2706 if (rc) {
2707 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2708 LOAD_ERROR_EXIT(bp, load_error2);
2709 }
2710 } else {
2711 load_code = bnx2x_nic_load_no_mcp(bp, port);
2712 }
2713
2714
2715 bnx2x_nic_load_pmf(bp, load_code);
2716
2717
2718 bnx2x__init_func_obj(bp);
2719
2720
2721 rc = bnx2x_init_hw(bp, load_code);
2722 if (rc) {
2723 BNX2X_ERR("HW init failed, aborting\n");
2724 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2725 LOAD_ERROR_EXIT(bp, load_error2);
2726 }
2727 }
2728
2729 bnx2x_pre_irq_nic_init(bp);
2730
2731
2732 rc = bnx2x_setup_irqs(bp);
2733 if (rc) {
2734 BNX2X_ERR("setup irqs failed\n");
2735 if (IS_PF(bp))
2736 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2737 LOAD_ERROR_EXIT(bp, load_error2);
2738 }
2739
2740
2741 if (IS_PF(bp)) {
2742
2743 bnx2x_post_irq_nic_init(bp, load_code);
2744
2745 bnx2x_init_bp_objs(bp);
2746 bnx2x_iov_nic_init(bp);
2747
2748
2749 bp->afex_def_vlan_tag = -1;
2750 bnx2x_nic_load_afex_dcc(bp, load_code);
2751 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2752 rc = bnx2x_func_start(bp);
2753 if (rc) {
2754 BNX2X_ERR("Function start failed!\n");
2755 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2756
2757 LOAD_ERROR_EXIT(bp, load_error3);
2758 }
2759
2760
2761 if (!BP_NOMCP(bp)) {
2762 load_code = bnx2x_fw_command(bp,
2763 DRV_MSG_CODE_LOAD_DONE, 0);
2764 if (!load_code) {
2765 BNX2X_ERR("MCP response failure, aborting\n");
2766 rc = -EBUSY;
2767 LOAD_ERROR_EXIT(bp, load_error3);
2768 }
2769 }
2770
2771
2772 bnx2x_update_coalesce(bp);
2773 }
2774
2775
2776 rc = bnx2x_setup_leading(bp);
2777 if (rc) {
2778 BNX2X_ERR("Setup leading failed!\n");
2779 LOAD_ERROR_EXIT(bp, load_error3);
2780 }
2781
2782
2783 for_each_nondefault_eth_queue(bp, i) {
2784 if (IS_PF(bp))
2785 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2786 else
2787 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2788 if (rc) {
2789 BNX2X_ERR("Queue %d setup failed\n", i);
2790 LOAD_ERROR_EXIT(bp, load_error3);
2791 }
2792 }
2793
2794
2795 rc = bnx2x_init_rss(bp);
2796 if (rc) {
2797 BNX2X_ERR("PF RSS init failed\n");
2798 LOAD_ERROR_EXIT(bp, load_error3);
2799 }
2800
2801
2802 bp->state = BNX2X_STATE_OPEN;
2803
2804
2805 if (IS_PF(bp))
2806 rc = bnx2x_set_eth_mac(bp, true);
2807 else
2808 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2809 true);
2810 if (rc) {
2811 BNX2X_ERR("Setting Ethernet MAC failed\n");
2812 LOAD_ERROR_EXIT(bp, load_error3);
2813 }
2814
2815 if (IS_PF(bp) && bp->pending_max) {
2816 bnx2x_update_max_mf_config(bp, bp->pending_max);
2817 bp->pending_max = 0;
2818 }
2819
2820 if (bp->port.pmf) {
2821 rc = bnx2x_initial_phy_init(bp, load_mode);
2822 if (rc)
2823 LOAD_ERROR_EXIT(bp, load_error3);
2824 }
2825 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2826
2827
2828
2829
2830 rc = bnx2x_vlan_reconfigure_vid(bp);
2831 if (rc)
2832 LOAD_ERROR_EXIT(bp, load_error3);
2833
2834
2835 bnx2x_set_rx_mode_inner(bp);
2836
2837 if (bp->flags & PTP_SUPPORTED) {
2838 bnx2x_init_ptp(bp);
2839 bnx2x_configure_ptp_filters(bp);
2840 }
2841
2842 switch (load_mode) {
2843 case LOAD_NORMAL:
2844
2845 netif_tx_wake_all_queues(bp->dev);
2846 break;
2847
2848 case LOAD_OPEN:
2849 netif_tx_start_all_queues(bp->dev);
2850 smp_mb__after_atomic();
2851 break;
2852
2853 case LOAD_DIAG:
2854 case LOAD_LOOPBACK_EXT:
2855 bp->state = BNX2X_STATE_DIAG;
2856 break;
2857
2858 default:
2859 break;
2860 }
2861
2862 if (bp->port.pmf)
2863 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2864 else
2865 bnx2x__link_status_update(bp);
2866
2867
2868 mod_timer(&bp->timer, jiffies + bp->current_interval);
2869
2870 if (CNIC_ENABLED(bp))
2871 bnx2x_load_cnic(bp);
2872
2873 if (IS_PF(bp))
2874 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2875
2876 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2877
2878 u32 val;
2879 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2880 val &= ~DRV_FLAGS_MTU_MASK;
2881 val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
2882 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2883 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2884 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2885 }
2886
2887
2888 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2889 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2890 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2891 return -EBUSY;
2892 }
2893
2894
2895 if (IS_PF(bp))
2896 bnx2x_update_mfw_dump(bp);
2897
2898
2899 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2900 bnx2x_dcbx_init(bp, false);
2901
2902 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2903 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
2904
2905 DP(NETIF_MSG_IFUP, "NIC load completed successfully\n");
2906
2907 return 0;
2908
2909#ifndef BNX2X_STOP_ON_ERROR
2910load_error3:
2911 if (IS_PF(bp)) {
2912 bnx2x_int_disable_sync(bp, 1);
2913
2914
2915 bnx2x_squeeze_objects(bp);
2916 }
2917
2918
2919 bnx2x_free_skbs(bp);
2920 for_each_rx_queue(bp, i)
2921 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2922
2923
2924 bnx2x_free_irq(bp);
2925load_error2:
2926 if (IS_PF(bp) && !BP_NOMCP(bp)) {
2927 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2928 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2929 }
2930
2931 bp->port.pmf = 0;
2932load_error1:
2933 bnx2x_napi_disable(bp);
2934 bnx2x_del_all_napi(bp);
2935
2936
2937 if (IS_PF(bp))
2938 bnx2x_clear_pf_load(bp);
2939load_error0:
2940 bnx2x_free_fw_stats_mem(bp);
2941 bnx2x_free_fp_mem(bp);
2942 bnx2x_free_mem(bp);
2943
2944 return rc;
2945#endif
2946}
2947
2948int bnx2x_drain_tx_queues(struct bnx2x *bp)
2949{
2950 u8 rc = 0, cos, i;
2951
2952
2953 for_each_tx_queue(bp, i) {
2954 struct bnx2x_fastpath *fp = &bp->fp[i];
2955
2956 for_each_cos_in_tx_queue(fp, cos)
2957 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2958 if (rc)
2959 return rc;
2960 }
2961 return 0;
2962}
2963
2964
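/* bnx2x_nic_unload - bring the NIC down. Must be called with the rtnl
 * lock held.
 */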
2965int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2966{
2967 int i;
2968 bool global = false;
2969
2970 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2971
2972 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2973 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
2974
2975
2976 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2977 u32 val;
2978 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2979 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2980 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2981 }
2982
2983 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2984 (bp->state == BNX2X_STATE_CLOSED ||
2985 bp->state == BNX2X_STATE_ERROR)) {
2986
2987
2988
2989
2990
2991
2992
2993 bp->recovery_state = BNX2X_RECOVERY_DONE;
2994 bp->is_leader = 0;
2995 bnx2x_release_leader_lock(bp);
2996 smp_mb();
2997
2998 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2999 BNX2X_ERR("Can't unload in closed or error state\n");
3000 return -EINVAL;
3001 }
3002
3008
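/* If a previous load never completed, everything has already been
 * released - there is nothing left to unload.
 */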
3009 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
3010 return 0;
3011
3012
3013
3014
3015
3016 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
3017 smp_mb();
3018
3019
3020 bnx2x_iov_channel_down(bp);
3021
3022 if (CNIC_LOADED(bp))
3023 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
3024
3025
3026 bnx2x_tx_disable(bp);
3027 netdev_reset_tc(bp->dev);
3028
3029 bp->rx_mode = BNX2X_RX_MODE_NONE;
3030
3031 del_timer_sync(&bp->timer);
3032
3033 if (IS_PF(bp)) {
3034
3035 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3036 bnx2x_drv_pulse(bp);
3037 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
3038 bnx2x_save_statistics(bp);
3039 }
3040
3041
3042
3043
3044
3045 if (unload_mode != UNLOAD_RECOVERY)
3046 bnx2x_drain_tx_queues(bp);
3047
3048
3049
3050
3051 if (IS_VF(bp))
3052 bnx2x_vfpf_close_vf(bp);
3053 else if (unload_mode != UNLOAD_RECOVERY)
3054
3055 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
3056 else {
3057
3058 bnx2x_send_unload_req(bp, unload_mode);
3059
3060
3061
3062
3063
3064
3065
3066 if (!CHIP_IS_E1x(bp))
3067 bnx2x_pf_disable(bp);
3068
3069
3070 bnx2x_netif_stop(bp, 1);
3071
3072 bnx2x_del_all_napi(bp);
3073 if (CNIC_LOADED(bp))
3074 bnx2x_del_all_napi_cnic(bp);
3075
3076 bnx2x_free_irq(bp);
3077
3078
3079 bnx2x_send_unload_done(bp, false);
3080 }
3081
3082
3083
3084
3085
3086 if (IS_PF(bp))
3087 bnx2x_squeeze_objects(bp);
3088
3089
3090 bp->sp_state = 0;
3091
3092 bp->port.pmf = 0;
3093
3094
3095 bp->sp_rtnl_state = 0;
3096 smp_mb();
3097
3098
3099 bnx2x_free_skbs(bp);
3100 if (CNIC_LOADED(bp))
3101 bnx2x_free_skbs_cnic(bp);
3102 for_each_rx_queue(bp, i)
3103 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
3104
3105 bnx2x_free_fp_mem(bp);
3106 if (CNIC_LOADED(bp))
3107 bnx2x_free_fp_mem_cnic(bp);
3108
3109 if (IS_PF(bp)) {
3110 if (CNIC_LOADED(bp))
3111 bnx2x_free_mem_cnic(bp);
3112 }
3113 bnx2x_free_mem(bp);
3114
3115 bp->state = BNX2X_STATE_CLOSED;
3116 bp->cnic_loaded = false;
3117
3118
3119 if (IS_PF(bp))
3120 bnx2x_update_mng_version(bp);
3121
3122
3123
3124
3125 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
3126 bnx2x_set_reset_in_progress(bp);
3127
3128
3129 if (global)
3130 bnx2x_set_reset_global(bp);
3131 }
3132
3133
3134
3135
3136 if (IS_PF(bp) &&
3137 !bnx2x_clear_pf_load(bp) &&
3138 bnx2x_reset_is_done(bp, BP_PATH(bp)))
3139 bnx2x_disable_close_the_gate(bp);
3140
3141 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3142
3143 return 0;
3144}
3145
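/* Program the PCI power management state (D0 or D3hot) of the device */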
3146int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3147{
3148 u16 pmcsr;
3149
3150
3151 if (!bp->pdev->pm_cap) {
3152 BNX2X_DEV_INFO("No power capability. Breaking.\n");
3153 return 0;
3154 }
3155
3156 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3157
3158 switch (state) {
3159 case PCI_D0:
3160 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3161 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3162 PCI_PM_CTRL_PME_STATUS));
3163
3164 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3165
3166 msleep(20);
3167 break;
3168
3169 case PCI_D3hot:
3170
3171
3172 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3173 return 0;
3174
3175 if (CHIP_REV_IS_SLOW(bp))
3176 return 0;
3177
3178 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3179 pmcsr |= 3; /* enter the D3hot power state */
3180
3181 if (bp->wol)
3182 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3183
3184 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3185 pmcsr);
3186
3187
3188
3189
3190 break;
3191
3192 default:
3193 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3194 return -EINVAL;
3195 }
3196 return 0;
3197}
3198
3199
3200
3201
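/* NAPI poll handler - service Tx completions and up to @budget Rx
 * packets for one fastpath, then re-enable the status block interrupt
 * once all work is done.
 */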
3202static int bnx2x_poll(struct napi_struct *napi, int budget)
3203{
3204 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3205 napi);
3206 struct bnx2x *bp = fp->bp;
3207 int rx_work_done;
3208 u8 cos;
3209
3210#ifdef BNX2X_STOP_ON_ERROR
3211 if (unlikely(bp->panic)) {
3212 napi_complete(napi);
3213 return 0;
3214 }
3215#endif
3216 for_each_cos_in_tx_queue(fp, cos)
3217 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3218 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3219
3220 rx_work_done = (bnx2x_has_rx_work(fp)) ? bnx2x_rx_int(fp, budget) : 0;
3221
3222 if (rx_work_done < budget) {
3223
3224
3225
3226
3227 if (IS_FCOE_FP(fp)) {
3228 napi_complete_done(napi, rx_work_done);
3229 } else {
3230 bnx2x_update_fpsb_idx(fp);
3231
3232
3233
3234
3235
3236
3237
3238
3239
3240
3241
3242
3243
3244 rmb();
3245
3246 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3247 if (napi_complete_done(napi, rx_work_done)) {
3248
3249 DP(NETIF_MSG_RX_STATUS,
3250 "Update index to %d\n", fp->fp_hc_idx);
3251 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3252 le16_to_cpu(fp->fp_hc_idx),
3253 IGU_INT_ENABLE, 1);
3254 }
3255 } else {
3256 rx_work_done = budget;
3257 }
3258 }
3259 }
3260
3261 return rx_work_done;
3262}
3263
3264
3265
3266
3267
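/* Split the first BD of a TSO packet into a header BD and a data BD
 * that share a single DMA mapping; returns the updated BD producer.
 */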
3268static u16 bnx2x_tx_split(struct bnx2x *bp,
3269 struct bnx2x_fp_txdata *txdata,
3270 struct sw_tx_bd *tx_buf,
3271 struct eth_tx_start_bd **tx_bd, u16 hlen,
3272 u16 bd_prod)
3273{
3274 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3275 struct eth_tx_bd *d_tx_bd;
3276 dma_addr_t mapping;
3277 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3278
3279
3280 h_tx_bd->nbytes = cpu_to_le16(hlen);
3281
3282 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3283 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3284
3285
3286
3287 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3288 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3289
3290 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3291 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3292
3293 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3294 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3295 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3296
3297
3298 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3299
3300 DP(NETIF_MSG_TX_QUEUED,
3301 "TSO split data size is %d (%x:%x)\n",
3302 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3303
3304
3305 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3306
3307 return bd_prod;
3308}
3309
3310#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3311#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
3312static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3313{
3314 __sum16 tsum = (__force __sum16) csum;
3315
3316 if (fix > 0)
3317 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3318 csum_partial(t_header - fix, fix, 0)));
3319
3320 else if (fix < 0)
3321 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3322 csum_partial(t_header, -fix, 0)));
3323
3324 return bswab16(tsum);
3325}
3326
3327static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3328{
3329 u32 rc;
3330 __u8 prot = 0;
3331 __be16 protocol;
3332
3333 if (skb->ip_summed != CHECKSUM_PARTIAL)
3334 return XMIT_PLAIN;
3335
3336 protocol = vlan_get_protocol(skb);
3337 if (protocol == htons(ETH_P_IPV6)) {
3338 rc = XMIT_CSUM_V6;
3339 prot = ipv6_hdr(skb)->nexthdr;
3340 } else {
3341 rc = XMIT_CSUM_V4;
3342 prot = ip_hdr(skb)->protocol;
3343 }
3344
3345 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3346 if (inner_ip_hdr(skb)->version == 6) {
3347 rc |= XMIT_CSUM_ENC_V6;
3348 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3349 rc |= XMIT_CSUM_TCP;
3350 } else {
3351 rc |= XMIT_CSUM_ENC_V4;
3352 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3353 rc |= XMIT_CSUM_TCP;
3354 }
3355 }
3356 if (prot == IPPROTO_TCP)
3357 rc |= XMIT_CSUM_TCP;
3358
3359 if (skb_is_gso(skb)) {
3360 if (skb_is_gso_v6(skb)) {
3361 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3362 if (rc & XMIT_CSUM_ENC)
3363 rc |= XMIT_GSO_ENC_V6;
3364 } else {
3365 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3366 if (rc & XMIT_CSUM_ENC)
3367 rc |= XMIT_GSO_ENC_V4;
3368 }
3369 }
3370
3371 return rc;
3372}
3373
3374
3375#define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS 4
3376
3377
3378#define BNX2X_NUM_TSO_WIN_SUB_BDS 3
3379
3380#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3381
3382
3383
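/* Check whether a heavily fragmented skb must be linearized so that no
 * LSO window spans more fragments than the FW can fetch per packet.
 */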
3384static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3385 u32 xmit_type)
3386{
3387 int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS;
3388 int to_copy = 0, hlen = 0;
3389
3390 if (xmit_type & XMIT_GSO_ENC)
3391 num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS;
3392
3393 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) {
3394 if (xmit_type & XMIT_GSO) {
3395 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3396 int wnd_size = MAX_FETCH_BD - num_tso_win_sub;
3397
3398 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3399 int wnd_idx = 0;
3400 int frag_idx = 0;
3401 u32 wnd_sum = 0;
3402
3403
3404 if (xmit_type & XMIT_GSO_ENC)
3405 hlen = (int)(skb_inner_transport_header(skb) -
3406 skb->data) +
3407 inner_tcp_hdrlen(skb);
3408 else
3409 hlen = (int)(skb_transport_header(skb) -
3410 skb->data) + tcp_hdrlen(skb);
3411
3412
3413 first_bd_sz = skb_headlen(skb) - hlen;
3414
3415 wnd_sum = first_bd_sz;
3416
3417
3418 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3419 wnd_sum +=
3420 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3421
3422
3423 if (first_bd_sz > 0) {
3424 if (unlikely(wnd_sum < lso_mss)) {
3425 to_copy = 1;
3426 goto exit_lbl;
3427 }
3428
3429 wnd_sum -= first_bd_sz;
3430 }
3431
3432
3433
3434 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3435 wnd_sum +=
3436 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3437
3438 if (unlikely(wnd_sum < lso_mss)) {
3439 to_copy = 1;
3440 break;
3441 }
3442 wnd_sum -=
3443 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3444 }
3445 } else {
3446
3447
3448 to_copy = 1;
3449 }
3450 }
3451
3452exit_lbl:
3453 if (unlikely(to_copy))
3454 DP(NETIF_MSG_TX_QUEUED,
3455 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
3456 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3457 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3458
3459 return to_copy;
3460}
3461#endif
3469
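/* bnx2x_set_pbd_gso - fill the E1x parsing BD with the LSO parameters
 * (MSS, TCP sequence number and flags, pseudo header checksum).
 */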
3470static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3471 struct eth_tx_parse_bd_e1x *pbd,
3472 u32 xmit_type)
3473{
3474 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3475 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3476 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3477
3478 if (xmit_type & XMIT_GSO_V4) {
3479 pbd->ip_id = bswab16(ip_hdr(skb)->id);
3480 pbd->tcp_pseudo_csum =
3481 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3482 ip_hdr(skb)->daddr,
3483 0, IPPROTO_TCP, 0));
3484 } else {
3485 pbd->tcp_pseudo_csum =
3486 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3487 &ipv6_hdr(skb)->daddr,
3488 0, IPPROTO_TCP, 0));
3489 }
3490
3491 pbd->global_data |=
3492 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3493}
3504
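/* Update the E2 parsing data for a checksummed encapsulated packet and
 * return the length (in bytes) of the headers up to and including the
 * inner L4 header.
 */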
3505static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3506 u32 *parsing_data, u32 xmit_type)
3507{
3508 *parsing_data |=
3509 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3510 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3511 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3512
3513 if (xmit_type & XMIT_CSUM_TCP) {
3514 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3515 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3516 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3517
3518 return skb_inner_transport_header(skb) +
3519 inner_tcp_hdrlen(skb) - skb->data;
3520 }
3521
3522
3523
3524
3525 return skb_inner_transport_header(skb) +
3526 sizeof(struct udphdr) - skb->data;
3527}
3538
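/* Update the E2 parsing data for a checksummed packet and return the
 * length (in bytes) of the headers up to and including the L4 header.
 */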
3539static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3540 u32 *parsing_data, u32 xmit_type)
3541{
3542 *parsing_data |=
3543 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3544 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3545 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3546
3547 if (xmit_type & XMIT_CSUM_TCP) {
3548 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3549 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3550 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3551
3552 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3553 }
3554
3555
3556
3557 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3558}
3559
3560
3561static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3562 struct eth_tx_start_bd *tx_start_bd,
3563 u32 xmit_type)
3564{
3565 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3566
3567 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3568 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3569
3570 if (!(xmit_type & XMIT_CSUM_TCP))
3571 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3572}
3581
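/* Fill the E1x parsing BD for a checksummed packet and return the total
 * header length in bytes.
 */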
3582static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3583 struct eth_tx_parse_bd_e1x *pbd,
3584 u32 xmit_type)
3585{
3586 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3587
3588
3589 pbd->global_data =
3590 cpu_to_le16(hlen |
3591 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3592 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3593
3594 pbd->ip_hlen_w = (skb_transport_header(skb) -
3595 skb_network_header(skb)) >> 1;
3596
3597 hlen += pbd->ip_hlen_w;
3598
3599
3600 if (xmit_type & XMIT_CSUM_TCP)
3601 hlen += tcp_hdrlen(skb) / 2;
3602 else
3603 hlen += sizeof(struct udphdr) / 2;
3604
3605 pbd->total_hlen_w = cpu_to_le16(hlen);
3606 hlen = hlen*2;
3607
3608 if (xmit_type & XMIT_CSUM_TCP) {
3609 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3610
3611 } else {
3612 s8 fix = SKB_CS_OFF(skb);
3613
3614 DP(NETIF_MSG_TX_QUEUED,
3615 "hlen %d fix %d csum before fix %x\n",
3616 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3617
3618
3619 pbd->tcp_pseudo_csum =
3620 bnx2x_csum_fix(skb_transport_header(skb),
3621 SKB_CS(skb), fix);
3622
3623 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3624 pbd->tcp_pseudo_csum);
3625 }
3626
3627 return hlen;
3628}
3629
3630static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3631 struct eth_tx_parse_bd_e2 *pbd_e2,
3632 struct eth_tx_parse_2nd_bd *pbd2,
3633 u16 *global_data,
3634 u32 xmit_type)
3635{
3636 u16 hlen_w = 0;
3637 u8 outerip_off, outerip_len = 0;
3638
3639
3640 hlen_w = (skb_inner_transport_header(skb) -
3641 skb_network_header(skb)) >> 1;
3642
3643
3644 hlen_w += inner_tcp_hdrlen(skb) >> 1;
3645
3646 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3647
3648
3649 if (xmit_type & XMIT_CSUM_V4) {
3650 struct iphdr *iph = ip_hdr(skb);
3651 u32 csum = (__force u32)(~iph->check) -
3652 (__force u32)iph->tot_len -
3653 (__force u32)iph->frag_off;
3654
3655 outerip_len = iph->ihl << 1;
3656
3657 pbd2->fw_ip_csum_wo_len_flags_frag =
3658 bswab16(csum_fold((__force __wsum)csum));
3659 } else {
3660 pbd2->fw_ip_hdr_to_payload_w =
3661 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3662 pbd_e2->data.tunnel_data.flags |=
3663 ETH_TUNNEL_DATA_IPV6_OUTER;
3664 }
3665
3666 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3667
3668 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3669
3670
3671 if (xmit_type & XMIT_CSUM_ENC_V4) {
3672 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3673
3674 pbd_e2->data.tunnel_data.pseudo_csum =
3675 bswab16(~csum_tcpudp_magic(
3676 inner_ip_hdr(skb)->saddr,
3677 inner_ip_hdr(skb)->daddr,
3678 0, IPPROTO_TCP, 0));
3679 } else {
3680 pbd_e2->data.tunnel_data.pseudo_csum =
3681 bswab16(~csum_ipv6_magic(
3682 &inner_ipv6_hdr(skb)->saddr,
3683 &inner_ipv6_hdr(skb)->daddr,
3684 0, IPPROTO_TCP, 0));
3685 }
3686
3687 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3688
3689 *global_data |=
3690 outerip_off |
3691 (outerip_len <<
3692 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3693 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3694 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3695
3696 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3697 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3698 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3699 }
3700}
3701
3702static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
3703 u32 xmit_type)
3704{
3705 struct ipv6hdr *ipv6;
3706
3707 if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
3708 return;
3709
3710 if (xmit_type & XMIT_GSO_ENC_V6)
3711 ipv6 = inner_ipv6_hdr(skb);
3712 else
3713 ipv6 = ipv6_hdr(skb);
3714
3715 if (ipv6->nexthdr == NEXTHDR_IPV6)
3716 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3717}
3718
3719
3720
3721
3722
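/* bnx2x_start_xmit - the ndo_start_xmit handler; called by the stack
 * with the Tx queue lock held.
 */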
3723netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3724{
3725 struct bnx2x *bp = netdev_priv(dev);
3726
3727 struct netdev_queue *txq;
3728 struct bnx2x_fp_txdata *txdata;
3729 struct sw_tx_bd *tx_buf;
3730 struct eth_tx_start_bd *tx_start_bd, *first_bd;
3731 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3732 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3733 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3734 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3735 u32 pbd_e2_parsing_data = 0;
3736 u16 pkt_prod, bd_prod;
3737 int nbd, txq_index;
3738 dma_addr_t mapping;
3739 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3740 int i;
3741 u8 hlen = 0;
3742 __le16 pkt_size = 0;
3743 struct ethhdr *eth;
3744 u8 mac_type = UNICAST_ADDRESS;
3745
3746#ifdef BNX2X_STOP_ON_ERROR
3747 if (unlikely(bp->panic))
3748 return NETDEV_TX_BUSY;
3749#endif
3750
3751 txq_index = skb_get_queue_mapping(skb);
3752 txq = netdev_get_tx_queue(dev, txq_index);
3753
3754 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3755
3756 txdata = &bp->bnx2x_txq[txq_index];
3766
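/* Make sure there is enough room for the worst-case number of BDs this
 * packet may need; otherwise stop the queue.
 */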
3767 if (unlikely(bnx2x_tx_avail(bp, txdata) <
3768 skb_shinfo(skb)->nr_frags +
3769 BDS_PER_TX_PKT +
3770 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3771
3772 if (txdata->tx_ring_size == 0) {
3773 struct bnx2x_eth_q_stats *q_stats =
3774 bnx2x_fp_qstats(bp, txdata->parent_fp);
3775 q_stats->driver_filtered_tx_pkt++;
3776 dev_kfree_skb(skb);
3777 return NETDEV_TX_OK;
3778 }
3779 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3780 netif_tx_stop_queue(txq);
3781 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3782
3783 return NETDEV_TX_BUSY;
3784 }
3785
3786 DP(NETIF_MSG_TX_QUEUED,
3787 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
3788 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3789 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3790 skb->len);
3791
3792 eth = (struct ethhdr *)skb->data;
3793
3794
3795 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3796 if (is_broadcast_ether_addr(eth->h_dest))
3797 mac_type = BROADCAST_ADDRESS;
3798 else
3799 mac_type = MULTICAST_ADDRESS;
3800 }
3801
3802#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3803
3804
3805
3806 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3807
3808 bp->lin_cnt++;
3809 if (skb_linearize(skb) != 0) {
3810 DP(NETIF_MSG_TX_QUEUED,
3811 "SKB linearization failed - silently dropping this SKB\n");
3812 dev_kfree_skb_any(skb);
3813 return NETDEV_TX_OK;
3814 }
3815 }
3816#endif
3817
3818 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3819 skb_headlen(skb), DMA_TO_DEVICE);
3820 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3821 DP(NETIF_MSG_TX_QUEUED,
3822 "SKB mapping failed - silently dropping this SKB\n");
3823 dev_kfree_skb_any(skb);
3824 return NETDEV_TX_OK;
3825 }
3837
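/* Build the BD chain: a start BD first, then the parsing BD(s), and
 * finally the remaining data BDs.
 */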
3838 pkt_prod = txdata->tx_pkt_prod;
3839 bd_prod = TX_BD(txdata->tx_bd_prod);
3840
3841
3842
3843
3844
3845 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3846 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3847 first_bd = tx_start_bd;
3848
3849 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3850
3851 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
3852 if (!(bp->flags & TX_TIMESTAMPING_EN)) {
3853 BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
3854 } else if (bp->ptp_tx_skb) {
3855 BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
3856 } else {
3857 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3858
3859 bp->ptp_tx_skb = skb_get(skb);
3860 bp->ptp_tx_start = jiffies;
3861 schedule_work(&bp->ptp_task);
3862 }
3863 }
3864
3865
3866 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3867
3868
3869 tx_buf->first_bd = txdata->tx_bd_prod;
3870 tx_buf->skb = skb;
3871 tx_buf->flags = 0;
3872
3873 DP(NETIF_MSG_TX_QUEUED,
3874 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3875 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3876
3877 if (skb_vlan_tag_present(skb)) {
3878 tx_start_bd->vlan_or_ethertype =
3879 cpu_to_le16(skb_vlan_tag_get(skb));
3880 tx_start_bd->bd_flags.as_bitfield |=
3881 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3882 } else {
3883
3884
3885
3886 u16 vlan_tci = 0;
3887#ifndef BNX2X_STOP_ON_ERROR
3888 if (IS_VF(bp)) {
3889#endif
3890
3891 if (__vlan_get_tag(skb, &vlan_tci)) {
3892 tx_start_bd->vlan_or_ethertype =
3893 cpu_to_le16(ntohs(eth->h_proto));
3894 } else {
3895 tx_start_bd->bd_flags.as_bitfield |=
3896 (X_ETH_INBAND_VLAN <<
3897 ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3898 tx_start_bd->vlan_or_ethertype =
3899 cpu_to_le16(vlan_tci);
3900 }
3901#ifndef BNX2X_STOP_ON_ERROR
3902 } else {
3903
3904 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3905 }
3906#endif
3907 }
3908
3909 nbd = 2; /* start BD + parsing BD */
3910
3911
3912 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3913
3914 if (xmit_type & XMIT_CSUM)
3915 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3916
3917 if (!CHIP_IS_E1x(bp)) {
3918 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3919 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3920
3921 if (xmit_type & XMIT_CSUM_ENC) {
3922 u16 global_data = 0;
3923
3924
3925 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3926 &pbd_e2_parsing_data,
3927 xmit_type);
3928
3929
3930 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3931
3932 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3933
3934 memset(pbd2, 0, sizeof(*pbd2));
3935
3936 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3937 (skb_inner_network_header(skb) -
3938 skb->data) >> 1;
3939
3940 if (xmit_type & XMIT_GSO_ENC)
3941 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3942 &global_data,
3943 xmit_type);
3944
3945 pbd2->global_data = cpu_to_le16(global_data);
3946
3947
3948 SET_FLAG(tx_start_bd->general_data,
3949 ETH_TX_START_BD_PARSE_NBDS, 1);
3950
3951 SET_FLAG(tx_start_bd->general_data,
3952 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3953
3954 tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
3955
3956 nbd++;
3957 } else if (xmit_type & XMIT_CSUM) {
3958
3959 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3960 &pbd_e2_parsing_data,
3961 xmit_type);
3962 }
3963
3964 bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
3965
3966
3967
3968 if (IS_VF(bp)) {
3969
3970 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3971 &pbd_e2->data.mac_addr.src_mid,
3972 &pbd_e2->data.mac_addr.src_lo,
3973 eth->h_source);
3974
3975 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3976 &pbd_e2->data.mac_addr.dst_mid,
3977 &pbd_e2->data.mac_addr.dst_lo,
3978 eth->h_dest);
3979 } else {
3980 if (bp->flags & TX_SWITCHING)
3981 bnx2x_set_fw_mac_addr(
3982 &pbd_e2->data.mac_addr.dst_hi,
3983 &pbd_e2->data.mac_addr.dst_mid,
3984 &pbd_e2->data.mac_addr.dst_lo,
3985 eth->h_dest);
3986#ifdef BNX2X_STOP_ON_ERROR
3987
3988
3989
3990 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3991 &pbd_e2->data.mac_addr.src_mid,
3992 &pbd_e2->data.mac_addr.src_lo,
3993 eth->h_source);
3994#endif
3995 }
3996
3997 SET_FLAG(pbd_e2_parsing_data,
3998 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
3999 } else {
4000 u16 global_data = 0;
4001 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
4002 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
4003
4004 if (xmit_type & XMIT_CSUM)
4005 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
4006
4007 SET_FLAG(global_data,
4008 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
4009 pbd_e1x->global_data |= cpu_to_le16(global_data);
4010 }
4011
4012
4013 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4014 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4015 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
4016 pkt_size = tx_start_bd->nbytes;
4017
4018 DP(NETIF_MSG_TX_QUEUED,
4019 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
4020 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
4021 le16_to_cpu(tx_start_bd->nbytes),
4022 tx_start_bd->bd_flags.as_bitfield,
4023 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
4024
4025 if (xmit_type & XMIT_GSO) {
4026
4027 DP(NETIF_MSG_TX_QUEUED,
4028 "TSO packet len %d hlen %d total len %d tso size %d\n",
4029 skb->len, hlen, skb_headlen(skb),
4030 skb_shinfo(skb)->gso_size);
4031
4032 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
4033
4034 if (unlikely(skb_headlen(skb) > hlen)) {
4035 nbd++;
4036 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
4037 &tx_start_bd, hlen,
4038 bd_prod);
4039 }
4040 if (!CHIP_IS_E1x(bp))
4041 pbd_e2_parsing_data |=
4042 (skb_shinfo(skb)->gso_size <<
4043 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
4044 ETH_TX_PARSE_BD_E2_LSO_MSS;
4045 else
4046 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
4047 }
4048
4049
4050
4051
4052 if (pbd_e2_parsing_data)
4053 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
4054
4055 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
4056
4057
4058 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4059 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4060
4061 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
4062 skb_frag_size(frag), DMA_TO_DEVICE);
4063 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
4064 unsigned int pkts_compl = 0, bytes_compl = 0;
4065
4066 DP(NETIF_MSG_TX_QUEUED,
4067 "Unable to map page - dropping packet...\n");
4068
4069
4070
4071
4072
4073
4074 first_bd->nbd = cpu_to_le16(nbd);
4075 bnx2x_free_tx_pkt(bp, txdata,
4076 TX_BD(txdata->tx_pkt_prod),
4077 &pkts_compl, &bytes_compl);
4078 return NETDEV_TX_OK;
4079 }
4080
4081 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4082 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4083 if (total_pkt_bd == NULL)
4084 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4085
4086 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4087 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4088 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4089 le16_add_cpu(&pkt_size, skb_frag_size(frag));
4090 nbd++;
4091
4092 DP(NETIF_MSG_TX_QUEUED,
4093 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
4094 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4095 le16_to_cpu(tx_data_bd->nbytes));
4096 }
4097
4098 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4099
4100
4101 first_bd->nbd = cpu_to_le16(nbd);
4102
4103 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4104
4105
4106
4107
4108 if (TX_BD_POFF(bd_prod) < nbd)
4109 nbd++;
4117
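/* If the packet used additional data BDs, record the total packet size
 * on the first of them.
 */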
4118 if (total_pkt_bd != NULL)
4119 total_pkt_bd->total_pkt_bytes = pkt_size;
4120
4121 if (pbd_e1x)
4122 DP(NETIF_MSG_TX_QUEUED,
4123 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
4124 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4125 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4126 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4127 le16_to_cpu(pbd_e1x->total_hlen_w));
4128 if (pbd_e2)
4129 DP(NETIF_MSG_TX_QUEUED,
4130 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
4131 pbd_e2,
4132 pbd_e2->data.mac_addr.dst_hi,
4133 pbd_e2->data.mac_addr.dst_mid,
4134 pbd_e2->data.mac_addr.dst_lo,
4135 pbd_e2->data.mac_addr.src_hi,
4136 pbd_e2->data.mac_addr.src_mid,
4137 pbd_e2->data.mac_addr.src_lo,
4138 pbd_e2->parsing_data);
4139 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
4140
4141 netdev_tx_sent_queue(txq, skb->len);
4142
4143 skb_tx_timestamp(skb);
4144
4145 txdata->tx_pkt_prod++;
4152
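/* Make sure the BDs are written to host memory before the producer is
 * updated, since the FW may fetch them as soon as the doorbell rings.
 */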
4153 wmb();
4154
4155 txdata->tx_db.data.prod += nbd;
4156 barrier();
4157
4158 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
4159
4160 mmiowb();
4161
4162 txdata->tx_bd_prod += nbd;
4163
4164 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
4165 netif_tx_stop_queue(txq);
4166
4167
4168
4169
4170 smp_mb();
4171
4172 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
4173 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4174 netif_tx_wake_queue(txq);
4175 }
4176 txdata->tx_pkt++;
4177
4178 return NETDEV_TX_OK;
4179}
4180
4181void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
4182{
4183 int mfw_vn = BP_FW_MB_IDX(bp);
4184 u32 tmp;
4185
4186
4187 if (!IS_MF_BD(bp)) {
4188 int i;
4189
4190 for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
4191 c2s_map[i] = i;
4192 *c2s_default = 0;
4193
4194 return;
4195 }
4196
4197 tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
4198 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4199 c2s_map[0] = tmp & 0xff;
4200 c2s_map[1] = (tmp >> 8) & 0xff;
4201 c2s_map[2] = (tmp >> 16) & 0xff;
4202 c2s_map[3] = (tmp >> 24) & 0xff;
4203
4204 tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
4205 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4206 c2s_map[4] = tmp & 0xff;
4207 c2s_map[5] = (tmp >> 8) & 0xff;
4208 c2s_map[6] = (tmp >> 16) & 0xff;
4209 c2s_map[7] = (tmp >> 24) & 0xff;
4210
4211 tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
4212 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4213 *c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
4214}
4223
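/* bnx2x_setup_tc - configure @num_tc traffic classes: map priorities to
 * classes via the c2s table and assign each class its range of Tx
 * queues. Called under the rtnl lock.
 */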
4224int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4225{
4226 struct bnx2x *bp = netdev_priv(dev);
4227 u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
4228 int cos, prio, count, offset;
4229
4230
4231 ASSERT_RTNL();
4232
4233
4234 if (!num_tc) {
4235 netdev_reset_tc(dev);
4236 return 0;
4237 }
4238
4239
4240 if (num_tc > bp->max_cos) {
4241 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4242 num_tc, bp->max_cos);
4243 return -EINVAL;
4244 }
4245
4246
4247 if (netdev_set_num_tc(dev, num_tc)) {
4248 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4249 return -EINVAL;
4250 }
4251
4252 bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
4253
4254
4255 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4256 int outer_prio = c2s_map[prio];
4257
4258 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
4259 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4260 "mapping priority %d to tc %d\n",
4261 outer_prio, bp->prio_to_cos[outer_prio]);
4262 }
4263
4264
4265
4266
4267
4268
4269
4270
4271
4272
4273
4274
4275 for (cos = 0; cos < bp->max_cos; cos++) {
4276 count = BNX2X_NUM_ETH_QUEUES(bp);
4277 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4278 netdev_set_tc_queue(dev, cos, count, offset);
4279 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4280 "mapping tc %d to offset %d count %d\n",
4281 cos, offset, count);
4282 }
4283
4284 return 0;
4285}
4286
4287int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type,
4288 void *type_data)
4289{
4290 struct tc_mqprio_qopt *mqprio = type_data;
4291
4292 if (type != TC_SETUP_MQPRIO)
4293 return -EOPNOTSUPP;
4294
4295 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
4296
4297 return bnx2x_setup_tc(dev, mqprio->num_tc);
4298}
4299
4300
4301int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4302{
4303 struct sockaddr *addr = p;
4304 struct bnx2x *bp = netdev_priv(dev);
4305 int rc = 0;
4306
4307 if (!is_valid_ether_addr(addr->sa_data)) {
4308 BNX2X_ERR("Requested MAC address is not valid\n");
4309 return -EINVAL;
4310 }
4311
4312 if (IS_MF_STORAGE_ONLY(bp)) {
4313 BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
4314 return -EINVAL;
4315 }
4316
4317 if (netif_running(dev)) {
4318 rc = bnx2x_set_eth_mac(bp, false);
4319 if (rc)
4320 return rc;
4321 }
4322
4323 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4324
4325 if (netif_running(dev))
4326 rc = bnx2x_set_eth_mac(bp, true);
4327
4328 if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4329 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4330
4331 return rc;
4332}
4333
4334static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4335{
4336 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4337 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4338 u8 cos;
4339
4340
4341
4342 if (IS_FCOE_IDX(fp_index)) {
4343 memset(sb, 0, sizeof(union host_hc_status_block));
4344 fp->status_blk_mapping = 0;
4345 } else {
4346
4347 if (!CHIP_IS_E1x(bp))
4348 BNX2X_PCI_FREE(sb->e2_sb,
4349 bnx2x_fp(bp, fp_index,
4350 status_blk_mapping),
4351 sizeof(struct host_hc_status_block_e2));
4352 else
4353 BNX2X_PCI_FREE(sb->e1x_sb,
4354 bnx2x_fp(bp, fp_index,
4355 status_blk_mapping),
4356 sizeof(struct host_hc_status_block_e1x));
4357 }
4358
4359
4360 if (!skip_rx_queue(bp, fp_index)) {
4361 bnx2x_free_rx_bds(fp);
4362
4363
4364 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4365 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4366 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4367 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4368
4369 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4370 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4371 sizeof(struct eth_fast_path_rx_cqe) *
4372 NUM_RCQ_BD);
4373
4374
4375 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4376 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4377 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4378 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4379 }
4380
4381
4382 if (!skip_tx_queue(bp, fp_index)) {
4383
4384 for_each_cos_in_tx_queue(fp, cos) {
4385 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4386
4387 DP(NETIF_MSG_IFDOWN,
4388 "freeing tx memory of fp %d cos %d cid %d\n",
4389 fp_index, cos, txdata->cid);
4390
4391 BNX2X_FREE(txdata->tx_buf_ring);
4392 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4393 txdata->tx_desc_mapping,
4394 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4395 }
4396 }
4397
4398}
4399
4400static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4401{
4402 int i;
4403 for_each_cnic_queue(bp, i)
4404 bnx2x_free_fp_mem_at(bp, i);
4405}
4406
4407void bnx2x_free_fp_mem(struct bnx2x *bp)
4408{
4409 int i;
4410 for_each_eth_queue(bp, i)
4411 bnx2x_free_fp_mem_at(bp, i);
4412}
4413
4414static void set_sb_shortcuts(struct bnx2x *bp, int index)
4415{
4416 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4417 if (!CHIP_IS_E1x(bp)) {
4418 bnx2x_fp(bp, index, sb_index_values) =
4419 (__le16 *)status_blk.e2_sb->sb.index_values;
4420 bnx2x_fp(bp, index, sb_running_index) =
4421 (__le16 *)status_blk.e2_sb->sb.running_index;
4422 } else {
4423 bnx2x_fp(bp, index, sb_index_values) =
4424 (__le16 *)status_blk.e1x_sb->sb.index_values;
4425 bnx2x_fp(bp, index, sb_running_index) =
4426 (__le16 *)status_blk.e1x_sb->sb.running_index;
4427 }
4428}
4429
4430
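/* Pre-fill the Rx BD ring with up to @rx_ring_size buffers; returns the
 * number of buffers actually allocated.
 */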
4431static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4432 int rx_ring_size)
4433{
4434 struct bnx2x *bp = fp->bp;
4435 u16 ring_prod, cqe_ring_prod;
4436 int i, failure_cnt = 0;
4437
4438 fp->rx_comp_cons = 0;
4439 cqe_ring_prod = ring_prod = 0;
4440
4441
4442
4443
4444 for (i = 0; i < rx_ring_size; i++) {
4445 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
4446 failure_cnt++;
4447 continue;
4448 }
4449 ring_prod = NEXT_RX_IDX(ring_prod);
4450 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4451 WARN_ON(ring_prod <= (i - failure_cnt));
4452 }
4453
4454 if (failure_cnt)
4455 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4456 i - failure_cnt, fp->index);
4457
4458 fp->rx_bd_prod = ring_prod;
4459
4460 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4461 cqe_ring_prod);
4462
4463 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4464
4465 return i - failure_cnt;
4466}
4467
4468static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4469{
4470 int i;
4471
4472 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4473 struct eth_rx_cqe_next_page *nextpg;
4474
4475 nextpg = (struct eth_rx_cqe_next_page *)
4476 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4477 nextpg->addr_hi =
4478 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4479 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4480 nextpg->addr_lo =
4481 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4482 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4483 }
4484}
4485
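/* Allocate the status block, Tx rings and Rx rings for fastpath @index,
 * choosing an Rx ring size based on the configuration.
 */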
4486static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4487{
4488 union host_hc_status_block *sb;
4489 struct bnx2x_fastpath *fp = &bp->fp[index];
4490 int ring_size = 0;
4491 u8 cos;
4492 int rx_ring_size = 0;
4493
4494 if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
4495 rx_ring_size = MIN_RX_SIZE_NONTPA;
4496 bp->rx_ring_size = rx_ring_size;
4497 } else if (!bp->rx_ring_size) {
4498 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4499
4500 if (CHIP_IS_E3(bp)) {
4501 u32 cfg = SHMEM_RD(bp,
4502 dev_info.port_hw_config[BP_PORT(bp)].
4503 default_cfg);
4504
4505
4506 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4507 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4508 rx_ring_size /= 10;
4509 }
4510
4511
4512 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4513 MIN_RX_SIZE_TPA, rx_ring_size);
4514
4515 bp->rx_ring_size = rx_ring_size;
4516 } else
4517 rx_ring_size = bp->rx_ring_size;
4518
4519 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4520
4521
4522 sb = &bnx2x_fp(bp, index, status_blk);
4523
4524 if (!IS_FCOE_IDX(index)) {
4525
4526 if (!CHIP_IS_E1x(bp)) {
4527 sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4528 sizeof(struct host_hc_status_block_e2));
4529 if (!sb->e2_sb)
4530 goto alloc_mem_err;
4531 } else {
4532 sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4533 sizeof(struct host_hc_status_block_e1x));
4534 if (!sb->e1x_sb)
4535 goto alloc_mem_err;
4536 }
4537 }
4538
4539
4540
4541
4542 if (!IS_FCOE_IDX(index))
4543 set_sb_shortcuts(bp, index);
4544
4545
4546 if (!skip_tx_queue(bp, index)) {
4547
4548 for_each_cos_in_tx_queue(fp, cos) {
4549 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4550
4551 DP(NETIF_MSG_IFUP,
4552 "allocating tx memory of fp %d cos %d\n",
4553 index, cos);
4554
4555 txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4556 sizeof(struct sw_tx_bd),
4557 GFP_KERNEL);
4558 if (!txdata->tx_buf_ring)
4559 goto alloc_mem_err;
4560 txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4561 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4562 if (!txdata->tx_desc_ring)
4563 goto alloc_mem_err;
4564 }
4565 }
4566
4567
4568 if (!skip_rx_queue(bp, index)) {
4569
4570 bnx2x_fp(bp, index, rx_buf_ring) =
4571 kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4572 if (!bnx2x_fp(bp, index, rx_buf_ring))
4573 goto alloc_mem_err;
4574 bnx2x_fp(bp, index, rx_desc_ring) =
4575 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4576 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4577 if (!bnx2x_fp(bp, index, rx_desc_ring))
4578 goto alloc_mem_err;
4579
4580
4581 bnx2x_fp(bp, index, rx_comp_ring) =
4582 BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4583 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4584 if (!bnx2x_fp(bp, index, rx_comp_ring))
4585 goto alloc_mem_err;
4586
4587
4588 bnx2x_fp(bp, index, rx_page_ring) =
4589 kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4590 GFP_KERNEL);
4591 if (!bnx2x_fp(bp, index, rx_page_ring))
4592 goto alloc_mem_err;
4593 bnx2x_fp(bp, index, rx_sge_ring) =
4594 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4595 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4596 if (!bnx2x_fp(bp, index, rx_sge_ring))
4597 goto alloc_mem_err;
4598
4599 bnx2x_set_next_page_rx_bd(fp);
4600
4601
4602 bnx2x_set_next_page_rx_cq(fp);
4603
4604
4605 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4606 if (ring_size < rx_ring_size)
4607 goto alloc_mem_err;
4608 }
4609
4610 return 0;
4611
4612
4613alloc_mem_err:
4614 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4615 index, ring_size);
4616
4617
4618
4619
4620 if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
4621 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4622
4623 bnx2x_free_fp_mem_at(bp, index);
4624 return -ENOMEM;
4625 }
4626 return 0;
4627}
4628
4629static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4630{
4631 if (!NO_FCOE(bp))
4632
4633 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4634
4635
4636
4637 return -ENOMEM;
4638
4639 return 0;
4640}
4641
4642static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4643{
4644 int i;
4645
4646
4647
4648
4649
4650
4651 if (bnx2x_alloc_fp_mem_at(bp, 0))
4652 return -ENOMEM;
4653
4654
4655 for_each_nondefault_eth_queue(bp, i)
4656 if (bnx2x_alloc_fp_mem_at(bp, i))
4657 break;
4658
4659
4660 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4661 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4662
4663 WARN_ON(delta < 0);
4664 bnx2x_shrink_eth_fp(bp, delta);
4665 if (CNIC_SUPPORT(bp))
4666
4667
4668
4669
4670
4671
4672 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4673 bp->num_ethernet_queues -= delta;
4674 bp->num_queues = bp->num_ethernet_queues +
4675 bp->num_cnic_queues;
4676 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4677 bp->num_queues + delta, bp->num_queues);
4678 }
4679
4680 return 0;
4681}
4682
4683void bnx2x_free_mem_bp(struct bnx2x *bp)
4684{
4685 int i;
4686
4687 for (i = 0; i < bp->fp_array_size; i++)
4688 kfree(bp->fp[i].tpa_info);
4689 kfree(bp->fp);
4690 kfree(bp->sp_objs);
4691 kfree(bp->fp_stats);
4692 kfree(bp->bnx2x_txq);
4693 kfree(bp->msix_table);
4694 kfree(bp->ilt);
4695}
4696
int bnx2x_alloc_mem_bp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp;
	struct msix_entry *tbl;
	struct bnx2x_ilt *ilt;
	int msix_table_size = 0;
	int fp_array_size, txq_array_size;
	int i;

	msix_table_size = bp->igu_sb_cnt;
	if (IS_PF(bp))
		msix_table_size++;
	BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);

	fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
	bp->fp_array_size = fp_array_size;
	BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);

	fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
	if (!fp)
		goto alloc_err;
	for (i = 0; i < bp->fp_array_size; i++) {
		fp[i].tpa_info =
			kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
				sizeof(struct bnx2x_agg_info), GFP_KERNEL);
		if (!(fp[i].tpa_info))
			goto alloc_err;
	}

	bp->fp = fp;

	bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
			      GFP_KERNEL);
	if (!bp->sp_objs)
		goto alloc_err;

	bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
			       GFP_KERNEL);
	if (!bp->fp_stats)
		goto alloc_err;

	txq_array_size =
		BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
	BNX2X_DEV_INFO("txq_array_size %d\n", txq_array_size);

	bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
				GFP_KERNEL);
	if (!bp->bnx2x_txq)
		goto alloc_err;

	tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		goto alloc_err;
	bp->msix_table = tbl;

	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
	if (!ilt)
		goto alloc_err;
	bp->ilt = ilt;

	return 0;
alloc_err:
	bnx2x_free_mem_bp(bp);
	return -ENOMEM;
}

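/* Unload and immediately reload the NIC, but only if the interface is
 * currently running; a stopped interface is left untouched.
 */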
int bnx2x_reload_if_running(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (unlikely(!netif_running(dev)))
		return 0;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
	return bnx2x_nic_load(bp, LOAD_NORMAL);
}

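/* Return the index of the PHY currently in use: the internal PHY on
 * single-PHY boards, otherwise the external PHY chosen from the link state
 * or, with the link down, from the configured PHY selection policy.
 */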
int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;
	if (bp->link_params.num_phys <= 1)
		return INT_PHY;

	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;

		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {
		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY2;
			break;
		}
	}

	return sel_phy_idx;
}

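/* Map the current PHY index to a link-configuration index, accounting for
 * boards on which the two external PHYs are swapped in the HW config.
 */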
int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);

	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}
	return LINK_CONFIG_IDX(sel_phy_idx);
}

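/* Report the FCoE World Wide Node/Port Name, as read from the cnic device
 * structure, to the FCoE stack.
 */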
#ifdef NETDEV_FCOE_WWNN
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
				cp->fcoe_wwn_node_name_lo);
		break;
	case NETDEV_FCOE_WWPN:
		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
				cp->fcoe_wwn_port_name_lo);
		break;
	default:
		BNX2X_ERR("Wrong WWN type requested - %d\n", type);
		return -EINVAL;
	}

	return 0;
}
#endif

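/* Change the device MTU and reload the NIC if it is running. Rejected while
 * VFs are enabled or while parity recovery is in progress.
 */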
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (pci_num_vf(bp->pdev)) {
		DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
		return -EPERM;
	}

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Can't perform change MTU during parity recovery\n");
		return -EAGAIN;
	}

	dev->mtu = new_mtu;

	if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
		SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);

	return bnx2x_reload_if_running(dev);
}

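/* Adjust a requested feature set: when VFs are active the PF must not be
 * reloaded, so an RXCSUM disable is reverted while TPA is in use and
 * loopback changes are ignored; LRO and GRO are always cleared when RX
 * checksumming is disabled.
 */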
netdev_features_t bnx2x_fix_features(struct net_device *dev,
				     netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (pci_num_vf(bp->pdev)) {
		netdev_features_t changed = dev->features ^ features;

		if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
			features &= ~NETIF_F_RXCSUM;
			features |= dev->features & NETIF_F_RXCSUM;
		}

		if (changed & NETIF_F_LOOPBACK) {
			features &= ~NETIF_F_LOOPBACK;
			features |= dev->features & NETIF_F_LOOPBACK;
		}
	}

	if (!(features & NETIF_F_RXCSUM)) {
		features &= ~NETIF_F_LRO;
		features &= ~NETIF_F_GRO;
	}

	return features;
}

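/* Apply a new feature set. Any effective change (GRO toggles are ignored
 * while LRO is set or TPA is disabled) and any PF loopback toggle require
 * an internal unload/reload, which is only performed when parity recovery
 * is not in progress.
 */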
int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);
	netdev_features_t changes = features ^ dev->features;
	bool bnx2x_reload = false;
	int rc;

	if (!pci_num_vf(bp->pdev)) {
		if (features & NETIF_F_LOOPBACK) {
			if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
				bp->link_params.loopback_mode = LOOPBACK_BMAC;
				bnx2x_reload = true;
			}
		} else {
			if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
				bp->link_params.loopback_mode = LOOPBACK_NONE;
				bnx2x_reload = true;
			}
		}
	}

	if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO))
		changes &= ~NETIF_F_GRO;

	if ((changes & NETIF_F_GRO) && bp->disable_tpa)
		changes &= ~NETIF_F_GRO;

	if (changes)
		bnx2x_reload = true;

	if (bnx2x_reload) {
		if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
			dev->features = features;
			rc = bnx2x_reload_if_running(dev);
			return rc ? rc : 1;
		}
	}

	return 0;
}

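/* netdev Tx watchdog handler: panic when BNX2X_STOP_ON_ERROR is set for
 * debugging, otherwise defer recovery to the sp_rtnl task.
 */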
void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
}

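/* PCI suspend handler: save PCI state and, if the interface is running,
 * detach it, unload the NIC and drop to the PM state chosen by the PCI
 * core.
 */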
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

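/* PCI resume handler: restore PCI state, return to D0 and reload the NIC
 * if the interface was running. Refused while parity recovery is pending.
 */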
int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

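/* Write the CDU reserved/validation values for an ethernet connection into
 * the ustorm and xstorm aggregation context fields of the given context.
 */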
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid)
{
	if (!cxt) {
		BNX2X_ERR("bad context pointer %p\n", cxt);
		return;
	}

	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
				       CDU_REGION_NUMBER_UCM_AG,
				       ETH_CONNECTION_TYPE);

	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
				       CDU_REGION_NUMBER_XCM_AG,
				       ETH_CONNECTION_TYPE);
}

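/* Write the host-coalescing timeout (in ticks) for one status-block index
 * into CSTORM internal memory.
 */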
static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
				    u8 fw_sb_id, u8 sb_index,
				    u8 ticks)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d ticks %d\n",
	   port, fw_sb_id, sb_index, ticks);
}

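/* Set or clear the HC_ENABLED flag of one status-block index in CSTORM
 * internal memory, leaving the other flag bits untouched.
 */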
static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
				    u16 fw_sb_id, u8 sb_index,
				    u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
	u8 flags = REG_RD8(bp, addr);

	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR8(bp, addr, flags);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d disable %d\n",
	   port, fw_sb_id, sb_index, disable);
}

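/* Update interrupt coalescing for one status-block index: convert the
 * microsecond value to BNX2X_BTR ticks and disable coalescing entirely
 * when requested or when the timeout is zero.
 */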
void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec)
{
	int port = BP_PORT(bp);
	u8 ticks = usec / BNX2X_BTR;

	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);

	disable = disable ? 1 : (usec ? 0 : 1);
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}

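/* Set a flag in bp->sp_rtnl_state (with full memory barriers around the
 * atomic op) and kick the sp_rtnl delayed work to service it, e.g.
 * bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0) as called from
 * the Tx timeout handler above.
 */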
void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
			    u32 verbose)
{
	smp_mb__before_atomic();
	set_bit(flag, &bp->sp_rtnl_state);
	smp_mb__after_atomic();
	DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
	   flag);
	schedule_delayed_work(&bp->sp_rtnl_task, 0);
}
