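/* Common bnx2x driver code: fast-path Rx/Tx handling, NAPI and IRQ setup,
 * and NIC load/unload helpers shared by the PF and VF flows.
 */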
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/crash_dump.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue_cnic(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
	}
}

static void bnx2x_add_all_napi(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
	}
}

static int bnx2x_calc_num_queues(struct bnx2x *bp)
{
	int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();

	/* Reduce memory usage in kdump environment by using only one queue */
	if (is_kdump_kernel())
		nq = 1;

	nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
	return nq;
}

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of the bp->fp[to].napi is kept
 * intact, by copying the napi struct from the target to the
 * source before the memcpy of the whole structure.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;
	struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* Retain the tpa_info of the original 'to' version, so that two
	 * fastpaths never share the same tpa_info pointer.
	 */
	to_fp->tpa_info = old_tpa_info;

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * each fp consumes 'max_cos' txdata structures, so the index should
	 * be decremented by max_cos x delta.
	 */
	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
			   (bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[new_txdata_index],
	       &bp->bnx2x_txq[old_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}

/**
 * bnx2x_fill_fw_str - fill buffer with FW version string
 *
 * @bp:		driver handle
 * @buf:	character buffer to fill with the fw name
 * @buf_len:	length of the above buffer
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{
	if (IS_PF(bp)) {
		u8 phy_fw_ver[PHY_FW_VER_LEN];

		phy_fw_ver[0] = '\0';
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     phy_fw_ver, PHY_FW_VER_LEN);
		strlcpy(buf, bp->fw_ver, buf_len);
		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
			 "bc %d.%d.%d%s%s",
			 (bp->common.bc_ver & 0xff0000) >> 16,
			 (bp->common.bc_ver & 0xff00) >> 8,
			 (bp->common.bc_ver & 0xff),
			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	} else {
		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
	}
}

/**
 * bnx2x_shrink_eth_fp - guarantee fastpath structures stay valid
 *
 * @bp:		driver handle
 * @delta:	number of eth queues which were not allocated
 */
static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
{
	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);

	/* Re-pack the per-cos txdata entries so that they remain contiguous
	 * after 'delta' ethernet queues have been dropped.
	 */
	for (cos = 1; cos < bp->max_cos; cos++) {
		for (i = 0; i < old_eth_num - delta; i++) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			int new_idx = cos * (old_eth_num - delta) + i;

			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
			       sizeof(struct bnx2x_fp_txdata));
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
		}
	}
}

int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the Tx packet ring at pos idx,
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;
	u16 split_bd_len = 0;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
		/* Skip second parse bd */
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* TSO header + split data BDs share one DMA mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		split_bd_len = BD_UNMAP_LEN(tx_data_bd);
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* unmap first bd */
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
			 DMA_TO_DEVICE);

	/* now free frags */
	while (nbd > 0) {

		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
		dev_kfree_skb_any(skb);
	}

	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

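/* Process Tx completions up to the HW consumer index, free the completed
 * skbs and, if the queue was stopped and enough descriptors are free again,
 * wake it.
 */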
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	/* Ensure reads of the BD ring happen after reading hw_cons */
	smp_rmb();

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Make sure the tx_bd_cons update is visible to start_xmit() before
	 * we test netif_tx_queue_stopped() below; otherwise start_xmit()
	 * could miss the update and leave the queue stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Take the Tx lock to avoid re-waking the queue while
		 * start_xmit() is concurrently stopping it.
		 */
		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}
344
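/* Track the furthest SGE index consumed by an aggregation; SUB_S16() keeps
 * the comparison correct across the 16-bit ring wrap.
 */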
345static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
346 u16 idx)
347{
348 u16 last_max = fp->last_max_sge;
349
350 if (SUB_S16(idx, last_max) > 0)
351 fp->last_max_sge = idx;
352}
353
354static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
355 u16 sge_len,
356 struct eth_end_agg_rx_cqe *cqe)
357{
358 struct bnx2x *bp = fp->bp;
359 u16 last_max, last_elem, first_elem;
360 u16 delta = 0;
361 u16 i;
362
363 if (!sge_len)
364 return;
365
366
367 for (i = 0; i < sge_len; i++)
368 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
369 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
370
371 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
372 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
373
374
375 prefetch((void *)(fp->sge_mask));
376 bnx2x_update_last_max_sge(fp,
377 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
378
379 last_max = RX_SGE(fp->last_max_sge);
380 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
381 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
382
383
384 if (last_elem + 1 != first_elem)
385 last_elem++;
386
387
388 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
389 if (likely(fp->sge_mask[i]))
390 break;
391
392 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
393 delta += BIT_VEC64_ELEM_SZ;
394 }
395
396 if (delta > 0) {
397 fp->rx_sge_prod += delta;
398
399 bnx2x_clear_sge_mask_next_elems(fp);
400 }
401
402 DP(NETIF_MSG_RX_STATUS,
403 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
404 fp->last_max_sge, fp->rx_sge_prod);
405}

/* Set Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
410static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
411 const struct eth_fast_path_rx_cqe *cqe,
412 enum pkt_hash_types *rxhash_type)
413{
414
415 if ((bp->dev->features & NETIF_F_RXHASH) &&
416 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
417 enum eth_rss_hash_type htype;
418
419 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
420 *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
421 (htype == TCP_IPV6_HASH_TYPE)) ?
422 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
423
424 return le32_to_cpu(cqe->rss_hash_result);
425 }
426 *rxhash_type = PKT_HASH_TYPE_NONE;
427 return 0;
428}
429
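/* Start a new TPA aggregation: park the just-received buffer in the
 * per-queue tpa_info first-buffer slot, put the spare first buffer back on
 * the Rx ring at the producer index, and latch the parsing flags, VLAN tag
 * and hash from the START CQE.
 */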
430static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
431 u16 cons, u16 prod,
432 struct eth_fast_path_rx_cqe *cqe)
433{
434 struct bnx2x *bp = fp->bp;
435 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
436 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
437 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
438 dma_addr_t mapping;
439 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
440 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
441
442
443 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
444 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
445
446
447 mapping = dma_map_single(&bp->pdev->dev,
448 first_buf->data + NET_SKB_PAD,
449 fp->rx_buf_size, DMA_FROM_DEVICE);
450
451
452
453
454
455
456 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
457
458 bnx2x_reuse_rx_data(fp, cons, prod);
459 tpa_info->tpa_state = BNX2X_TPA_ERROR;
460 return;
461 }
462
463
464 prod_rx_buf->data = first_buf->data;
465 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
466
467 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
468 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
469
470
471 *first_buf = *cons_rx_buf;
472
473
474 tpa_info->parsing_flags =
475 le16_to_cpu(cqe->pars_flags.flags);
476 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
477 tpa_info->tpa_state = BNX2X_TPA_START;
478 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
479 tpa_info->placement_offset = cqe->placement_offset;
480 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
481 if (fp->mode == TPA_MODE_GRO) {
482 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
483 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
484 tpa_info->gro_size = gro_size;
485 }
486
487#ifdef BNX2X_STOP_ON_ERROR
488 fp->tpa_queue_used |= (1 << queue);
489 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
490 fp->tpa_queue_used);
491#endif
492}

#define TPA_TSTAMP_OPT_LEN	12

/**
 * bnx2x_set_gro_params - compute GRO values for an aggregated packet
 *
 * @skb:		packet skb
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet of the aggregation
 * @pkt_len:		length of all segments
 * @num_of_coalesced_segs:	count of coalesced segments
 *
 * Derives an approximate MSS (gso_size) from the first packet of the
 * aggregation and sets gso_type and the GRO segment count accordingly.
 */
512static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
513 u16 len_on_bd, unsigned int pkt_len,
514 u16 num_of_coalesced_segs)
515{
516
517
518
519 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
520
521 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
522 PRS_FLAG_OVERETH_IPV6) {
523 hdrs_len += sizeof(struct ipv6hdr);
524 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
525 } else {
526 hdrs_len += sizeof(struct iphdr);
527 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
528 }
529
530
531
532
533
534
535 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
536 hdrs_len += TPA_TSTAMP_OPT_LEN;
537
538 skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
539
540
541
542
543 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
544}
545
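/* Attach an SGE_PAGE_SIZE chunk from the per-queue page pool to the SGE
 * ring entry at 'index'; a fresh page (order PAGES_PER_SGE_SHIFT) is
 * allocated when the pool is empty, and the page refcount is bumped while
 * unused chunks remain.
 */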
546static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
547 u16 index, gfp_t gfp_mask)
548{
549 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
550 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
551 struct bnx2x_alloc_pool *pool = &fp->page_pool;
552 dma_addr_t mapping;
553
554 if (!pool->page) {
555 pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
556 if (unlikely(!pool->page))
557 return -ENOMEM;
558
559 pool->offset = 0;
560 }
561
562 mapping = dma_map_page(&bp->pdev->dev, pool->page,
563 pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
564 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
565 BNX2X_ERR("Can't map sge\n");
566 return -ENOMEM;
567 }
568
569 sw_buf->page = pool->page;
570 sw_buf->offset = pool->offset;
571
572 dma_unmap_addr_set(sw_buf, mapping, mapping);
573
574 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
575 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
576
577 pool->offset += SGE_PAGE_SIZE;
578 if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE)
579 get_page(pool->page);
580 else
581 pool->page = NULL;
582 return 0;
583}
584
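/* Attach the SGE pages of a completed aggregation to 'skb' as page
 * fragments, refilling each consumed SGE ring slot along the way. In GRO
 * mode the data is split into gro_size-sized frags so the stack sees
 * MSS-sized segments.
 */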
585static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
586 struct bnx2x_agg_info *tpa_info,
587 u16 pages,
588 struct sk_buff *skb,
589 struct eth_end_agg_rx_cqe *cqe,
590 u16 cqe_idx)
591{
592 struct sw_rx_page *rx_pg, old_rx_pg;
593 u32 i, frag_len, frag_size;
594 int err, j, frag_id = 0;
595 u16 len_on_bd = tpa_info->len_on_bd;
596 u16 full_page = 0, gro_size = 0;
597
598 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
599
600 if (fp->mode == TPA_MODE_GRO) {
601 gro_size = tpa_info->gro_size;
602 full_page = tpa_info->full_page;
603 }
604
605
606 if (frag_size)
607 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
608 le16_to_cpu(cqe->pkt_len),
609 le16_to_cpu(cqe->num_of_coalesced_segs));
610
611#ifdef BNX2X_STOP_ON_ERROR
612 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
613 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
614 pages, cqe_idx);
615 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
616 bnx2x_panic();
617 return -EINVAL;
618 }
619#endif
620
621
622 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
623 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
624
625
626
627 if (fp->mode == TPA_MODE_GRO)
628 frag_len = min_t(u32, frag_size, (u32)full_page);
629 else
630 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
631
632 rx_pg = &fp->rx_page_ring[sge_idx];
633 old_rx_pg = *rx_pg;
634
635
636
637 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
638 if (unlikely(err)) {
639 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
640 return err;
641 }
642
643 dma_unmap_page(&bp->pdev->dev,
644 dma_unmap_addr(&old_rx_pg, mapping),
645 SGE_PAGE_SIZE, DMA_FROM_DEVICE);
646
647 if (fp->mode == TPA_MODE_LRO)
648 skb_fill_page_desc(skb, j, old_rx_pg.page,
649 old_rx_pg.offset, frag_len);
650 else {
651 int rem;
652 int offset = 0;
653 for (rem = frag_len; rem > 0; rem -= gro_size) {
654 int len = rem > gro_size ? gro_size : rem;
655 skb_fill_page_desc(skb, frag_id++,
656 old_rx_pg.page,
657 old_rx_pg.offset + offset,
658 len);
659 if (offset)
660 get_page(old_rx_pg.page);
661 offset += len;
662 }
663 }
664
665 skb->data_len += frag_len;
666 skb->truesize += SGE_PAGES;
667 skb->len += frag_len;
668
669 frag_size -= frag_len;
670 }
671
672 return 0;
673}
674
675static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
676{
677 if (fp->rx_frag_size)
678 skb_free_frag(data);
679 else
680 kfree(data);
681}
682
683static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
684{
685 if (fp->rx_frag_size) {
686
687 if (unlikely(gfpflags_allow_blocking(gfp_mask)))
688 return (void *)__get_free_page(gfp_mask);
689
690 return napi_alloc_frag(fp->rx_frag_size);
691 }
692
693 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
694}
695
696#ifdef CONFIG_INET
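/* Prepare a firmware-aggregated packet for tcp_gro_complete(): point the
 * transport header past the IPv4/IPv6 header and seed th->check with the
 * pseudo-header checksum.
 */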
697static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
698{
699 const struct iphdr *iph = ip_hdr(skb);
700 struct tcphdr *th;
701
702 skb_set_transport_header(skb, sizeof(struct iphdr));
703 th = tcp_hdr(skb);
704
705 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
706 iph->saddr, iph->daddr, 0);
707}
708
709static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
710{
711 struct ipv6hdr *iph = ipv6_hdr(skb);
712 struct tcphdr *th;
713
714 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
715 th = tcp_hdr(skb);
716
717 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
718 &iph->saddr, &iph->daddr, 0);
719}
720
721static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
722 void (*gro_func)(struct bnx2x*, struct sk_buff*))
723{
724 skb_reset_network_header(skb);
725 gro_func(bp, skb);
726 tcp_gro_complete(skb);
727}
728#endif
729
730static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
731 struct sk_buff *skb)
732{
733#ifdef CONFIG_INET
734 if (skb_shinfo(skb)->gso_size) {
735 switch (be16_to_cpu(skb->protocol)) {
736 case ETH_P_IP:
737 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
738 break;
739 case ETH_P_IPV6:
740 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
741 break;
742 default:
743 netdev_WARN_ONCE(bp->dev,
744 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
745 be16_to_cpu(skb->protocol));
746 }
747 }
748#endif
749 skb_record_rx_queue(skb, fp->rx_queue);
750 napi_gro_receive(&fp->napi, skb);
751}
752
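/* Complete (or drop) a TPA aggregation: build an skb around the first
 * buffer, append the SGE pages and hand the result to GRO, while the
 * aggregation slot gets a freshly allocated replacement buffer. If the
 * aggregation was marked as errored, or no replacement buffer can be
 * allocated, the packet is dropped.
 */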
753static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
754 struct bnx2x_agg_info *tpa_info,
755 u16 pages,
756 struct eth_end_agg_rx_cqe *cqe,
757 u16 cqe_idx)
758{
759 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
760 u8 pad = tpa_info->placement_offset;
761 u16 len = tpa_info->len_on_bd;
762 struct sk_buff *skb = NULL;
763 u8 *new_data, *data = rx_buf->data;
764 u8 old_tpa_state = tpa_info->tpa_state;
765
766 tpa_info->tpa_state = BNX2X_TPA_STOP;
767
768
769
770
771 if (old_tpa_state == BNX2X_TPA_ERROR)
772 goto drop;
773
774
775 new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
776
777
778
779 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
780 fp->rx_buf_size, DMA_FROM_DEVICE);
781 if (likely(new_data))
782 skb = build_skb(data, fp->rx_frag_size);
783
784 if (likely(skb)) {
785#ifdef BNX2X_STOP_ON_ERROR
786 if (pad + len > fp->rx_buf_size) {
787 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
788 pad, len, fp->rx_buf_size);
789 bnx2x_panic();
790 return;
791 }
792#endif
793
794 skb_reserve(skb, pad + NET_SKB_PAD);
795 skb_put(skb, len);
796 skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
797
798 skb->protocol = eth_type_trans(skb, bp->dev);
799 skb->ip_summed = CHECKSUM_UNNECESSARY;
800
801 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
802 skb, cqe, cqe_idx)) {
803 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
804 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
805 bnx2x_gro_receive(bp, fp, skb);
806 } else {
807 DP(NETIF_MSG_RX_STATUS,
808 "Failed to allocate new pages - dropping packet!\n");
809 dev_kfree_skb_any(skb);
810 }
811
812
813 rx_buf->data = new_data;
814
815 return;
816 }
817 if (new_data)
818 bnx2x_frag_free(fp, new_data);
819drop:
820
821 DP(NETIF_MSG_RX_STATUS,
822 "Failed to allocate or map a new skb - dropping packet!\n");
823 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
824}
825
826static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
827 u16 index, gfp_t gfp_mask)
828{
829 u8 *data;
830 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
831 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
832 dma_addr_t mapping;
833
834 data = bnx2x_frag_alloc(fp, gfp_mask);
835 if (unlikely(data == NULL))
836 return -ENOMEM;
837
838 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
839 fp->rx_buf_size,
840 DMA_FROM_DEVICE);
841 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
842 bnx2x_frag_free(fp, data);
843 BNX2X_ERR("Can't map rx data\n");
844 return -ENOMEM;
845 }
846
847 rx_buf->data = data;
848 dma_unmap_addr_set(rx_buf, mapping, mapping);
849
850 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
851 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
852
853 return 0;
854}
855
856static
857void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
858 struct bnx2x_fastpath *fp,
859 struct bnx2x_eth_q_stats *qstats)
860{
861
862
863
864
865
866 if (cqe->fast_path_cqe.status_flags &
867 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
868 return;
869
870
871
872 if (cqe->fast_path_cqe.type_error_flags &
873 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
874 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
875 qstats->hw_csum_err++;
876 else
877 skb->ip_summed = CHECKSUM_UNNECESSARY;
878}
879
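/* Main Rx fast-path: walk the completion queue for up to 'budget' packets,
 * handling slow-path events, TPA start/stop completions and regular
 * packets, then publish the new producers to the chip.
 */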
880static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
881{
882 struct bnx2x *bp = fp->bp;
883 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
884 u16 sw_comp_cons, sw_comp_prod;
885 int rx_pkt = 0;
886 union eth_rx_cqe *cqe;
887 struct eth_fast_path_rx_cqe *cqe_fp;
888
889#ifdef BNX2X_STOP_ON_ERROR
890 if (unlikely(bp->panic))
891 return 0;
892#endif
893 if (budget <= 0)
894 return rx_pkt;
895
896 bd_cons = fp->rx_bd_cons;
897 bd_prod = fp->rx_bd_prod;
898 bd_prod_fw = bd_prod;
899 sw_comp_cons = fp->rx_comp_cons;
900 sw_comp_prod = fp->rx_comp_prod;
901
902 comp_ring_cons = RCQ_BD(sw_comp_cons);
903 cqe = &fp->rx_comp_ring[comp_ring_cons];
904 cqe_fp = &cqe->fast_path_cqe;
905
906 DP(NETIF_MSG_RX_STATUS,
907 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
908
909 while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
910 struct sw_rx_bd *rx_buf = NULL;
911 struct sk_buff *skb;
912 u8 cqe_fp_flags;
913 enum eth_rx_cqe_type cqe_fp_type;
914 u16 len, pad, queue;
915 u8 *data;
916 u32 rxhash;
917 enum pkt_hash_types rxhash_type;
918
919#ifdef BNX2X_STOP_ON_ERROR
920 if (unlikely(bp->panic))
921 return 0;
922#endif
923
924 bd_prod = RX_BD(bd_prod);
925 bd_cons = RX_BD(bd_cons);
926
927
928
929
930
931
932
933
934
935
936
937 rmb();
938
939 cqe_fp_flags = cqe_fp->type_error_flags;
940 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
941
942 DP(NETIF_MSG_RX_STATUS,
943 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
944 CQE_TYPE(cqe_fp_flags),
945 cqe_fp_flags, cqe_fp->status_flags,
946 le32_to_cpu(cqe_fp->rss_hash_result),
947 le16_to_cpu(cqe_fp->vlan_tag),
948 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
949
950
951 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
952 bnx2x_sp_event(fp, cqe);
953 goto next_cqe;
954 }
955
956 rx_buf = &fp->rx_buf_ring[bd_cons];
957 data = rx_buf->data;
958
959 if (!CQE_TYPE_FAST(cqe_fp_type)) {
960 struct bnx2x_agg_info *tpa_info;
961 u16 frag_size, pages;
962#ifdef BNX2X_STOP_ON_ERROR
963
964 if (fp->mode == TPA_MODE_DISABLED &&
965 (CQE_TYPE_START(cqe_fp_type) ||
966 CQE_TYPE_STOP(cqe_fp_type)))
967 BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
968 CQE_TYPE(cqe_fp_type));
969#endif
970
971 if (CQE_TYPE_START(cqe_fp_type)) {
972 u16 queue = cqe_fp->queue_index;
973 DP(NETIF_MSG_RX_STATUS,
974 "calling tpa_start on queue %d\n",
975 queue);
976
977 bnx2x_tpa_start(fp, queue,
978 bd_cons, bd_prod,
979 cqe_fp);
980
981 goto next_rx;
982 }
983 queue = cqe->end_agg_cqe.queue_index;
984 tpa_info = &fp->tpa_info[queue];
985 DP(NETIF_MSG_RX_STATUS,
986 "calling tpa_stop on queue %d\n",
987 queue);
988
989 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
990 tpa_info->len_on_bd;
991
992 if (fp->mode == TPA_MODE_GRO)
993 pages = (frag_size + tpa_info->full_page - 1) /
994 tpa_info->full_page;
995 else
996 pages = SGE_PAGE_ALIGN(frag_size) >>
997 SGE_PAGE_SHIFT;
998
999 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
1000 &cqe->end_agg_cqe, comp_ring_cons);
1001#ifdef BNX2X_STOP_ON_ERROR
1002 if (bp->panic)
1003 return 0;
1004#endif
1005
1006 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
1007 goto next_cqe;
1008 }
1009
1010 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
1011 pad = cqe_fp->placement_offset;
1012 dma_sync_single_for_cpu(&bp->pdev->dev,
1013 dma_unmap_addr(rx_buf, mapping),
1014 pad + RX_COPY_THRESH,
1015 DMA_FROM_DEVICE);
1016 pad += NET_SKB_PAD;
1017 prefetch(data + pad);
1018
1019 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1020 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1021 "ERROR flags %x rx packet %u\n",
1022 cqe_fp_flags, sw_comp_cons);
1023 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
1024 goto reuse_rx;
1025 }
1026
1027
1028
1029
1030 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1031 (len <= RX_COPY_THRESH)) {
1032 skb = napi_alloc_skb(&fp->napi, len);
1033 if (skb == NULL) {
1034 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1035 "ERROR packet dropped because of alloc failure\n");
1036 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1037 goto reuse_rx;
1038 }
1039 memcpy(skb->data, data + pad, len);
1040 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1041 } else {
1042 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1043 GFP_ATOMIC) == 0)) {
1044 dma_unmap_single(&bp->pdev->dev,
1045 dma_unmap_addr(rx_buf, mapping),
1046 fp->rx_buf_size,
1047 DMA_FROM_DEVICE);
1048 skb = build_skb(data, fp->rx_frag_size);
1049 if (unlikely(!skb)) {
1050 bnx2x_frag_free(fp, data);
1051 bnx2x_fp_qstats(bp, fp)->
1052 rx_skb_alloc_failed++;
1053 goto next_rx;
1054 }
1055 skb_reserve(skb, pad);
1056 } else {
1057 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1058 "ERROR packet dropped because of alloc failure\n");
1059 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1060reuse_rx:
1061 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1062 goto next_rx;
1063 }
1064 }
1065
1066 skb_put(skb, len);
1067 skb->protocol = eth_type_trans(skb, bp->dev);
1068
1069
1070 rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1071 skb_set_hash(skb, rxhash, rxhash_type);
1072
1073 skb_checksum_none_assert(skb);
1074
1075 if (bp->dev->features & NETIF_F_RXCSUM)
1076 bnx2x_csum_validate(skb, cqe, fp,
1077 bnx2x_fp_qstats(bp, fp));
1078
1079 skb_record_rx_queue(skb, fp->rx_queue);
1080
1081
1082 if (unlikely(cqe->fast_path_cqe.type_error_flags &
1083 (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
1084 bnx2x_set_rx_ts(bp, skb);
1085
1086 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1087 PARSING_FLAGS_VLAN)
1088 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1089 le16_to_cpu(cqe_fp->vlan_tag));
1090
1091 napi_gro_receive(&fp->napi, skb);
1092next_rx:
1093 rx_buf->data = NULL;
1094
1095 bd_cons = NEXT_RX_IDX(bd_cons);
1096 bd_prod = NEXT_RX_IDX(bd_prod);
1097 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1098 rx_pkt++;
1099next_cqe:
1100 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1101 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1102
1103
1104 BNX2X_SEED_CQE(cqe_fp);
1105
1106 if (rx_pkt == budget)
1107 break;
1108
1109 comp_ring_cons = RCQ_BD(sw_comp_cons);
1110 cqe = &fp->rx_comp_ring[comp_ring_cons];
1111 cqe_fp = &cqe->fast_path_cqe;
1112 }
1113
1114 fp->rx_bd_cons = bd_cons;
1115 fp->rx_bd_prod = bd_prod_fw;
1116 fp->rx_comp_cons = sw_comp_cons;
1117 fp->rx_comp_prod = sw_comp_prod;
1118
1119
1120 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1121 fp->rx_sge_prod);
1122
1123 return rx_pkt;
1124}
1125
1126static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1127{
1128 struct bnx2x_fastpath *fp = fp_cookie;
1129 struct bnx2x *bp = fp->bp;
1130 u8 cos;
1131
1132 DP(NETIF_MSG_INTR,
1133 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1134 fp->index, fp->fw_sb_id, fp->igu_sb_id);
1135
1136 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1137
1138#ifdef BNX2X_STOP_ON_ERROR
1139 if (unlikely(bp->panic))
1140 return IRQ_HANDLED;
1141#endif
1142
1143
1144 for_each_cos_in_tx_queue(fp, cos)
1145 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1146
1147 prefetch(&fp->sb_running_index[SM_RX_ID]);
1148 napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
1149
1150 return IRQ_HANDLED;
1151}
1152
1153
1154void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1155{
1156 mutex_lock(&bp->port.phy_mutex);
1157
1158 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1159}
1160
1161void bnx2x_release_phy_lock(struct bnx2x *bp)
1162{
1163 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1164
1165 mutex_unlock(&bp->port.phy_mutex);
1166}
1167
1168
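/* Report the effective line speed; in multi-function mode the speed is
 * scaled by the configured maximum bandwidth (percentage or absolute).
 */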
1169u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1170{
1171 u16 line_speed = bp->link_vars.line_speed;
1172 if (IS_MF(bp)) {
1173 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1174 bp->mf_config[BP_VN(bp)]);
1175
1176
1177
1178
1179 if (IS_MF_PERCENT_BW(bp))
1180 line_speed = (line_speed * maxCfg) / 100;
1181 else {
1182 u16 vn_max_rate = maxCfg * 100;
1183
1184 if (vn_max_rate < line_speed)
1185 line_speed = vn_max_rate;
1186 }
1187 }
1188
1189 return line_speed;
1190}

/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * Uses non-atomic bit helpers since it runs under the link-report locking
 * scheme.
 */
1200static void bnx2x_fill_report_data(struct bnx2x *bp,
1201 struct bnx2x_link_report_data *data)
1202{
1203 memset(data, 0, sizeof(*data));
1204
1205 if (IS_PF(bp)) {
1206
1207 data->line_speed = bnx2x_get_mf_speed(bp);
1208
1209
1210 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1211 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1212 &data->link_report_flags);
1213
1214 if (!BNX2X_NUM_ETH_QUEUES(bp))
1215 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1216 &data->link_report_flags);
1217
1218
1219 if (bp->link_vars.duplex == DUPLEX_FULL)
1220 __set_bit(BNX2X_LINK_REPORT_FD,
1221 &data->link_report_flags);
1222
1223
1224 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1225 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1226 &data->link_report_flags);
1227
1228
1229 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1230 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1231 &data->link_report_flags);
1232 } else {
1233 *data = bp->vf_link_vars;
1234 }
1235}

/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls __bnx2x_link_report() under the PHY lock so the report is
 * consistent with the link/PHY state managing code.
 */
1247void bnx2x_link_report(struct bnx2x *bp)
1248{
1249 bnx2x_acquire_phy_lock(bp);
1250 __bnx2x_link_report(bp);
1251 bnx2x_release_phy_lock(bp);
1252}

/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation; should be called under the PHY lock.
 */
1262void __bnx2x_link_report(struct bnx2x *bp)
1263{
1264 struct bnx2x_link_report_data cur_data;
1265
1266 if (bp->force_link_down) {
1267 bp->link_vars.link_up = 0;
1268 return;
1269 }
1270
1271
1272 if (IS_PF(bp) && !CHIP_IS_E1(bp))
1273 bnx2x_read_mf_cfg(bp);
1274
1275
1276 bnx2x_fill_report_data(bp, &cur_data);
1277
1278
1279 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1280 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1281 &bp->last_reported_link.link_report_flags) &&
1282 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1283 &cur_data.link_report_flags)))
1284 return;
1285
1286 bp->link_cnt++;
1287
1288
1289
1290
1291 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1292
1293
1294 if (IS_PF(bp))
1295 bnx2x_iov_link_update(bp);
1296
1297 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1298 &cur_data.link_report_flags)) {
1299 netif_carrier_off(bp->dev);
1300 netdev_err(bp->dev, "NIC Link is Down\n");
1301 return;
1302 } else {
1303 const char *duplex;
1304 const char *flow;
1305
1306 netif_carrier_on(bp->dev);
1307
1308 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1309 &cur_data.link_report_flags))
1310 duplex = "full";
1311 else
1312 duplex = "half";
1313
1314
1315
1316
1317
1318 if (cur_data.link_report_flags) {
1319 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1320 &cur_data.link_report_flags)) {
1321 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1322 &cur_data.link_report_flags))
1323 flow = "ON - receive & transmit";
1324 else
1325 flow = "ON - receive";
1326 } else {
1327 flow = "ON - transmit";
1328 }
1329 } else {
1330 flow = "none";
1331 }
1332 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1333 cur_data.line_speed, duplex, flow);
1334 }
1335}
1336
1337static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1338{
1339 int i;
1340
1341 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1342 struct eth_rx_sge *sge;
1343
1344 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1345 sge->addr_hi =
1346 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1347 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1348
1349 sge->addr_lo =
1350 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1351 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1352 }
1353}
1354
1355static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1356 struct bnx2x_fastpath *fp, int last)
1357{
1358 int i;
1359
1360 for (i = 0; i < last; i++) {
1361 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1362 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1363 u8 *data = first_buf->data;
1364
1365 if (data == NULL) {
1366 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1367 continue;
1368 }
1369 if (tpa_info->tpa_state == BNX2X_TPA_START)
1370 dma_unmap_single(&bp->pdev->dev,
1371 dma_unmap_addr(first_buf, mapping),
1372 fp->rx_buf_size, DMA_FROM_DEVICE);
1373 bnx2x_frag_free(fp, data);
1374 first_buf->data = NULL;
1375 }
1376}
1377
1378void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1379{
1380 int j;
1381
1382 for_each_rx_queue_cnic(bp, j) {
1383 struct bnx2x_fastpath *fp = &bp->fp[j];
1384
1385 fp->rx_bd_cons = 0;
1386
1387
1388
1389
1390
1391
1392 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1393 fp->rx_sge_prod);
1394 }
1395}
1396
1397void bnx2x_init_rx_rings(struct bnx2x *bp)
1398{
1399 int func = BP_FUNC(bp);
1400 u16 ring_prod;
1401 int i, j;
1402
1403
1404 for_each_eth_queue(bp, j) {
1405 struct bnx2x_fastpath *fp = &bp->fp[j];
1406
1407 DP(NETIF_MSG_IFUP,
1408 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1409
1410 if (fp->mode != TPA_MODE_DISABLED) {
1411
1412 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1413 struct bnx2x_agg_info *tpa_info =
1414 &fp->tpa_info[i];
1415 struct sw_rx_bd *first_buf =
1416 &tpa_info->first_buf;
1417
1418 first_buf->data =
1419 bnx2x_frag_alloc(fp, GFP_KERNEL);
1420 if (!first_buf->data) {
1421 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1422 j);
1423 bnx2x_free_tpa_pool(bp, fp, i);
1424 fp->mode = TPA_MODE_DISABLED;
1425 break;
1426 }
1427 dma_unmap_addr_set(first_buf, mapping, 0);
1428 tpa_info->tpa_state = BNX2X_TPA_STOP;
1429 }
1430
1431
1432 bnx2x_set_next_page_sgl(fp);
1433
1434
1435 bnx2x_init_sge_ring_bit_mask(fp);
1436
1437
1438 for (i = 0, ring_prod = 0;
1439 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1440
1441 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1442 GFP_KERNEL) < 0) {
1443 BNX2X_ERR("was only able to allocate %d rx sges\n",
1444 i);
1445 BNX2X_ERR("disabling TPA for queue[%d]\n",
1446 j);
1447
1448 bnx2x_free_rx_sge_range(bp, fp,
1449 ring_prod);
1450 bnx2x_free_tpa_pool(bp, fp,
1451 MAX_AGG_QS(bp));
1452 fp->mode = TPA_MODE_DISABLED;
1453 ring_prod = 0;
1454 break;
1455 }
1456 ring_prod = NEXT_SGE_IDX(ring_prod);
1457 }
1458
1459 fp->rx_sge_prod = ring_prod;
1460 }
1461 }
1462
1463 for_each_eth_queue(bp, j) {
1464 struct bnx2x_fastpath *fp = &bp->fp[j];
1465
1466 fp->rx_bd_cons = 0;
1467
1468
1469
1470
1471
1472
1473 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1474 fp->rx_sge_prod);
1475
1476 if (j != 0)
1477 continue;
1478
1479 if (CHIP_IS_E1(bp)) {
1480 REG_WR(bp, BAR_USTRORM_INTMEM +
1481 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1482 U64_LO(fp->rx_comp_mapping));
1483 REG_WR(bp, BAR_USTRORM_INTMEM +
1484 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1485 U64_HI(fp->rx_comp_mapping));
1486 }
1487 }
1488}
1489
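/* Free every pending Tx skb on all CoS rings of this fastpath and reset the
 * corresponding BQL queue state.
 */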
1490static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1491{
1492 u8 cos;
1493 struct bnx2x *bp = fp->bp;
1494
1495 for_each_cos_in_tx_queue(fp, cos) {
1496 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1497 unsigned pkts_compl = 0, bytes_compl = 0;
1498
1499 u16 sw_prod = txdata->tx_pkt_prod;
1500 u16 sw_cons = txdata->tx_pkt_cons;
1501
1502 while (sw_cons != sw_prod) {
1503 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1504 &pkts_compl, &bytes_compl);
1505 sw_cons++;
1506 }
1507
1508 netdev_tx_reset_queue(
1509 netdev_get_tx_queue(bp->dev,
1510 txdata->txq_index));
1511 }
1512}
1513
1514static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1515{
1516 int i;
1517
1518 for_each_tx_queue_cnic(bp, i) {
1519 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1520 }
1521}
1522
1523static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1524{
1525 int i;
1526
1527 for_each_eth_queue(bp, i) {
1528 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1529 }
1530}
1531
1532static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1533{
1534 struct bnx2x *bp = fp->bp;
1535 int i;
1536
1537
1538 if (fp->rx_buf_ring == NULL)
1539 return;
1540
1541 for (i = 0; i < NUM_RX_BD; i++) {
1542 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1543 u8 *data = rx_buf->data;
1544
1545 if (data == NULL)
1546 continue;
1547 dma_unmap_single(&bp->pdev->dev,
1548 dma_unmap_addr(rx_buf, mapping),
1549 fp->rx_buf_size, DMA_FROM_DEVICE);
1550
1551 rx_buf->data = NULL;
1552 bnx2x_frag_free(fp, data);
1553 }
1554}
1555
1556static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1557{
1558 int j;
1559
1560 for_each_rx_queue_cnic(bp, j) {
1561 bnx2x_free_rx_bds(&bp->fp[j]);
1562 }
1563}
1564
1565static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1566{
1567 int j;
1568
1569 for_each_eth_queue(bp, j) {
1570 struct bnx2x_fastpath *fp = &bp->fp[j];
1571
1572 bnx2x_free_rx_bds(fp);
1573
1574 if (fp->mode != TPA_MODE_DISABLED)
1575 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1576 }
1577}
1578
1579static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1580{
1581 bnx2x_free_tx_skbs_cnic(bp);
1582 bnx2x_free_rx_skbs_cnic(bp);
1583}
1584
1585void bnx2x_free_skbs(struct bnx2x *bp)
1586{
1587 bnx2x_free_tx_skbs(bp);
1588 bnx2x_free_rx_skbs(bp);
1589}
1590
1591void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1592{
1593
1594 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1595
1596 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1597
1598 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1599
1600
1601 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1602 & FUNC_MF_CFG_MAX_BW_MASK;
1603
1604 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1605 }
1606}

/**
 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
 *
 * @bp:		driver handle
 * @nvecs:	number of vectors to be freed
 */
1614static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1615{
1616 int i, offset = 0;
1617
1618 if (nvecs == offset)
1619 return;
1620
1621
1622 if (IS_PF(bp)) {
1623 free_irq(bp->msix_table[offset].vector, bp->dev);
1624 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1625 bp->msix_table[offset].vector);
1626 offset++;
1627 }
1628
1629 if (CNIC_SUPPORT(bp)) {
1630 if (nvecs == offset)
1631 return;
1632 offset++;
1633 }
1634
1635 for_each_eth_queue(bp, i) {
1636 if (nvecs == offset)
1637 return;
1638 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1639 i, bp->msix_table[offset].vector);
1640
1641 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1642 }
1643}
1644
1645void bnx2x_free_irq(struct bnx2x *bp)
1646{
1647 if (bp->flags & USING_MSIX_FLAG &&
1648 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1649 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1650
1651
1652 if (IS_PF(bp))
1653 nvecs++;
1654
1655 bnx2x_free_msix_irqs(bp, nvecs);
1656 } else {
1657 free_irq(bp->dev->irq, bp->dev);
1658 }
1659}
1660
1661int bnx2x_enable_msix(struct bnx2x *bp)
1662{
1663 int msix_vec = 0, i, rc;
1664
1665
1666 if (IS_PF(bp)) {
1667 bp->msix_table[msix_vec].entry = msix_vec;
1668 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1669 bp->msix_table[0].entry);
1670 msix_vec++;
1671 }
1672
1673
1674 if (CNIC_SUPPORT(bp)) {
1675 bp->msix_table[msix_vec].entry = msix_vec;
1676 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1677 msix_vec, bp->msix_table[msix_vec].entry);
1678 msix_vec++;
1679 }
1680
1681
1682 for_each_eth_queue(bp, i) {
1683 bp->msix_table[msix_vec].entry = msix_vec;
1684 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1685 msix_vec, msix_vec, i);
1686 msix_vec++;
1687 }
1688
1689 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1690 msix_vec);
1691
1692 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1693 BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
1694
1695
1696
1697
1698 if (rc == -ENOSPC) {
1699
1700 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1701 if (rc < 0) {
1702 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1703 rc);
1704 goto no_msix;
1705 }
1706
1707 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1708 bp->flags |= USING_SINGLE_MSIX_FLAG;
1709
1710 BNX2X_DEV_INFO("set number of queues to 1\n");
1711 bp->num_ethernet_queues = 1;
1712 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1713 } else if (rc < 0) {
1714 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1715 goto no_msix;
1716 } else if (rc < msix_vec) {
1717
1718 int diff = msix_vec - rc;
1719
1720 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1721
1722
1723
1724
1725 bp->num_ethernet_queues -= diff;
1726 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1727
1728 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1729 bp->num_queues);
1730 }
1731
1732 bp->flags |= USING_MSIX_FLAG;
1733
1734 return 0;
1735
1736no_msix:
1737
1738 if (rc == -ENOMEM)
1739 bp->flags |= DISABLE_MSI_FLAG;
1740
1741 return rc;
1742}
1743
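/* Request the slow-path IRQ (PF only) and one IRQ per ethernet queue,
 * skipping over the CNIC vector; on failure free whatever was already
 * requested.
 */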
1744static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1745{
1746 int i, rc, offset = 0;
1747
1748
1749 if (IS_PF(bp)) {
1750 rc = request_irq(bp->msix_table[offset++].vector,
1751 bnx2x_msix_sp_int, 0,
1752 bp->dev->name, bp->dev);
1753 if (rc) {
1754 BNX2X_ERR("request sp irq failed\n");
1755 return -EBUSY;
1756 }
1757 }
1758
1759 if (CNIC_SUPPORT(bp))
1760 offset++;
1761
1762 for_each_eth_queue(bp, i) {
1763 struct bnx2x_fastpath *fp = &bp->fp[i];
1764 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1765 bp->dev->name, i);
1766
1767 rc = request_irq(bp->msix_table[offset].vector,
1768 bnx2x_msix_fp_int, 0, fp->name, fp);
1769 if (rc) {
1770 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1771 bp->msix_table[offset].vector, rc);
1772 bnx2x_free_msix_irqs(bp, offset);
1773 return -EBUSY;
1774 }
1775
1776 offset++;
1777 }
1778
1779 i = BNX2X_NUM_ETH_QUEUES(bp);
1780 if (IS_PF(bp)) {
1781 offset = 1 + CNIC_SUPPORT(bp);
1782 netdev_info(bp->dev,
1783 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1784 bp->msix_table[0].vector,
1785 0, bp->msix_table[offset].vector,
1786 i - 1, bp->msix_table[offset + i - 1].vector);
1787 } else {
1788 offset = CNIC_SUPPORT(bp);
1789 netdev_info(bp->dev,
1790 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1791 0, bp->msix_table[offset].vector,
1792 i - 1, bp->msix_table[offset + i - 1].vector);
1793 }
1794 return 0;
1795}
1796
1797int bnx2x_enable_msi(struct bnx2x *bp)
1798{
1799 int rc;
1800
1801 rc = pci_enable_msi(bp->pdev);
1802 if (rc) {
1803 BNX2X_DEV_INFO("MSI is not attainable\n");
1804 return -1;
1805 }
1806 bp->flags |= USING_MSI_FLAG;
1807
1808 return 0;
1809}
1810
1811static int bnx2x_req_irq(struct bnx2x *bp)
1812{
1813 unsigned long flags;
1814 unsigned int irq;
1815
1816 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1817 flags = 0;
1818 else
1819 flags = IRQF_SHARED;
1820
1821 if (bp->flags & USING_MSIX_FLAG)
1822 irq = bp->msix_table[0].vector;
1823 else
1824 irq = bp->pdev->irq;
1825
1826 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1827}
1828
1829static int bnx2x_setup_irqs(struct bnx2x *bp)
1830{
1831 int rc = 0;
1832 if (bp->flags & USING_MSIX_FLAG &&
1833 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1834 rc = bnx2x_req_msix_irqs(bp);
1835 if (rc)
1836 return rc;
1837 } else {
1838 rc = bnx2x_req_irq(bp);
1839 if (rc) {
1840 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1841 return rc;
1842 }
1843 if (bp->flags & USING_MSI_FLAG) {
1844 bp->dev->irq = bp->pdev->irq;
1845 netdev_info(bp->dev, "using MSI IRQ %d\n",
1846 bp->dev->irq);
1847 }
1848 if (bp->flags & USING_MSIX_FLAG) {
1849 bp->dev->irq = bp->msix_table[0].vector;
1850 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1851 bp->dev->irq);
1852 }
1853 }
1854
1855 return 0;
1856}
1857
1858static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1859{
1860 int i;
1861
1862 for_each_rx_queue_cnic(bp, i) {
1863 napi_enable(&bnx2x_fp(bp, i, napi));
1864 }
1865}
1866
1867static void bnx2x_napi_enable(struct bnx2x *bp)
1868{
1869 int i;
1870
1871 for_each_eth_queue(bp, i) {
1872 napi_enable(&bnx2x_fp(bp, i, napi));
1873 }
1874}
1875
1876static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1877{
1878 int i;
1879
1880 for_each_rx_queue_cnic(bp, i) {
1881 napi_disable(&bnx2x_fp(bp, i, napi));
1882 }
1883}
1884
1885static void bnx2x_napi_disable(struct bnx2x *bp)
1886{
1887 int i;
1888
1889 for_each_eth_queue(bp, i) {
1890 napi_disable(&bnx2x_fp(bp, i, napi));
1891 }
1892}
1893
1894void bnx2x_netif_start(struct bnx2x *bp)
1895{
1896 if (netif_running(bp->dev)) {
1897 bnx2x_napi_enable(bp);
1898 if (CNIC_LOADED(bp))
1899 bnx2x_napi_enable_cnic(bp);
1900 bnx2x_int_enable(bp);
1901 if (bp->state == BNX2X_STATE_OPEN)
1902 netif_tx_wake_all_queues(bp->dev);
1903 }
1904}
1905
1906void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1907{
1908 bnx2x_int_disable_sync(bp, disable_hw);
1909 bnx2x_napi_disable(bp);
1910 if (CNIC_LOADED(bp))
1911 bnx2x_napi_disable_cnic(bp);
1912}
1913
1914u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1915 struct net_device *sb_dev)
1916{
1917 struct bnx2x *bp = netdev_priv(dev);
1918
1919 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1920 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1921 u16 ether_type = ntohs(hdr->h_proto);
1922
1923
1924 if (ether_type == ETH_P_8021Q) {
1925 struct vlan_ethhdr *vhdr =
1926 (struct vlan_ethhdr *)skb->data;
1927
1928 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1929 }
1930
1931
1932 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1933 return bnx2x_fcoe_tx(bp, txq_index);
1934 }
1935
1936
1937 return netdev_pick_tx(dev, skb, NULL) %
1938 (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
1939}
1940
1941void bnx2x_set_num_queues(struct bnx2x *bp)
1942{
1943
1944 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1945
1946
1947 if (IS_MF_STORAGE_ONLY(bp))
1948 bp->num_ethernet_queues = 1;
1949
1950
1951 bp->num_cnic_queues = CNIC_SUPPORT(bp);
1952 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1953
1954 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1955}

/**
 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
 *
 * @bp:			driver handle
 * @include_cnic:	also account for the FCoE L2 queue
 *
 * Each ethernet queue contributes bp->max_cos Tx queues; the FCoE L2
 * ring, when present, takes one extra Rx and Tx queue at the end.
 */
1979static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1980{
1981 int rc, tx, rx;
1982
1983 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1984 rx = BNX2X_NUM_ETH_QUEUES(bp);
1985
1986
1987 if (include_cnic && !NO_FCOE(bp)) {
1988 rx++;
1989 tx++;
1990 }
1991
1992 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1993 if (rc) {
1994 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1995 return rc;
1996 }
1997 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1998 if (rc) {
1999 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
2000 return rc;
2001 }
2002
2003 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
2004 tx, rx);
2005
2006 return rc;
2007}
2008
2009static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
2010{
2011 int i;
2012
2013 for_each_queue(bp, i) {
2014 struct bnx2x_fastpath *fp = &bp->fp[i];
2015 u32 mtu;
2016
2017
2018 if (IS_FCOE_IDX(i))
2019
2020
2021
2022
2023
2024
2025 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2026 else
2027 mtu = bp->dev->mtu;
2028 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2029 IP_HEADER_ALIGNMENT_PADDING +
2030 ETH_OVERHEAD +
2031 mtu +
2032 BNX2X_FW_RX_ALIGN_END;
2033 fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size);
2034
2035 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2036 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2037 else
2038 fp->rx_frag_size = 0;
2039 }
2040}
2041
2042static int bnx2x_init_rss(struct bnx2x *bp)
2043{
2044 int i;
2045 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2046
2047
2048
2049
2050 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2051 bp->rss_conf_obj.ind_table[i] =
2052 bp->fp->cl_id +
2053 ethtool_rxfh_indir_default(i, num_eth_queues);
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
2064}
2065
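/* Program the RSS engine: hash types, indirection table and (optionally) a
 * fresh hash key; with 'enable' false RSS is turned off.
 */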
int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
	      bool config_hash, bool enable)
{
	struct bnx2x_config_rss_params params = {NULL};

	params.rss_obj = rss_obj;

	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	if (enable) {
		__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);

		__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
		if (rss_obj->udp_rss_v4)
			__set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
		if (rss_obj->udp_rss_v6)
			__set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);

		if (!CHIP_IS_E1x(bp)) {
			__set_bit(BNX2X_RSS_IPV4_VXLAN, &params.rss_flags);
			__set_bit(BNX2X_RSS_IPV6_VXLAN, &params.rss_flags);

			__set_bit(BNX2X_RSS_TUNN_INNER_HDRS, &params.rss_flags);
		}
	} else {
		__set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
	}

	params.rss_result_mask = MULTI_MASK;

	memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));

	if (config_hash) {
		netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
		__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
	}

	if (IS_PF(bp))
		return bnx2x_config_rss(bp, &params);
	else
		return bnx2x_vfpf_config_rss(bp, &params);
}
2123
2124static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2125{
2126 struct bnx2x_func_state_params func_params = {NULL};
2127
2128
2129 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2130
2131 func_params.f_obj = &bp->func_obj;
2132 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2133
2134 func_params.params.hw_init.load_phase = load_code;
2135
2136 return bnx2x_func_state_change(bp, &func_params);
2137}

/* Clean up driver-side MAC and multicast object state without sending
 * ramrods to the device (RAMROD_DRV_CLR_ONLY); used on the NIC-load error
 * path.
 */
2143void bnx2x_squeeze_objects(struct bnx2x *bp)
2144{
2145 int rc;
2146 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2147 struct bnx2x_mcast_ramrod_params rparam = {NULL};
2148 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2149
2150
2151
2152
2153 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2154
2155 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2156
2157
2158 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2159 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2160 &ramrod_flags);
2161 if (rc != 0)
2162 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2163
2164
2165 vlan_mac_flags = 0;
2166 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2167 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2168 &ramrod_flags);
2169 if (rc != 0)
2170 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2171
2172
2173 rparam.mcast_obj = &bp->mcast_obj;
2174 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2175
2176
2177
2178
2179
2180 netif_addr_lock_bh(bp->dev);
2181 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2182 if (rc < 0)
2183 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2184 rc);
2185
2186
2187 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2188 while (rc != 0) {
2189 if (rc < 0) {
2190 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2191 rc);
2192 netif_addr_unlock_bh(bp->dev);
2193 return;
2194 }
2195
2196 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2197 }
2198 netif_addr_unlock_bh(bp->dev);
2199}
2200
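/* Error unwinding during nic load: normally mark the device as errored and
 * jump to the given cleanup label; with BNX2X_STOP_ON_ERROR set the debug
 * panic flag and bail out with -EBUSY instead.
 */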
2201#ifndef BNX2X_STOP_ON_ERROR
2202#define LOAD_ERROR_EXIT(bp, label) \
2203 do { \
2204 (bp)->state = BNX2X_STATE_ERROR; \
2205 goto label; \
2206 } while (0)
2207
2208#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2209 do { \
2210 bp->cnic_loaded = false; \
2211 goto label; \
2212 } while (0)
2213#else
2214#define LOAD_ERROR_EXIT(bp, label) \
2215 do { \
2216 (bp)->state = BNX2X_STATE_ERROR; \
2217 (bp)->panic = 1; \
2218 return -EBUSY; \
2219 } while (0)
2220#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2221 do { \
2222 bp->cnic_loaded = false; \
2223 (bp)->panic = 1; \
2224 return -EBUSY; \
2225 } while (0)
2226#endif
2227
2228static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2229{
2230 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2231 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2232 return;
2233}
2234
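/* Size and allocate the single DMA block that holds both the firmware
 * statistics request and the statistics data, then carve it up into the
 * request and data regions.
 */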
2235static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2236{
2237 int num_groups, vf_headroom = 0;
2238 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2239
2240
2241 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2242
2243
2244
2245
2246
2247
2248 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2249
2250
2251
2252
2253
2254
2255 if (IS_SRIOV(bp))
2256 vf_headroom = bnx2x_vf_headroom(bp);
2257
2258
2259
2260
2261
2262
2263 num_groups =
2264 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2265 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2266 1 : 0));
2267
2268 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2269 bp->fw_stats_num, vf_headroom, num_groups);
2270 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2271 num_groups * sizeof(struct stats_query_cmd_group);
2272
2273
2274
2275
2276
2277
2278
2279
2280
2281 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2282 sizeof(struct per_pf_stats) +
2283 sizeof(struct fcoe_statistics_params) +
2284 sizeof(struct per_queue_stats) * num_queue_stats +
2285 sizeof(struct stats_counter);
2286
2287 bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2288 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2289 if (!bp->fw_stats)
2290 goto alloc_mem_err;
2291
2292
2293 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2294 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2295 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2296 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2297 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2298 bp->fw_stats_req_sz;
2299
2300 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2301 U64_HI(bp->fw_stats_req_mapping),
2302 U64_LO(bp->fw_stats_req_mapping));
2303 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2304 U64_HI(bp->fw_stats_data_mapping),
2305 U64_LO(bp->fw_stats_data_mapping));
2306 return 0;
2307
2308alloc_mem_err:
2309 bnx2x_free_fw_stats_mem(bp);
2310 BNX2X_ERR("Can't allocate FW stats memory\n");
2311 return -ENOMEM;
2312}
2313
2314
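/* Ask the MCP (management firmware) for permission to load, returning the
 * load_code that tells us whether we are the first driver on the chip/port.
 */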
2315static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2316{
2317 u32 param;
2318
2319
2320 bp->fw_seq =
2321 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2322 DRV_MSG_SEQ_NUMBER_MASK);
2323 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2324
2325
2326 bp->fw_drv_pulse_wr_seq =
2327 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2328 DRV_PULSE_SEQ_MASK);
2329 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2330
2331 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2332
2333 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2334 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2335
2336
2337 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2338
2339
2340 if (!(*load_code)) {
2341 BNX2X_ERR("MCP response failure, aborting\n");
2342 return -EBUSY;
2343 }
2344
2345
2346
2347
2348 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2349 BNX2X_ERR("MCP refused load request, aborting\n");
2350 return -EBUSY;
2351 }
2352 return 0;
2353}
2354
2355
2356
2357
2358
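/* Make sure the firmware already loaded on the chip (by another function)
 * matches the version this driver was built against.
 */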
2359int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
2360{
2361
2362 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2363 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2364
2365 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2366 (BCM_5710_FW_MINOR_VERSION << 8) +
2367 (BCM_5710_FW_REVISION_VERSION << 16) +
2368 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2369
2370
2371 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2372
2373 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2374 loaded_fw, my_fw);
2375
2376
2377 if (my_fw != loaded_fw) {
2378 if (print_err)
2379 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2380 loaded_fw, my_fw);
2381 else
2382 BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2383 loaded_fw, my_fw);
2384 return -EBUSY;
2385 }
2386 }
2387 return 0;
2388}
2389
2390
2391static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2392{
2393 int path = BP_PATH(bp);
2394
2395 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2396 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2397 bnx2x_load_count[path][2]);
2398 bnx2x_load_count[path][0]++;
2399 bnx2x_load_count[path][1 + port]++;
2400 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2401 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2402 bnx2x_load_count[path][2]);
2403 if (bnx2x_load_count[path][0] == 1)
2404 return FW_MSG_CODE_DRV_LOAD_COMMON;
2405 else if (bnx2x_load_count[path][1 + port] == 1)
2406 return FW_MSG_CODE_DRV_LOAD_PORT;
2407 else
2408 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2409}
2410
2411
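/* Mark this function as PMF when the MCP load response indicates a
 * COMMON, COMMON_CHIP or PORT load.
 */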
2412static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2413{
2414 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2415 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2416 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2417 bp->port.pmf = 1;
2418
2419
2420
2421
2422 smp_mb();
2423 } else {
2424 bp->port.pmf = 0;
2425 }
2426
2427 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2428}
2429
2430static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2431{
2432 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2433 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2434 (bp->common.shmem2_base)) {
2435 if (SHMEM2_HAS(bp, dcc_support))
2436 SHMEM2_WR(bp, dcc_support,
2437 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2438 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2439 if (SHMEM2_HAS(bp, afex_driver_support))
2440 SHMEM2_WR(bp, afex_driver_support,
2441 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2442 }
2443
2444
2445 bp->afex_def_vlan_tag = -1;
2446}
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
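/* Zero the fastpath structure at @index while preserving its napi context
 * and tpa_info storage, then restore the basic fields and the TPA mode.
 */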
2457static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2458{
2459 struct bnx2x_fastpath *fp = &bp->fp[index];
2460 int cos;
2461 struct napi_struct orig_napi = fp->napi;
2462 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2463
2464
2465 if (fp->tpa_info)
2466 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2467 sizeof(struct bnx2x_agg_info));
2468 memset(fp, 0, sizeof(*fp));
2469
2470
2471 fp->napi = orig_napi;
2472 fp->tpa_info = orig_tpa_info;
2473 fp->bp = bp;
2474 fp->index = index;
2475 if (IS_ETH_FP(fp))
2476 fp->max_cos = bp->max_cos;
2477 else
2478
2479 fp->max_cos = 1;
2480
2481
2482 if (IS_FCOE_FP(fp))
2483 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2484 if (IS_ETH_FP(fp))
2485 for_each_cos_in_tx_queue(fp, cos)
2486 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2487 BNX2X_NUM_ETH_QUEUES(bp) + index];
2488
2489
2490
2491
2492 if (bp->dev->features & NETIF_F_LRO)
2493 fp->mode = TPA_MODE_LRO;
2494 else if (bp->dev->features & NETIF_F_GRO_HW)
2495 fp->mode = TPA_MODE_GRO;
2496 else
2497 fp->mode = TPA_MODE_DISABLED;
2498
2499
2500
2501
2502 if (bp->disable_tpa || IS_FCOE_FP(fp))
2503 fp->mode = TPA_MODE_DISABLED;
2504}
2505
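/* Report the OS driver state to the management FW via shmem2; only
 * relevant for BD multi-function PFs.
 */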
2506void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
2507{
2508 u32 cur;
2509
2510 if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
2511 return;
2512
2513 cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
2514 DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
2515 cur, state);
2516
2517 SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
2518}
2519
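/* Allocate and initialize CNIC (iSCSI/FCoE) resources, bring up the CNIC
 * queues and notify the CNIC driver.
 */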
2520int bnx2x_load_cnic(struct bnx2x *bp)
2521{
2522 int i, rc, port = BP_PORT(bp);
2523
2524 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2525
2526 mutex_init(&bp->cnic_mutex);
2527
2528 if (IS_PF(bp)) {
2529 rc = bnx2x_alloc_mem_cnic(bp);
2530 if (rc) {
2531 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2532 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2533 }
2534 }
2535
2536 rc = bnx2x_alloc_fp_mem_cnic(bp);
2537 if (rc) {
2538 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2539 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2540 }
2541
2542
2543 rc = bnx2x_set_real_num_queues(bp, 1);
2544 if (rc) {
2545 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2546 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2547 }
2548
2549
2550 bnx2x_add_all_napi_cnic(bp);
2551 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2552 bnx2x_napi_enable_cnic(bp);
2553
2554 rc = bnx2x_init_hw_func_cnic(bp);
2555 if (rc)
2556 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2557
2558 bnx2x_nic_init_cnic(bp);
2559
2560 if (IS_PF(bp)) {
2561
2562 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2563
2564
2565 for_each_cnic_queue(bp, i) {
2566 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2567 if (rc) {
2568 BNX2X_ERR("Queue setup failed\n");
2569 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2570 }
2571 }
2572 }
2573
2574
2575 bnx2x_set_rx_mode_inner(bp);
2576
2577
2578 bnx2x_get_iscsi_info(bp);
2579 bnx2x_setup_cnic_irq_info(bp);
2580 bnx2x_setup_cnic_info(bp);
2581 bp->cnic_loaded = true;
2582 if (bp->state == BNX2X_STATE_OPEN)
2583 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2584
2585	DP(NETIF_MSG_IFUP, "CNIC-related load completed successfully\n");
2586
2587 return 0;
2588
2589#ifndef BNX2X_STOP_ON_ERROR
2590load_error_cnic2:
2591
2592 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2593
2594load_error_cnic1:
2595 bnx2x_napi_disable_cnic(bp);
2596
2597 if (bnx2x_set_real_num_queues(bp, 0))
2598 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2599load_error_cnic0:
2600 BNX2X_ERR("CNIC-related load failed\n");
2601 bnx2x_free_fp_mem_cnic(bp);
2602 bnx2x_free_mem_cnic(bp);
2603 return rc;
2604#endif
2605}
2606
2607
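/* must be called with rtnl_lock */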
2608int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2609{
2610 int port = BP_PORT(bp);
2611 int i, rc = 0, load_code = 0;
2612
2613 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2614 DP(NETIF_MSG_IFUP,
2615 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2616
2617#ifdef BNX2X_STOP_ON_ERROR
2618 if (unlikely(bp->panic)) {
2619 BNX2X_ERR("Can't load NIC when there is panic\n");
2620 return -EPERM;
2621 }
2622#endif
2623
2624 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2625
2626
2627 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2628 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2629 &bp->last_reported_link.link_report_flags);
2630
2631 if (IS_PF(bp))
2632
2633 bnx2x_ilt_set_info(bp);
2634
2635
2636
2637
2638
2639
2640	DP(NETIF_MSG_IFUP, "num queues: %d\n", bp->num_queues);
2641 for_each_queue(bp, i)
2642 bnx2x_bz_fp(bp, i);
2643 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2644 bp->num_cnic_queues) *
2645 sizeof(struct bnx2x_fp_txdata));
2646
2647 bp->fcoe_init = false;
2648
2649
2650 bnx2x_set_rx_buf_size(bp);
2651
2652 if (IS_PF(bp)) {
2653 rc = bnx2x_alloc_mem(bp);
2654 if (rc) {
2655 BNX2X_ERR("Unable to allocate bp memory\n");
2656 return rc;
2657 }
2658 }
2659
2660
2661
2662
2663 rc = bnx2x_alloc_fp_mem(bp);
2664 if (rc) {
2665 BNX2X_ERR("Unable to allocate memory for fps\n");
2666 LOAD_ERROR_EXIT(bp, load_error0);
2667 }
2668
2669
2670 if (bnx2x_alloc_fw_stats_mem(bp))
2671 LOAD_ERROR_EXIT(bp, load_error0);
2672
2673
2674 if (IS_VF(bp)) {
2675 rc = bnx2x_vfpf_init(bp);
2676 if (rc)
2677 LOAD_ERROR_EXIT(bp, load_error0);
2678 }
2679
2680
2681
2682
2683
2684 rc = bnx2x_set_real_num_queues(bp, 0);
2685 if (rc) {
2686 BNX2X_ERR("Unable to set real_num_queues\n");
2687 LOAD_ERROR_EXIT(bp, load_error0);
2688 }
2689
2690
2691
2692
2693
2694 bnx2x_setup_tc(bp->dev, bp->max_cos);
2695
2696
2697 bnx2x_add_all_napi(bp);
2698 DP(NETIF_MSG_IFUP, "napi added\n");
2699 bnx2x_napi_enable(bp);
2700
2701 if (IS_PF(bp)) {
2702
2703 bnx2x_set_pf_load(bp);
2704
2705
2706 if (!BP_NOMCP(bp)) {
2707
2708 rc = bnx2x_nic_load_request(bp, &load_code);
2709 if (rc)
2710 LOAD_ERROR_EXIT(bp, load_error1);
2711
2712
2713 rc = bnx2x_compare_fw_ver(bp, load_code, true);
2714 if (rc) {
2715 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2716 LOAD_ERROR_EXIT(bp, load_error2);
2717 }
2718 } else {
2719 load_code = bnx2x_nic_load_no_mcp(bp, port);
2720 }
2721
2722
2723 bnx2x_nic_load_pmf(bp, load_code);
2724
2725
2726 bnx2x__init_func_obj(bp);
2727
2728
2729 rc = bnx2x_init_hw(bp, load_code);
2730 if (rc) {
2731 BNX2X_ERR("HW init failed, aborting\n");
2732 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2733 LOAD_ERROR_EXIT(bp, load_error2);
2734 }
2735 }
2736
2737 bnx2x_pre_irq_nic_init(bp);
2738
2739
2740 rc = bnx2x_setup_irqs(bp);
2741 if (rc) {
2742 BNX2X_ERR("setup irqs failed\n");
2743 if (IS_PF(bp))
2744 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2745 LOAD_ERROR_EXIT(bp, load_error2);
2746 }
2747
2748
2749 if (IS_PF(bp)) {
2750
2751 bnx2x_post_irq_nic_init(bp, load_code);
2752
2753 bnx2x_init_bp_objs(bp);
2754 bnx2x_iov_nic_init(bp);
2755
2756
2757 bp->afex_def_vlan_tag = -1;
2758 bnx2x_nic_load_afex_dcc(bp, load_code);
2759 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2760 rc = bnx2x_func_start(bp);
2761 if (rc) {
2762 BNX2X_ERR("Function start failed!\n");
2763 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2764
2765 LOAD_ERROR_EXIT(bp, load_error3);
2766 }
2767
2768
2769 if (!BP_NOMCP(bp)) {
2770 load_code = bnx2x_fw_command(bp,
2771 DRV_MSG_CODE_LOAD_DONE, 0);
2772 if (!load_code) {
2773 BNX2X_ERR("MCP response failure, aborting\n");
2774 rc = -EBUSY;
2775 LOAD_ERROR_EXIT(bp, load_error3);
2776 }
2777 }
2778
2779
2780 bnx2x_update_coalesce(bp);
2781 }
2782
2783
2784 rc = bnx2x_setup_leading(bp);
2785 if (rc) {
2786 BNX2X_ERR("Setup leading failed!\n");
2787 LOAD_ERROR_EXIT(bp, load_error3);
2788 }
2789
2790
2791 for_each_nondefault_eth_queue(bp, i) {
2792 if (IS_PF(bp))
2793 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2794 else
2795 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2796 if (rc) {
2797 BNX2X_ERR("Queue %d setup failed\n", i);
2798 LOAD_ERROR_EXIT(bp, load_error3);
2799 }
2800 }
2801
2802
2803 rc = bnx2x_init_rss(bp);
2804 if (rc) {
2805 BNX2X_ERR("PF RSS init failed\n");
2806 LOAD_ERROR_EXIT(bp, load_error3);
2807 }
2808
2809
2810 bp->state = BNX2X_STATE_OPEN;
2811
2812
2813 if (IS_PF(bp))
2814 rc = bnx2x_set_eth_mac(bp, true);
2815 else
2816 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2817 true);
2818 if (rc) {
2819 BNX2X_ERR("Setting Ethernet MAC failed\n");
2820 LOAD_ERROR_EXIT(bp, load_error3);
2821 }
2822
2823 if (IS_PF(bp) && bp->pending_max) {
2824 bnx2x_update_max_mf_config(bp, bp->pending_max);
2825 bp->pending_max = 0;
2826 }
2827
2828 bp->force_link_down = false;
2829 if (bp->port.pmf) {
2830 rc = bnx2x_initial_phy_init(bp, load_mode);
2831 if (rc)
2832 LOAD_ERROR_EXIT(bp, load_error3);
2833 }
2834 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2835
2836
2837
2838
2839 rc = bnx2x_vlan_reconfigure_vid(bp);
2840 if (rc)
2841 LOAD_ERROR_EXIT(bp, load_error3);
2842
2843
2844 bnx2x_set_rx_mode_inner(bp);
2845
2846 if (bp->flags & PTP_SUPPORTED) {
2847 bnx2x_register_phc(bp);
2848 bnx2x_init_ptp(bp);
2849 bnx2x_configure_ptp_filters(bp);
2850 }
2851
2852 switch (load_mode) {
2853 case LOAD_NORMAL:
2854
2855 netif_tx_wake_all_queues(bp->dev);
2856 break;
2857
2858 case LOAD_OPEN:
2859 netif_tx_start_all_queues(bp->dev);
2860 smp_mb__after_atomic();
2861 break;
2862
2863 case LOAD_DIAG:
2864 case LOAD_LOOPBACK_EXT:
2865 bp->state = BNX2X_STATE_DIAG;
2866 break;
2867
2868 default:
2869 break;
2870 }
2871
2872 if (bp->port.pmf)
2873 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2874 else
2875 bnx2x__link_status_update(bp);
2876
2877
2878 mod_timer(&bp->timer, jiffies + bp->current_interval);
2879
2880 if (CNIC_ENABLED(bp))
2881 bnx2x_load_cnic(bp);
2882
2883 if (IS_PF(bp))
2884 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2885
2886 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2887
2888 u32 val;
2889 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2890 val &= ~DRV_FLAGS_MTU_MASK;
2891 val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
2892 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2893 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2894 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2895 }
2896
2897
2898 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2899 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2900 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2901 return -EBUSY;
2902 }
2903
2904
2905 if (IS_PF(bp))
2906 bnx2x_update_mfw_dump(bp);
2907
2908
2909 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2910 bnx2x_dcbx_init(bp, false);
2911
2912 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2913 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
2914
2915	DP(NETIF_MSG_IFUP, "NIC load completed successfully\n");
2916
2917 return 0;
2918
2919#ifndef BNX2X_STOP_ON_ERROR
2920load_error3:
2921 if (IS_PF(bp)) {
2922 bnx2x_int_disable_sync(bp, 1);
2923
2924
2925 bnx2x_squeeze_objects(bp);
2926 }
2927
2928
2929 bnx2x_free_skbs(bp);
2930 for_each_rx_queue(bp, i)
2931 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2932
2933
2934 bnx2x_free_irq(bp);
2935load_error2:
2936 if (IS_PF(bp) && !BP_NOMCP(bp)) {
2937 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2938 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2939 }
2940
2941 bp->port.pmf = 0;
2942load_error1:
2943 bnx2x_napi_disable(bp);
2944 bnx2x_del_all_napi(bp);
2945
2946
2947 if (IS_PF(bp))
2948 bnx2x_clear_pf_load(bp);
2949load_error0:
2950 bnx2x_free_fw_stats_mem(bp);
2951 bnx2x_free_fp_mem(bp);
2952 bnx2x_free_mem(bp);
2953
2954 return rc;
2955#endif
2956}
2957
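/* Wait for every TX queue (all CoS rings) to drain */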
2958int bnx2x_drain_tx_queues(struct bnx2x *bp)
2959{
2960 u8 rc = 0, cos, i;
2961
2962
2963 for_each_tx_queue(bp, i) {
2964 struct bnx2x_fastpath *fp = &bp->fp[i];
2965
2966 for_each_cos_in_tx_queue(fp, cos)
2967 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2968 if (rc)
2969 return rc;
2970 }
2971 return 0;
2972}
2973
2974
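/* must be called with rtnl_lock */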
2975int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2976{
2977 int i;
2978 bool global = false;
2979
2980 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2981
2982 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2983 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
2984
2985
2986 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2987 u32 val;
2988 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2989 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2990 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2991 }
2992
2993 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2994 (bp->state == BNX2X_STATE_CLOSED ||
2995 bp->state == BNX2X_STATE_ERROR)) {
2996
2997
2998
2999
3000
3001
3002
3003 bp->recovery_state = BNX2X_RECOVERY_DONE;
3004 bp->is_leader = 0;
3005 bnx2x_release_leader_lock(bp);
3006 smp_mb();
3007
3008		DP(NETIF_MSG_IFDOWN, "Releasing leadership...\n");
3009 BNX2X_ERR("Can't unload in closed or error state\n");
3010 return -EINVAL;
3011 }
3012
3013
3014
3015
3016
3017
3018
3019 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
3020 return 0;
3021
3022
3023
3024
3025
3026 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
3027 smp_mb();
3028
3029
3030 bnx2x_iov_channel_down(bp);
3031
3032 if (CNIC_LOADED(bp))
3033 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
3034
3035
3036 bnx2x_tx_disable(bp);
3037 netdev_reset_tc(bp->dev);
3038
3039 bp->rx_mode = BNX2X_RX_MODE_NONE;
3040
3041 del_timer_sync(&bp->timer);
3042
3043 if (IS_PF(bp) && !BP_NOMCP(bp)) {
3044
3045 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3046 bnx2x_drv_pulse(bp);
3047 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
3048 bnx2x_save_statistics(bp);
3049 }
3050
3051
3052
3053
3054
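	/* Unless this is a recovery flow, wait for the TX queues to drain
	 * before cleaning up the chip.
	 */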
3055 if (unload_mode != UNLOAD_RECOVERY)
3056 bnx2x_drain_tx_queues(bp);
3057
3058
3059
3060
3061 if (IS_VF(bp)) {
3062 bnx2x_clear_vlan_info(bp);
3063 bnx2x_vfpf_close_vf(bp);
3064 } else if (unload_mode != UNLOAD_RECOVERY) {
3065
3066 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
3067 } else {
3068
3069 bnx2x_send_unload_req(bp, unload_mode);
3070
3071
3072
3073
3074
3075
3076
3077 if (!CHIP_IS_E1x(bp))
3078 bnx2x_pf_disable(bp);
3079
3080
3081 bnx2x_netif_stop(bp, 1);
3082
3083 bnx2x_del_all_napi(bp);
3084 if (CNIC_LOADED(bp))
3085 bnx2x_del_all_napi_cnic(bp);
3086
3087 bnx2x_free_irq(bp);
3088
3089
3090 bnx2x_send_unload_done(bp, false);
3091 }
3092
3093
3094
3095
3096
3097 if (IS_PF(bp))
3098 bnx2x_squeeze_objects(bp);
3099
3100
3101 bp->sp_state = 0;
3102
3103 bp->port.pmf = 0;
3104
3105
3106 bp->sp_rtnl_state = 0;
3107 smp_mb();
3108
3109
3110 bnx2x_free_skbs(bp);
3111 if (CNIC_LOADED(bp))
3112 bnx2x_free_skbs_cnic(bp);
3113 for_each_rx_queue(bp, i)
3114 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
3115
3116 bnx2x_free_fp_mem(bp);
3117 if (CNIC_LOADED(bp))
3118 bnx2x_free_fp_mem_cnic(bp);
3119
3120 if (IS_PF(bp)) {
3121 if (CNIC_LOADED(bp))
3122 bnx2x_free_mem_cnic(bp);
3123 }
3124 bnx2x_free_mem(bp);
3125
3126 bp->state = BNX2X_STATE_CLOSED;
3127 bp->cnic_loaded = false;
3128
3129
3130 if (IS_PF(bp) && !BP_NOMCP(bp))
3131 bnx2x_update_mng_version(bp);
3132
3133
3134
3135
3136 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
3137 bnx2x_set_reset_in_progress(bp);
3138
3139
3140 if (global)
3141 bnx2x_set_reset_global(bp);
3142 }
3143
3144
3145
3146
3147 if (IS_PF(bp) &&
3148 !bnx2x_clear_pf_load(bp) &&
3149 bnx2x_reset_is_done(bp, BP_PATH(bp)))
3150 bnx2x_disable_close_the_gate(bp);
3151
3152 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3153
3154 return 0;
3155}
3156
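/* Program the PCI PM control register to enter D0 or D3hot; PME is
 * enabled for D3hot when WoL is configured.
 */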
3157int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3158{
3159 u16 pmcsr;
3160
3161
3162 if (!bp->pdev->pm_cap) {
3163 BNX2X_DEV_INFO("No power capability. Breaking.\n");
3164 return 0;
3165 }
3166
3167 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3168
3169 switch (state) {
3170 case PCI_D0:
3171 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3172 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3173 PCI_PM_CTRL_PME_STATUS));
3174
3175 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3176
3177 msleep(20);
3178 break;
3179
3180 case PCI_D3hot:
3181
3182
3183 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3184 return 0;
3185
3186 if (CHIP_REV_IS_SLOW(bp))
3187 return 0;
3188
3189 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3190 pmcsr |= 3;
3191
3192 if (bp->wol)
3193 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3194
3195 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3196 pmcsr);
3197
3198
3199
3200
3201 break;
3202
3203 default:
3204 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3205 return -EINVAL;
3206 }
3207 return 0;
3208}
3209
3210
3211
3212
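/* NAPI poll handler: service TX completions on every CoS ring, process RX
 * up to @budget and re-arm the status block interrupt once all work is done.
 */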
3213static int bnx2x_poll(struct napi_struct *napi, int budget)
3214{
3215 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3216 napi);
3217 struct bnx2x *bp = fp->bp;
3218 int rx_work_done;
3219 u8 cos;
3220
3221#ifdef BNX2X_STOP_ON_ERROR
3222 if (unlikely(bp->panic)) {
3223 napi_complete(napi);
3224 return 0;
3225 }
3226#endif
3227 for_each_cos_in_tx_queue(fp, cos)
3228 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3229 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3230
3231 rx_work_done = (bnx2x_has_rx_work(fp)) ? bnx2x_rx_int(fp, budget) : 0;
3232
3233 if (rx_work_done < budget) {
3234
3235
3236
3237
3238 if (IS_FCOE_FP(fp)) {
3239 napi_complete_done(napi, rx_work_done);
3240 } else {
3241 bnx2x_update_fpsb_idx(fp);
3242
3243
3244
3245
3246
3247
3248
3249
3250
3251
3252
3253
3254
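			/* Ensure the status block indices read in
			 * bnx2x_update_fpsb_idx() are observed before the
			 * work re-check below; otherwise the IGU ack could
			 * use a stale index and no further interrupt would
			 * be generated for pending work.
			 */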
3255 rmb();
3256
3257 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3258 if (napi_complete_done(napi, rx_work_done)) {
3259
3260 DP(NETIF_MSG_RX_STATUS,
3261 "Update index to %d\n", fp->fp_hc_idx);
3262 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3263 le16_to_cpu(fp->fp_hc_idx),
3264 IGU_INT_ENABLE, 1);
3265 }
3266 } else {
3267 rx_work_done = budget;
3268 }
3269 }
3270 }
3271
3272 return rx_work_done;
3273}
3274
3275
3276
3277
3278
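/* Split the first BD of a TSO packet into a header BD and a data BD;
 * both BDs reuse the original DMA mapping.
 */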
3279static u16 bnx2x_tx_split(struct bnx2x *bp,
3280 struct bnx2x_fp_txdata *txdata,
3281 struct sw_tx_bd *tx_buf,
3282 struct eth_tx_start_bd **tx_bd, u16 hlen,
3283 u16 bd_prod)
3284{
3285 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3286 struct eth_tx_bd *d_tx_bd;
3287 dma_addr_t mapping;
3288 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3289
3290
3291 h_tx_bd->nbytes = cpu_to_le16(hlen);
3292
3293 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3294 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3295
3296
3297
3298 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3299 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3300
3301 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3302 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3303
3304 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3305 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3306 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3307
3308
3309 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3310
3311 DP(NETIF_MSG_TX_QUEUED,
3312 "TSO split data size is %d (%x:%x)\n",
3313 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3314
3315
3316 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3317
3318 return bd_prod;
3319}
3320
3321#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3322#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
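/* Adjust a partial checksum when the hardware checksum start differs from
 * the transport header by @fix bytes.
 */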
3323static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3324{
3325 __sum16 tsum = (__force __sum16) csum;
3326
3327 if (fix > 0)
3328 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3329 csum_partial(t_header - fix, fix, 0)));
3330
3331 else if (fix < 0)
3332 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3333 csum_partial(t_header, -fix, 0)));
3334
3335 return bswab16(tsum);
3336}
3337
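/* Classify an skb for transmit: returns an XMIT_* bitmask describing
 * checksum offload (IPv4/IPv6, TCP/UDP), encapsulation and GSO needs.
 */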
3338static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3339{
3340 u32 rc;
3341 __u8 prot = 0;
3342 __be16 protocol;
3343
3344 if (skb->ip_summed != CHECKSUM_PARTIAL)
3345 return XMIT_PLAIN;
3346
3347 protocol = vlan_get_protocol(skb);
3348 if (protocol == htons(ETH_P_IPV6)) {
3349 rc = XMIT_CSUM_V6;
3350 prot = ipv6_hdr(skb)->nexthdr;
3351 } else {
3352 rc = XMIT_CSUM_V4;
3353 prot = ip_hdr(skb)->protocol;
3354 }
3355
3356 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3357 if (inner_ip_hdr(skb)->version == 6) {
3358 rc |= XMIT_CSUM_ENC_V6;
3359 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3360 rc |= XMIT_CSUM_TCP;
3361 } else {
3362 rc |= XMIT_CSUM_ENC_V4;
3363 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3364 rc |= XMIT_CSUM_TCP;
3365 }
3366 }
3367 if (prot == IPPROTO_TCP)
3368 rc |= XMIT_CSUM_TCP;
3369
3370 if (skb_is_gso(skb)) {
3371 if (skb_is_gso_v6(skb)) {
3372 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3373 if (rc & XMIT_CSUM_ENC)
3374 rc |= XMIT_GSO_ENC_V6;
3375 } else {
3376 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3377 if (rc & XMIT_CSUM_ENC)
3378 rc |= XMIT_GSO_ENC_V4;
3379 }
3380 }
3381
3382 return rc;
3383}
3384
3385
3386#define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS 4
3387
3388
3389#define BNX2X_NUM_TSO_WIN_SUB_BDS 3
3390
3391#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3392
3393
3394
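/* Check whether a heavily fragmented LSO packet must be linearized: every
 * window of BDs the FW fetches must cover at least one MSS of payload.
 */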
3395static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3396 u32 xmit_type)
3397{
3398 int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS;
3399 int to_copy = 0, hlen = 0;
3400
3401 if (xmit_type & XMIT_GSO_ENC)
3402 num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS;
3403
3404 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) {
3405 if (xmit_type & XMIT_GSO) {
3406 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3407 int wnd_size = MAX_FETCH_BD - num_tso_win_sub;
3408
3409 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3410 int wnd_idx = 0;
3411 int frag_idx = 0;
3412 u32 wnd_sum = 0;
3413
3414
3415 if (xmit_type & XMIT_GSO_ENC)
3416 hlen = (int)(skb_inner_transport_header(skb) -
3417 skb->data) +
3418 inner_tcp_hdrlen(skb);
3419 else
3420 hlen = (int)(skb_transport_header(skb) -
3421 skb->data) + tcp_hdrlen(skb);
3422
3423
3424 first_bd_sz = skb_headlen(skb) - hlen;
3425
3426 wnd_sum = first_bd_sz;
3427
3428
3429 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3430 wnd_sum +=
3431 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3432
3433
3434 if (first_bd_sz > 0) {
3435 if (unlikely(wnd_sum < lso_mss)) {
3436 to_copy = 1;
3437 goto exit_lbl;
3438 }
3439
3440 wnd_sum -= first_bd_sz;
3441 }
3442
3443
3444
3445 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3446 wnd_sum +=
3447 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3448
3449 if (unlikely(wnd_sum < lso_mss)) {
3450 to_copy = 1;
3451 break;
3452 }
3453 wnd_sum -=
3454 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3455 }
3456 } else {
3457
3458
3459 to_copy = 1;
3460 }
3461 }
3462
3463exit_lbl:
3464 if (unlikely(to_copy))
3465 DP(NETIF_MSG_TX_QUEUED,
3466 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
3467 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3468 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3469
3470 return to_copy;
3471}
3472#endif
3473
3474
3475
3476
3477
3478
3479
3480
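/* Fill the E1x parse BD for a GSO packet: MSS, TCP sequence number and
 * flags, and the pseudo checksum computed without the length field.
 */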
3481static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3482 struct eth_tx_parse_bd_e1x *pbd,
3483 u32 xmit_type)
3484{
3485 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3486 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3487 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3488
3489 if (xmit_type & XMIT_GSO_V4) {
3490 pbd->ip_id = bswab16(ip_hdr(skb)->id);
3491 pbd->tcp_pseudo_csum =
3492 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3493 ip_hdr(skb)->daddr,
3494 0, IPPROTO_TCP, 0));
3495 } else {
3496 pbd->tcp_pseudo_csum =
3497 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3498 &ipv6_hdr(skb)->daddr,
3499 0, IPPROTO_TCP, 0));
3500 }
3501
3502 pbd->global_data |=
3503 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3504}
3505
3506
3507
3508
3509
3510
3511
3512
3513
3514
3515
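/* Update the E2 parsing data with the inner L4 header offset and length
 * for encapsulated packets; returns the total header length in bytes.
 */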
3516static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3517 u32 *parsing_data, u32 xmit_type)
3518{
3519 *parsing_data |=
3520 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3521 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3522 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3523
3524 if (xmit_type & XMIT_CSUM_TCP) {
3525 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3526 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3527 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3528
3529 return skb_inner_transport_header(skb) +
3530 inner_tcp_hdrlen(skb) - skb->data;
3531 }
3532
3533
3534
3535
3536 return skb_inner_transport_header(skb) +
3537 sizeof(struct udphdr) - skb->data;
3538}
3539
3540
3541
3542
3543
3544
3545
3546
3547
3548
3549
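/* Update the E2 parsing data with the L4 header offset and length for
 * non-encapsulated packets; returns the total header length in bytes.
 */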
3550static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3551 u32 *parsing_data, u32 xmit_type)
3552{
3553 *parsing_data |=
3554 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3555 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3556 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3557
3558 if (xmit_type & XMIT_CSUM_TCP) {
3559 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3560 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3561 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3562
3563 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3564 }
3565
3566
3567
3568 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3569}
3570
3571
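/* Set the checksum-related flags in the start BD: L4 checksum and, where
 * applicable, the IPv6 and UDP indications.
 */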
3572static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3573 struct eth_tx_start_bd *tx_start_bd,
3574 u32 xmit_type)
3575{
3576 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3577
3578 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3579 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3580
3581 if (!(xmit_type & XMIT_CSUM_TCP))
3582 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3583}
3584
3585
3586
3587
3588
3589
3590
3591
3592
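/* Fill the E1x parse BD with the header lengths and a fixed-up pseudo
 * checksum; returns the total header length in bytes.
 */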
3593static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3594 struct eth_tx_parse_bd_e1x *pbd,
3595 u32 xmit_type)
3596{
3597 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3598
3599
3600 pbd->global_data =
3601 cpu_to_le16(hlen |
3602 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3603 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3604
3605 pbd->ip_hlen_w = (skb_transport_header(skb) -
3606 skb_network_header(skb)) >> 1;
3607
3608 hlen += pbd->ip_hlen_w;
3609
3610
3611 if (xmit_type & XMIT_CSUM_TCP)
3612 hlen += tcp_hdrlen(skb) / 2;
3613 else
3614 hlen += sizeof(struct udphdr) / 2;
3615
3616 pbd->total_hlen_w = cpu_to_le16(hlen);
3617 hlen = hlen*2;
3618
3619 if (xmit_type & XMIT_CSUM_TCP) {
3620 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3621
3622 } else {
3623 s8 fix = SKB_CS_OFF(skb);
3624
3625 DP(NETIF_MSG_TX_QUEUED,
3626 "hlen %d fix %d csum before fix %x\n",
3627 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3628
3629
3630 pbd->tcp_pseudo_csum =
3631 bnx2x_csum_fix(skb_transport_header(skb),
3632 SKB_CS(skb), fix);
3633
3634 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3635 pbd->tcp_pseudo_csum);
3636 }
3637
3638 return hlen;
3639}
3640
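/* Fill the tunnel-related fields of the E2 parse BDs for encapsulated GSO:
 * inner header lengths, TCP sequence/flags, outer IP checksum (or IPv6
 * flag) and UDP tunnel offsets.
 */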
3641static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3642 struct eth_tx_parse_bd_e2 *pbd_e2,
3643 struct eth_tx_parse_2nd_bd *pbd2,
3644 u16 *global_data,
3645 u32 xmit_type)
3646{
3647 u16 hlen_w = 0;
3648 u8 outerip_off, outerip_len = 0;
3649
3650
3651 hlen_w = (skb_inner_transport_header(skb) -
3652 skb_network_header(skb)) >> 1;
3653
3654
3655 hlen_w += inner_tcp_hdrlen(skb) >> 1;
3656
3657 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3658
3659
3660 if (xmit_type & XMIT_CSUM_V4) {
3661 struct iphdr *iph = ip_hdr(skb);
3662 u32 csum = (__force u32)(~iph->check) -
3663 (__force u32)iph->tot_len -
3664 (__force u32)iph->frag_off;
3665
3666 outerip_len = iph->ihl << 1;
3667
3668 pbd2->fw_ip_csum_wo_len_flags_frag =
3669 bswab16(csum_fold((__force __wsum)csum));
3670 } else {
3671 pbd2->fw_ip_hdr_to_payload_w =
3672 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3673 pbd_e2->data.tunnel_data.flags |=
3674 ETH_TUNNEL_DATA_IPV6_OUTER;
3675 }
3676
3677 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3678
3679 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3680
3681
3682 if (xmit_type & XMIT_CSUM_ENC_V4) {
3683 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3684
3685 pbd_e2->data.tunnel_data.pseudo_csum =
3686 bswab16(~csum_tcpudp_magic(
3687 inner_ip_hdr(skb)->saddr,
3688 inner_ip_hdr(skb)->daddr,
3689 0, IPPROTO_TCP, 0));
3690 } else {
3691 pbd_e2->data.tunnel_data.pseudo_csum =
3692 bswab16(~csum_ipv6_magic(
3693 &inner_ipv6_hdr(skb)->saddr,
3694 &inner_ipv6_hdr(skb)->daddr,
3695 0, IPPROTO_TCP, 0));
3696 }
3697
3698 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3699
3700 *global_data |=
3701 outerip_off |
3702 (outerip_len <<
3703 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3704 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3705 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3706
3707 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3708 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3709 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3710 }
3711}
3712
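/* For GSO IPv6 packets, set the extension-header flag in the E2 parsing
 * data when the (inner) IPv6 next header is itself IPv6.
 */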
3713static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
3714 u32 xmit_type)
3715{
3716 struct ipv6hdr *ipv6;
3717
3718 if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
3719 return;
3720
3721 if (xmit_type & XMIT_GSO_ENC_V6)
3722 ipv6 = inner_ipv6_hdr(skb);
3723 else
3724 ipv6 = ipv6_hdr(skb);
3725
3726 if (ipv6->nexthdr == NEXTHDR_IPV6)
3727 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3728}
3729
3730
3731
3732
3733
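/* called with netif_tx_lock; bnx2x_tx_int() runs without netif_tx_lock
 * unless it needs to wake the queue.
 */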
3734netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3735{
3736 struct bnx2x *bp = netdev_priv(dev);
3737
3738 struct netdev_queue *txq;
3739 struct bnx2x_fp_txdata *txdata;
3740 struct sw_tx_bd *tx_buf;
3741 struct eth_tx_start_bd *tx_start_bd, *first_bd;
3742 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3743 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3744 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3745 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3746 u32 pbd_e2_parsing_data = 0;
3747 u16 pkt_prod, bd_prod;
3748 int nbd, txq_index;
3749 dma_addr_t mapping;
3750 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3751 int i;
3752 u8 hlen = 0;
3753 __le16 pkt_size = 0;
3754 struct ethhdr *eth;
3755 u8 mac_type = UNICAST_ADDRESS;
3756
3757#ifdef BNX2X_STOP_ON_ERROR
3758 if (unlikely(bp->panic))
3759 return NETDEV_TX_BUSY;
3760#endif
3761
3762 txq_index = skb_get_queue_mapping(skb);
3763 txq = netdev_get_tx_queue(dev, txq_index);
3764
3765 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3766
3767 txdata = &bp->bnx2x_txq[txq_index];
3768
3769
3770
3771
3772
3773
3774
3775
3776
3777
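	/* Make sure the BD ring has room for the worst case: one BD per
	 * fragment plus the start/parse BDs and the "next page" elements.
	 */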
3778 if (unlikely(bnx2x_tx_avail(bp, txdata) <
3779 skb_shinfo(skb)->nr_frags +
3780 BDS_PER_TX_PKT +
3781 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3782
3783 if (txdata->tx_ring_size == 0) {
3784 struct bnx2x_eth_q_stats *q_stats =
3785 bnx2x_fp_qstats(bp, txdata->parent_fp);
3786 q_stats->driver_filtered_tx_pkt++;
3787 dev_kfree_skb(skb);
3788 return NETDEV_TX_OK;
3789 }
3790 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3791 netif_tx_stop_queue(txq);
3792 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3793
3794 return NETDEV_TX_BUSY;
3795 }
3796
3797 DP(NETIF_MSG_TX_QUEUED,
3798 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
3799 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3800 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3801 skb->len);
3802
3803 eth = (struct ethhdr *)skb->data;
3804
3805
3806 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3807 if (is_broadcast_ether_addr(eth->h_dest))
3808 mac_type = BROADCAST_ADDRESS;
3809 else
3810 mac_type = MULTICAST_ADDRESS;
3811 }
3812
3813#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3814
3815
3816
3817 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3818
3819 bp->lin_cnt++;
3820 if (skb_linearize(skb) != 0) {
3821 DP(NETIF_MSG_TX_QUEUED,
3822 "SKB linearization failed - silently dropping this SKB\n");
3823 dev_kfree_skb_any(skb);
3824 return NETDEV_TX_OK;
3825 }
3826 }
3827#endif
3828
3829 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3830 skb_headlen(skb), DMA_TO_DEVICE);
3831 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3832 DP(NETIF_MSG_TX_QUEUED,
3833 "SKB mapping failed - silently dropping this SKB\n");
3834 dev_kfree_skb_any(skb);
3835 return NETDEV_TX_OK;
3836 }
3837
3838
3839
3840
3841
3842
3843
3844
3845
3846
3847
3848
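	/* A packet is described by a start BD, optional parse BD(s) and one
	 * BD per fragment; pkt_prod indexes the sw tx_buf ring while bd_prod
	 * indexes the hardware BD ring.
	 */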
3849 pkt_prod = txdata->tx_pkt_prod;
3850 bd_prod = TX_BD(txdata->tx_bd_prod);
3851
3852
3853
3854
3855
3856 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3857 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3858 first_bd = tx_start_bd;
3859
3860 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3861
3862 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
3863 if (!(bp->flags & TX_TIMESTAMPING_EN)) {
3864 bp->eth_stats.ptp_skip_tx_ts++;
3865 BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
3866 } else if (bp->ptp_tx_skb) {
3867 bp->eth_stats.ptp_skip_tx_ts++;
3868 netdev_err_once(bp->dev,
3869 "Device supports only a single outstanding packet to timestamp, this packet won't be timestamped\n");
3870 } else {
3871 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3872
3873 bp->ptp_tx_skb = skb_get(skb);
3874 bp->ptp_tx_start = jiffies;
3875 schedule_work(&bp->ptp_task);
3876 }
3877 }
3878
3879
3880 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3881
3882
3883 tx_buf->first_bd = txdata->tx_bd_prod;
3884 tx_buf->skb = skb;
3885 tx_buf->flags = 0;
3886
3887 DP(NETIF_MSG_TX_QUEUED,
3888 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3889 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3890
3891 if (skb_vlan_tag_present(skb)) {
3892 tx_start_bd->vlan_or_ethertype =
3893 cpu_to_le16(skb_vlan_tag_get(skb));
3894 tx_start_bd->bd_flags.as_bitfield |=
3895 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3896 } else {
3897
3898
3899
3900 u16 vlan_tci = 0;
3901#ifndef BNX2X_STOP_ON_ERROR
3902 if (IS_VF(bp)) {
3903#endif
3904
3905 if (__vlan_get_tag(skb, &vlan_tci)) {
3906 tx_start_bd->vlan_or_ethertype =
3907 cpu_to_le16(ntohs(eth->h_proto));
3908 } else {
3909 tx_start_bd->bd_flags.as_bitfield |=
3910 (X_ETH_INBAND_VLAN <<
3911 ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3912 tx_start_bd->vlan_or_ethertype =
3913 cpu_to_le16(vlan_tci);
3914 }
3915#ifndef BNX2X_STOP_ON_ERROR
3916 } else {
3917
3918 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3919 }
3920#endif
3921 }
3922
3923 nbd = 2;
3924
3925
3926 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3927
3928 if (xmit_type & XMIT_CSUM)
3929 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3930
3931 if (!CHIP_IS_E1x(bp)) {
3932 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3933 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3934
3935 if (xmit_type & XMIT_CSUM_ENC) {
3936 u16 global_data = 0;
3937
3938
3939 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3940 &pbd_e2_parsing_data,
3941 xmit_type);
3942
3943
3944 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3945
3946 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3947
3948 memset(pbd2, 0, sizeof(*pbd2));
3949
3950 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3951 (skb_inner_network_header(skb) -
3952 skb->data) >> 1;
3953
3954 if (xmit_type & XMIT_GSO_ENC)
3955 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3956 &global_data,
3957 xmit_type);
3958
3959 pbd2->global_data = cpu_to_le16(global_data);
3960
3961
3962 SET_FLAG(tx_start_bd->general_data,
3963 ETH_TX_START_BD_PARSE_NBDS, 1);
3964
3965 SET_FLAG(tx_start_bd->general_data,
3966 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3967
3968 tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
3969
3970 nbd++;
3971 } else if (xmit_type & XMIT_CSUM) {
3972
3973 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3974 &pbd_e2_parsing_data,
3975 xmit_type);
3976 }
3977
3978 bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
3979
3980
3981
3982 if (IS_VF(bp)) {
3983
3984 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3985 &pbd_e2->data.mac_addr.src_mid,
3986 &pbd_e2->data.mac_addr.src_lo,
3987 eth->h_source);
3988
3989 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3990 &pbd_e2->data.mac_addr.dst_mid,
3991 &pbd_e2->data.mac_addr.dst_lo,
3992 eth->h_dest);
3993 } else {
3994 if (bp->flags & TX_SWITCHING)
3995 bnx2x_set_fw_mac_addr(
3996 &pbd_e2->data.mac_addr.dst_hi,
3997 &pbd_e2->data.mac_addr.dst_mid,
3998 &pbd_e2->data.mac_addr.dst_lo,
3999 eth->h_dest);
4000#ifdef BNX2X_STOP_ON_ERROR
4001
4002
4003
4004 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
4005 &pbd_e2->data.mac_addr.src_mid,
4006 &pbd_e2->data.mac_addr.src_lo,
4007 eth->h_source);
4008#endif
4009 }
4010
4011 SET_FLAG(pbd_e2_parsing_data,
4012 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
4013 } else {
4014 u16 global_data = 0;
4015 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
4016 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
4017
4018 if (xmit_type & XMIT_CSUM)
4019 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
4020
4021 SET_FLAG(global_data,
4022 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
4023 pbd_e1x->global_data |= cpu_to_le16(global_data);
4024 }
4025
4026
4027 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4028 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4029 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
4030 pkt_size = tx_start_bd->nbytes;
4031
4032 DP(NETIF_MSG_TX_QUEUED,
4033 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
4034 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
4035 le16_to_cpu(tx_start_bd->nbytes),
4036 tx_start_bd->bd_flags.as_bitfield,
4037 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
4038
4039 if (xmit_type & XMIT_GSO) {
4040
4041 DP(NETIF_MSG_TX_QUEUED,
4042 "TSO packet len %d hlen %d total len %d tso size %d\n",
4043 skb->len, hlen, skb_headlen(skb),
4044 skb_shinfo(skb)->gso_size);
4045
4046 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
4047
4048 if (unlikely(skb_headlen(skb) > hlen)) {
4049 nbd++;
4050 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
4051 &tx_start_bd, hlen,
4052 bd_prod);
4053 }
4054 if (!CHIP_IS_E1x(bp))
4055 pbd_e2_parsing_data |=
4056 (skb_shinfo(skb)->gso_size <<
4057 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
4058 ETH_TX_PARSE_BD_E2_LSO_MSS;
4059 else
4060 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
4061 }
4062
4063
4064
4065
4066 if (pbd_e2_parsing_data)
4067 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
4068
4069 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
4070
4071
4072 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4073 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4074
4075 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
4076 skb_frag_size(frag), DMA_TO_DEVICE);
4077 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
4078 unsigned int pkts_compl = 0, bytes_compl = 0;
4079
4080 DP(NETIF_MSG_TX_QUEUED,
4081 "Unable to map page - dropping packet...\n");
4082
4083
4084
4085
4086
4087
4088 first_bd->nbd = cpu_to_le16(nbd);
4089 bnx2x_free_tx_pkt(bp, txdata,
4090 TX_BD(txdata->tx_pkt_prod),
4091 &pkts_compl, &bytes_compl);
4092 return NETDEV_TX_OK;
4093 }
4094
4095 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4096 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4097 if (total_pkt_bd == NULL)
4098 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4099
4100 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4101 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4102 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4103 le16_add_cpu(&pkt_size, skb_frag_size(frag));
4104 nbd++;
4105
4106 DP(NETIF_MSG_TX_QUEUED,
4107 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
4108 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4109 le16_to_cpu(tx_data_bd->nbytes));
4110 }
4111
4112 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4113
4114
4115 first_bd->nbd = cpu_to_le16(nbd);
4116
4117 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4118
4119
4120
4121
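	/* If this packet's BDs wrapped onto a new BD page, the skipped
	 * "next page" BD also counts toward nbd.
	 */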
4122 if (TX_BD_POFF(bd_prod) < nbd)
4123 nbd++;
4124
4125
4126
4127
4128
4129
4130
4131
4132 if (total_pkt_bd != NULL)
4133 total_pkt_bd->total_pkt_bytes = pkt_size;
4134
4135 if (pbd_e1x)
4136 DP(NETIF_MSG_TX_QUEUED,
4137 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
4138 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4139 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4140 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4141 le16_to_cpu(pbd_e1x->total_hlen_w));
4142 if (pbd_e2)
4143 DP(NETIF_MSG_TX_QUEUED,
4144 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
4145 pbd_e2,
4146 pbd_e2->data.mac_addr.dst_hi,
4147 pbd_e2->data.mac_addr.dst_mid,
4148 pbd_e2->data.mac_addr.dst_lo,
4149 pbd_e2->data.mac_addr.src_hi,
4150 pbd_e2->data.mac_addr.src_mid,
4151 pbd_e2->data.mac_addr.src_lo,
4152 pbd_e2->parsing_data);
4153 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
4154
4155 netdev_tx_sent_queue(txq, skb->len);
4156
4157 skb_tx_timestamp(skb);
4158
4159 txdata->tx_pkt_prod++;
4160
4161
4162
4163
4164
4165
4166
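	/* Make sure all BD and parse BD writes are visible before updating
	 * the doorbell producer.
	 */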
4167 wmb();
4168
4169 txdata->tx_db.data.prod += nbd;
4170
4171 wmb();
4172
4173 DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw);
4174
4175 txdata->tx_bd_prod += nbd;
4176
4177 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
4178 netif_tx_stop_queue(txq);
4179
4180
4181
4182
4183 smp_mb();
4184
4185 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
4186 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4187 netif_tx_wake_queue(txq);
4188 }
4189 txdata->tx_pkt++;
4190
4191 return NETDEV_TX_OK;
4192}
4193
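/* Read the priority remap table (c2s_pcp_map) published by the management
 * FW in shmem2; falls back to an identity map with default 0 when not in
 * BD multi-function mode.
 */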
4194void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
4195{
4196 int mfw_vn = BP_FW_MB_IDX(bp);
4197 u32 tmp;
4198
4199
4200 if (!IS_MF_BD(bp)) {
4201 int i;
4202
4203 for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
4204 c2s_map[i] = i;
4205 *c2s_default = 0;
4206
4207 return;
4208 }
4209
4210 tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
4211 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4212 c2s_map[0] = tmp & 0xff;
4213 c2s_map[1] = (tmp >> 8) & 0xff;
4214 c2s_map[2] = (tmp >> 16) & 0xff;
4215 c2s_map[3] = (tmp >> 24) & 0xff;
4216
4217 tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
4218 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4219 c2s_map[4] = tmp & 0xff;
4220 c2s_map[5] = (tmp >> 8) & 0xff;
4221 c2s_map[6] = (tmp >> 16) & 0xff;
4222 c2s_map[7] = (tmp >> 24) & 0xff;
4223
4224 tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
4225 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4226 *c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
4227}
4228
4229
4230
4231
4232
4233
4234
4235
4236
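/* Configure the net_device for multiple traffic classes; callback behind
 * ndo_setup_tc.
 */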
4237int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4238{
4239 struct bnx2x *bp = netdev_priv(dev);
4240 u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
4241 int cos, prio, count, offset;
4242
4243
4244 ASSERT_RTNL();
4245
4246
4247 if (!num_tc) {
4248 netdev_reset_tc(dev);
4249 return 0;
4250 }
4251
4252
4253 if (num_tc > bp->max_cos) {
4254 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4255 num_tc, bp->max_cos);
4256 return -EINVAL;
4257 }
4258
4259
4260 if (netdev_set_num_tc(dev, num_tc)) {
4261 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4262 return -EINVAL;
4263 }
4264
4265 bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
4266
4267
4268 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4269 int outer_prio = c2s_map[prio];
4270
4271 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
4272 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4273 "mapping priority %d to tc %d\n",
4274 outer_prio, bp->prio_to_cos[outer_prio]);
4275 }
4276
4277
4278
4279
4280
4281
4282
4283
4284
4285
4286
4287
4288 for (cos = 0; cos < bp->max_cos; cos++) {
4289 count = BNX2X_NUM_ETH_QUEUES(bp);
4290 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4291 netdev_set_tc_queue(dev, cos, count, offset);
4292 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4293 "mapping tc %d to offset %d count %d\n",
4294 cos, offset, count);
4295 }
4296
4297 return 0;
4298}
4299
4300int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type,
4301 void *type_data)
4302{
4303 struct tc_mqprio_qopt *mqprio = type_data;
4304
4305 if (type != TC_SETUP_QDISC_MQPRIO)
4306 return -EOPNOTSUPP;
4307
4308 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
4309
4310 return bnx2x_setup_tc(dev, mqprio->num_tc);
4311}
4312
4313
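/* called with rtnl_lock */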
4314int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4315{
4316 struct sockaddr *addr = p;
4317 struct bnx2x *bp = netdev_priv(dev);
4318 int rc = 0;
4319
4320 if (!is_valid_ether_addr(addr->sa_data)) {
4321 BNX2X_ERR("Requested MAC address is not valid\n");
4322 return -EINVAL;
4323 }
4324
4325 if (IS_MF_STORAGE_ONLY(bp)) {
4326 BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
4327 return -EINVAL;
4328 }
4329
4330 if (netif_running(dev)) {
4331 rc = bnx2x_set_eth_mac(bp, false);
4332 if (rc)
4333 return rc;
4334 }
4335
4336 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4337
4338 if (netif_running(dev))
4339 rc = bnx2x_set_eth_mac(bp, true);
4340
4341 if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4342 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4343
4344 return rc;
4345}
4346
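/* Free the status block, RX rings and TX rings of a single fastpath queue */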
4347static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4348{
4349 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4350 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4351 u8 cos;
4352
4353
4354
4355 if (IS_FCOE_IDX(fp_index)) {
4356 memset(sb, 0, sizeof(union host_hc_status_block));
4357 fp->status_blk_mapping = 0;
4358 } else {
4359
4360 if (!CHIP_IS_E1x(bp))
4361 BNX2X_PCI_FREE(sb->e2_sb,
4362 bnx2x_fp(bp, fp_index,
4363 status_blk_mapping),
4364 sizeof(struct host_hc_status_block_e2));
4365 else
4366 BNX2X_PCI_FREE(sb->e1x_sb,
4367 bnx2x_fp(bp, fp_index,
4368 status_blk_mapping),
4369 sizeof(struct host_hc_status_block_e1x));
4370 }
4371
4372
4373 if (!skip_rx_queue(bp, fp_index)) {
4374 bnx2x_free_rx_bds(fp);
4375
4376
4377 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4378 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4379 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4380 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4381
4382 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4383 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4384 sizeof(struct eth_fast_path_rx_cqe) *
4385 NUM_RCQ_BD);
4386
4387
4388 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4389 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4390 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4391 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4392 }
4393
4394
4395 if (!skip_tx_queue(bp, fp_index)) {
4396
4397 for_each_cos_in_tx_queue(fp, cos) {
4398 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4399
4400 DP(NETIF_MSG_IFDOWN,
4401 "freeing tx memory of fp %d cos %d cid %d\n",
4402 fp_index, cos, txdata->cid);
4403
4404 BNX2X_FREE(txdata->tx_buf_ring);
4405 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4406 txdata->tx_desc_mapping,
4407 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4408 }
4409 }
4410
4411}
4412
4413static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4414{
4415 int i;
4416 for_each_cnic_queue(bp, i)
4417 bnx2x_free_fp_mem_at(bp, i);
4418}
4419
4420void bnx2x_free_fp_mem(struct bnx2x *bp)
4421{
4422 int i;
4423 for_each_eth_queue(bp, i)
4424 bnx2x_free_fp_mem_at(bp, i);
4425}
4426
4427static void set_sb_shortcuts(struct bnx2x *bp, int index)
4428{
4429 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4430 if (!CHIP_IS_E1x(bp)) {
4431 bnx2x_fp(bp, index, sb_index_values) =
4432 (__le16 *)status_blk.e2_sb->sb.index_values;
4433 bnx2x_fp(bp, index, sb_running_index) =
4434 (__le16 *)status_blk.e2_sb->sb.running_index;
4435 } else {
4436 bnx2x_fp(bp, index, sb_index_values) =
4437 (__le16 *)status_blk.e1x_sb->sb.index_values;
4438 bnx2x_fp(bp, index, sb_running_index) =
4439 (__le16 *)status_blk.e1x_sb->sb.running_index;
4440 }
4441}
4442
4443
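/* Populate the RX BD ring with up to rx_ring_size buffers; returns the
 * number actually allocated.
 */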
4444static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4445 int rx_ring_size)
4446{
4447 struct bnx2x *bp = fp->bp;
4448 u16 ring_prod, cqe_ring_prod;
4449 int i, failure_cnt = 0;
4450
4451 fp->rx_comp_cons = 0;
4452 cqe_ring_prod = ring_prod = 0;
4453
4454
4455
4456
4457 for (i = 0; i < rx_ring_size; i++) {
4458 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
4459 failure_cnt++;
4460 continue;
4461 }
4462 ring_prod = NEXT_RX_IDX(ring_prod);
4463 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4464 WARN_ON(ring_prod <= (i - failure_cnt));
4465 }
4466
4467 if (failure_cnt)
4468 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4469 i - failure_cnt, fp->index);
4470
4471 fp->rx_bd_prod = ring_prod;
4472
4473 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4474 cqe_ring_prod);
4475
4476 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4477
4478 return i - failure_cnt;
4479}
4480
4481static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4482{
4483 int i;
4484
4485 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4486 struct eth_rx_cqe_next_page *nextpg;
4487
4488 nextpg = (struct eth_rx_cqe_next_page *)
4489 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4490 nextpg->addr_hi =
4491 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4492 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4493 nextpg->addr_lo =
4494 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4495 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4496 }
4497}
4498
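/* Allocate status block, TX and RX ring memory for one fastpath queue; a
 * partially filled RX ring is tolerated as long as it meets the minimum.
 */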
4499static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4500{
4501 union host_hc_status_block *sb;
4502 struct bnx2x_fastpath *fp = &bp->fp[index];
4503 int ring_size = 0;
4504 u8 cos;
4505 int rx_ring_size = 0;
4506
4507 if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
4508 rx_ring_size = MIN_RX_SIZE_NONTPA;
4509 bp->rx_ring_size = rx_ring_size;
4510 } else if (!bp->rx_ring_size) {
4511 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4512
4513 if (CHIP_IS_E3(bp)) {
4514 u32 cfg = SHMEM_RD(bp,
4515 dev_info.port_hw_config[BP_PORT(bp)].
4516 default_cfg);
4517
4518
4519 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4520 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4521 rx_ring_size /= 10;
4522 }
4523
4524
4525 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4526 MIN_RX_SIZE_TPA, rx_ring_size);
4527
4528 bp->rx_ring_size = rx_ring_size;
4529 } else
4530 rx_ring_size = bp->rx_ring_size;
4531
4532 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4533
4534
4535 sb = &bnx2x_fp(bp, index, status_blk);
4536
4537 if (!IS_FCOE_IDX(index)) {
4538
4539 if (!CHIP_IS_E1x(bp)) {
4540 sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4541 sizeof(struct host_hc_status_block_e2));
4542 if (!sb->e2_sb)
4543 goto alloc_mem_err;
4544 } else {
4545 sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4546 sizeof(struct host_hc_status_block_e1x));
4547 if (!sb->e1x_sb)
4548 goto alloc_mem_err;
4549 }
4550 }
4551
4552
4553
4554
4555 if (!IS_FCOE_IDX(index))
4556 set_sb_shortcuts(bp, index);
4557
4558
4559 if (!skip_tx_queue(bp, index)) {
4560
4561 for_each_cos_in_tx_queue(fp, cos) {
4562 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4563
4564 DP(NETIF_MSG_IFUP,
4565 "allocating tx memory of fp %d cos %d\n",
4566 index, cos);
4567
4568 txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4569 sizeof(struct sw_tx_bd),
4570 GFP_KERNEL);
4571 if (!txdata->tx_buf_ring)
4572 goto alloc_mem_err;
4573 txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4574 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4575 if (!txdata->tx_desc_ring)
4576 goto alloc_mem_err;
4577 }
4578 }
4579
4580
4581 if (!skip_rx_queue(bp, index)) {
4582
4583 bnx2x_fp(bp, index, rx_buf_ring) =
4584 kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4585 if (!bnx2x_fp(bp, index, rx_buf_ring))
4586 goto alloc_mem_err;
4587 bnx2x_fp(bp, index, rx_desc_ring) =
4588 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4589 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4590 if (!bnx2x_fp(bp, index, rx_desc_ring))
4591 goto alloc_mem_err;
4592
4593
4594 bnx2x_fp(bp, index, rx_comp_ring) =
4595 BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4596 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4597 if (!bnx2x_fp(bp, index, rx_comp_ring))
4598 goto alloc_mem_err;
4599
4600
4601 bnx2x_fp(bp, index, rx_page_ring) =
4602 kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4603 GFP_KERNEL);
4604 if (!bnx2x_fp(bp, index, rx_page_ring))
4605 goto alloc_mem_err;
4606 bnx2x_fp(bp, index, rx_sge_ring) =
4607 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4608 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4609 if (!bnx2x_fp(bp, index, rx_sge_ring))
4610 goto alloc_mem_err;
4611
4612 bnx2x_set_next_page_rx_bd(fp);
4613
4614
4615 bnx2x_set_next_page_rx_cq(fp);
4616
4617
4618 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4619 if (ring_size < rx_ring_size)
4620 goto alloc_mem_err;
4621 }
4622
4623 return 0;
4624
4625
4626alloc_mem_err:
4627 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4628 index, ring_size);
4629
4630
4631
4632
4633 if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
4634 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4635
4636 bnx2x_free_fp_mem_at(bp, index);
4637 return -ENOMEM;
4638 }
4639 return 0;
4640}
4641
4642static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4643{
4644 if (!NO_FCOE(bp))
4645
4646 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4647
4648
4649
4650 return -ENOMEM;
4651
4652 return 0;
4653}
4654
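/* Allocate fastpath memory for all ethernet queues; on partial failure,
 * shrink the number of queues and relocate the FCoE queue.
 */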
4655static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4656{
4657 int i;
4658
4659
4660
4661
4662
4663
4664 if (bnx2x_alloc_fp_mem_at(bp, 0))
4665 return -ENOMEM;
4666
4667
4668 for_each_nondefault_eth_queue(bp, i)
4669 if (bnx2x_alloc_fp_mem_at(bp, i))
4670 break;
4671
4672
4673 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4674 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4675
4676 WARN_ON(delta < 0);
4677 bnx2x_shrink_eth_fp(bp, delta);
4678 if (CNIC_SUPPORT(bp))
4679
4680
4681
4682
4683
4684
4685 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4686 bp->num_ethernet_queues -= delta;
4687 bp->num_queues = bp->num_ethernet_queues +
4688 bp->num_cnic_queues;
4689 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4690 bp->num_queues + delta, bp->num_queues);
4691 }
4692
4693 return 0;
4694}
4695
4696void bnx2x_free_mem_bp(struct bnx2x *bp)
4697{
4698 int i;
4699
4700 for (i = 0; i < bp->fp_array_size; i++)
4701 kfree(bp->fp[i].tpa_info);
4702 kfree(bp->fp);
4703 kfree(bp->sp_objs);
4704 kfree(bp->fp_stats);
4705 kfree(bp->bnx2x_txq);
4706 kfree(bp->msix_table);
4707 kfree(bp->ilt);
4708}
4709
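/* Allocate bp-level arrays: fastpath array (with TPA info), sp_objs,
 * fp_stats, the TX queue array, the MSI-X table and the ILT.
 */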
int bnx2x_alloc_mem_bp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp;
	struct msix_entry *tbl;
	struct bnx2x_ilt *ilt;
	int msix_table_size = 0;
	int fp_array_size, txq_array_size;
	int i;

	/*
	 * The biggest MSI-X table we might need is as a maximum number of fast
	 * path IGU SBs plus default SB (for PF only).
	 */
	msix_table_size = bp->igu_sb_cnt;
	if (IS_PF(bp))
		msix_table_size++;
	BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);

	/* fp array: RSS plus CNIC related L2 queues */
	fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
	bp->fp_array_size = fp_array_size;
	BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);

	fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
	if (!fp)
		goto alloc_err;
	for (i = 0; i < bp->fp_array_size; i++) {
		fp[i].tpa_info =
			kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
				sizeof(struct bnx2x_agg_info), GFP_KERNEL);
		if (!(fp[i].tpa_info))
			goto alloc_err;
	}

	bp->fp = fp;

	/* allocate sp objs */
	bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
			      GFP_KERNEL);
	if (!bp->sp_objs)
		goto alloc_err;

	/* allocate fp_stats */
	bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
			       GFP_KERNEL);
	if (!bp->fp_stats)
		goto alloc_err;

	/* Allocate memory for the transmission queues array */
	txq_array_size =
		BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
	BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);

	bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
				GFP_KERNEL);
	if (!bp->bnx2x_txq)
		goto alloc_err;

	/* msix table */
	tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		goto alloc_err;
	bp->msix_table = tbl;

	/* ilt */
	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
	if (!ilt)
		goto alloc_err;
	bp->ilt = ilt;

	return 0;
alloc_err:
	bnx2x_free_mem_bp(bp);
	return -ENOMEM;
}

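/* Unload and reload the NIC if the interface is currently running */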
int bnx2x_reload_if_running(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (unlikely(!netif_running(dev)))
		return 0;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
	return bnx2x_nic_load(bp, LOAD_NORMAL);
}

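/* Return the index of the currently active PHY (internal or external) */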
int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;
	if (bp->link_params.num_phys <= 1)
		return INT_PHY;

	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;
		/* In case link is SERDES, check if the EXT_PHY2 is the one */
		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {

		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY2;
			break;
		}
	}

	return sel_phy_idx;
}
int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
	/*
	 * The selected activated PHY is always after swapping (in case PHY
	 * swapping is enabled). So when swapping is enabled, we need to reverse
	 * the configuration
	 */

	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}
	return LINK_CONFIG_IDX(sel_phy_idx);
}

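/* Report the FCoE World Wide Names retrieved from the CNIC device */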
#ifdef NETDEV_FCOE_WWNN
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
				cp->fcoe_wwn_node_name_lo);
		break;
	case NETDEV_FCOE_WWPN:
		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
				cp->fcoe_wwn_port_name_lo);
		break;
	default:
		BNX2X_ERR("Wrong WWN type requested - %d\n", type);
		return -EINVAL;
	}

	return 0;
}
#endif

/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (pci_num_vf(bp->pdev)) {
		DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
		return -EPERM;
	}

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Can't perform change MTU during parity recovery\n");
		return -EAGAIN;
	}

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (!bnx2x_mtu_allows_gro(new_mtu))
		dev->features &= ~NETIF_F_GRO_HW;

	if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
		SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);

	return bnx2x_reload_if_running(dev);
}

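/* Sanitize requested netdev features against current constraints (VF mode,
 * Rx checksum, MTU vs. hardware GRO).
 */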
netdev_features_t bnx2x_fix_features(struct net_device *dev,
				     netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (pci_num_vf(bp->pdev)) {
		netdev_features_t changed = dev->features ^ features;

		/* Revert the requested changes in features if they
		 * would require internal reload of PF in bnx2x_set_features().
		 */
		if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
			features &= ~NETIF_F_RXCSUM;
			features |= dev->features & NETIF_F_RXCSUM;
		}

		if (changed & NETIF_F_LOOPBACK) {
			features &= ~NETIF_F_LOOPBACK;
			features |= dev->features & NETIF_F_LOOPBACK;
		}
	}

	/* TPA requires Rx CSUM offloading */
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_LRO;

	if (!(features & NETIF_F_GRO) || !bnx2x_mtu_allows_gro(dev->mtu))
		features &= ~NETIF_F_GRO_HW;
	if (features & NETIF_F_GRO_HW)
		features &= ~NETIF_F_LRO;

	return features;
}

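/* Apply requested netdev feature changes, reloading the NIC when needed */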
int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);
	netdev_features_t changes = features ^ dev->features;
	bool bnx2x_reload = false;
	int rc;

	/* VFs or non SRIOV PFs should be able to change loopback feature */
	if (!pci_num_vf(bp->pdev)) {
		if (features & NETIF_F_LOOPBACK) {
			if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
				bp->link_params.loopback_mode = LOOPBACK_BMAC;
				bnx2x_reload = true;
			}
		} else {
			if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
				bp->link_params.loopback_mode = LOOPBACK_NONE;
				bnx2x_reload = true;
			}
		}
	}

	/* Don't care about GRO changes */
	changes &= ~NETIF_F_GRO;

	if (changes)
		bnx2x_reload = true;

	if (bnx2x_reload) {
		if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
			dev->features = features;
			rc = bnx2x_reload_if_running(dev);
			return rc ? rc : 1;
		}
		/* else: bnx2x_nic_load() will be called at end of recovery */
	}

	return 0;
}

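/* ndo_tx_timeout callback: log a dump and schedule recovery in the sp_rtnl task */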
void bnx2x_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* We want the information of the dump logged,
	 * but calling bnx2x_panic() would kill all chances of recovery.
	 */
	if (!bp->panic)
#ifndef BNX2X_STOP_ON_ERROR
		bnx2x_panic_dump(bp, false);
#else
		bnx2x_panic();
#endif

	/* This allows the netif to be shutdown gracefully before resetting */
	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
}

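/* PCI suspend callback: unload the NIC and drop to the requested power state */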
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

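/* PCI resume callback: restore PCI state, power up and reload the NIC */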
int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

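/* Write the CDU validation values into an ethernet connection context */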
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid)
{
	if (!cxt) {
		BNX2X_ERR("bad context pointer %p\n", cxt);
		return;
	}

	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
}

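/* Program the host-coalescing timeout for a status block index */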
static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
				    u8 fw_sb_id, u8 sb_index,
				    u8 ticks)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d ticks %d\n",
	   port, fw_sb_id, sb_index, ticks);
}

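/* Enable or disable host coalescing for a status block index */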
static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
				    u16 fw_sb_id, u8 sb_index,
				    u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
	u8 flags = REG_RD8(bp, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR8(bp, addr, flags);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d disable %d\n",
	   port, fw_sb_id, sb_index, disable);
}

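/* Update interrupt coalescing (timeout in usec and enable flag) for one
 * status block index.
 */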
void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec)
{
	int port = BP_PORT(bp);
	u8 ticks = usec / BNX2X_BTR;

	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);

	disable = disable ? 1 : (usec ? 0 : 1);
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}

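/* Set a flag in sp_rtnl_state and kick the slowpath rtnl task */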
void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
			    u32 verbose)
{
	smp_mb__before_atomic();
	set_bit(flag, &bp->sp_rtnl_state);
	smp_mb__after_atomic();
	DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
	   flag);
	schedule_delayed_work(&bp->sp_rtnl_task, 0);
}
