/* bnx2x_cmn.c: Broadcom Everest network driver. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/crash_dump.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <net/busy_poll.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_rx_queue_cnic(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
		napi_hash_add(&bnx2x_fp(bp, i, napi));
	}
}

static void bnx2x_add_all_napi(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_eth_queue(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
		napi_hash_add(&bnx2x_fp(bp, i, napi));
	}
}

static int bnx2x_calc_num_queues(struct bnx2x *bp)
{
	int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();

	/* Reduce memory usage in kdump environment by using only one queue */
	if (is_kdump_kernel())
		nq = 1;

	nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
	return nq;
}

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of the bp->fp[to].napi is kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;
	struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* Retain the tpa_info of the original `to' version; the memcpy above
	 * would otherwise leave both fastpaths pointing at the same tpa_info.
	 */
	to_fp->tpa_info = old_tpa_info;

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */
	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
				(bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[new_txdata_index],
	       &bp->bnx2x_txq[old_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}
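
/* Illustrative note (values assumed, not taken from this source): with
 * max_cos = 3 and the ETH queue count shrinking from 8 to 6, moving the FCoE
 * fastpath from index 8 to index 6 gives
 * old_txdata_index = 8 * 3 + FCOE_TXQ_IDX_OFFSET and
 * new_txdata_index = (8 - 8 + 6) * 3 + FCOE_TXQ_IDX_OFFSET,
 * i.e. the FCoE txdata slot slides down together with the fastpath it
 * belongs to.
 */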

/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp:		driver handle
 * @buf:	character buffer to fill with the fw name
 * @buf_len:	length of the above buffer
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{
	if (IS_PF(bp)) {
		u8 phy_fw_ver[PHY_FW_VER_LEN];

		phy_fw_ver[0] = '\0';
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     phy_fw_ver, PHY_FW_VER_LEN);
		strlcpy(buf, bp->fw_ver, buf_len);
		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
			 "bc %d.%d.%d%s%s",
			 (bp->common.bc_ver & 0xff0000) >> 16,
			 (bp->common.bc_ver & 0xff00) >> 8,
			 (bp->common.bc_ver & 0xff),
			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	} else {
		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
	}
}

/**
 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 *
 * @bp:		driver handle
 * @delta:	number of eth queues which were not allocated
 */
static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
{
	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);

	/* Queue pointer cannot be re-set on an fp-basis, as moving pointer
	 * backward along with current queue would result in an error.
	 */
	for (cos = 1; cos < bp->max_cos; cos++) {
		for (i = 0; i < old_eth_num - delta; i++) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			int new_idx = cos * (old_eth_num - delta) + i;

			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
			       sizeof(struct bnx2x_fp_txdata));
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
		}
	}
}

int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;
	u16 split_bd_len = 0;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
		/* Skip second parse bd */
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		split_bd_len = BD_UNMAP_LEN(tx_data_bd);
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* unmap first bd */
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
			 DMA_TO_DEVICE);

	/* now free frags */
	while (nbd > 0) {

		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
		dev_kfree_skb_any(skb);
	}

	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
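
/* Completion-side BD accounting (descriptive note, not from the source):
 * nbd is read from the start BD and decremented once up front, then once for
 * the parse BD, optionally once more for a second parse BD and once for a
 * TSO-split header BD (whose length is folded into the single
 * dma_unmap_single() of the start BD). Whatever count remains is the number
 * of fragment BDs unmapped in the loop above.
 */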

int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could have happen if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */
		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
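
/* Descriptive note (not from the source): fp->sge_mask is a bit vector in
 * which each 64-bit element tracks BIT_VEC64_ELEM_SZ SGE entries. The
 * producer is only advanced in whole elements: once every bit of an element
 * has been cleared by completed aggregations, the element is reset to
 * all-ones and rx_sge_prod jumps forward by BIT_VEC64_ELEM_SZ entries, so
 * SGEs are recycled to the firmware in bulk rather than one at a time.
 */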

/* Get Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe,
			    enum pkt_hash_types *rxhash_type)
{
	/* Get Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
		enum eth_rss_hash_type htype;

		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
		*rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
				(htype == TCP_IPV6_HASH_TYPE)) ?
			       PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;

		return le32_to_cpu(cqe->rss_hash_result);
	}
	*rxhash_type = PKT_HASH_TYPE_NONE;
	return 0;
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/* ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);

		tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
	   fp->tpa_queue_used);
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12

/**
 * bnx2x_set_gro_params - compute GRO values
 *
 * @skb:		packet skb
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 * @pkt_len:		length of all segments
 * @num_of_coalesced_segs: number of segments aggregated by the FW
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 * Compute number of aggregated segments, and gso_type.
 */
static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
				 u16 len_on_bd, unsigned int pkt_len,
				 u16 num_of_coalesced_segs)
{
	/* TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6) {
		hdrs_len += sizeof(struct ipv6hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	} else {
		hdrs_len += sizeof(struct iphdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	/* Check if there was a TCP timestamp; if there is, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}
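
/* Worked example (illustrative values, not from the source): for an IPv4/TCP
 * aggregation whose first packet has len_on_bd = 1514 and no timestamp
 * option, hdrs_len = 14 (ETH_HLEN) + 20 (iphdr) + 20 (tcphdr) = 54, so
 * gso_size becomes 1514 - 54 = 1460; with a timestamp option present another
 * TPA_TSTAMP_OPT_LEN (12) bytes are subtracted, giving 1448.
 */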
545
546static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
547 u16 index, gfp_t gfp_mask)
548{
549 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
550 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
551 struct bnx2x_alloc_pool *pool = &fp->page_pool;
552 dma_addr_t mapping;
553
554 if (!pool->page || (PAGE_SIZE - pool->offset) < SGE_PAGE_SIZE) {
555
556
557
558
559 if (pool->page)
560 put_page(pool->page);
561
562 pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
563 if (unlikely(!pool->page)) {
564 BNX2X_ERR("Can't alloc sge\n");
565 return -ENOMEM;
566 }
567
568 pool->offset = 0;
569 }
570
571 mapping = dma_map_page(&bp->pdev->dev, pool->page,
572 pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
573 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
574 BNX2X_ERR("Can't map sge\n");
575 return -ENOMEM;
576 }
577
578 get_page(pool->page);
579 sw_buf->page = pool->page;
580 sw_buf->offset = pool->offset;
581
582 dma_unmap_addr_set(sw_buf, mapping, mapping);
583
584 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
585 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
586
587 pool->offset += SGE_PAGE_SIZE;
588
589 return 0;
590}
591
592static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
593 struct bnx2x_agg_info *tpa_info,
594 u16 pages,
595 struct sk_buff *skb,
596 struct eth_end_agg_rx_cqe *cqe,
597 u16 cqe_idx)
598{
599 struct sw_rx_page *rx_pg, old_rx_pg;
600 u32 i, frag_len, frag_size;
601 int err, j, frag_id = 0;
602 u16 len_on_bd = tpa_info->len_on_bd;
603 u16 full_page = 0, gro_size = 0;
604
605 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
606
607 if (fp->mode == TPA_MODE_GRO) {
608 gro_size = tpa_info->gro_size;
609 full_page = tpa_info->full_page;
610 }
611
612
613 if (frag_size)
614 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
615 le16_to_cpu(cqe->pkt_len),
616 le16_to_cpu(cqe->num_of_coalesced_segs));
617
618#ifdef BNX2X_STOP_ON_ERROR
619 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
620 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
621 pages, cqe_idx);
622 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
623 bnx2x_panic();
624 return -EINVAL;
625 }
626#endif
627
628
629 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
630 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
631
632
633
634 if (fp->mode == TPA_MODE_GRO)
635 frag_len = min_t(u32, frag_size, (u32)full_page);
636 else
637 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
638
639 rx_pg = &fp->rx_page_ring[sge_idx];
640 old_rx_pg = *rx_pg;
641
642
643
644 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
645 if (unlikely(err)) {
646 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
647 return err;
648 }
649
650 dma_unmap_page(&bp->pdev->dev,
651 dma_unmap_addr(&old_rx_pg, mapping),
652 SGE_PAGE_SIZE, DMA_FROM_DEVICE);
653
654 if (fp->mode == TPA_MODE_LRO)
655 skb_fill_page_desc(skb, j, old_rx_pg.page,
656 old_rx_pg.offset, frag_len);
657 else {
658 int rem;
659 int offset = 0;
660 for (rem = frag_len; rem > 0; rem -= gro_size) {
661 int len = rem > gro_size ? gro_size : rem;
662 skb_fill_page_desc(skb, frag_id++,
663 old_rx_pg.page,
664 old_rx_pg.offset + offset,
665 len);
666 if (offset)
667 get_page(old_rx_pg.page);
668 offset += len;
669 }
670 }
671
672 skb->data_len += frag_len;
673 skb->truesize += SGE_PAGES;
674 skb->len += frag_len;
675
676 frag_size -= frag_len;
677 }
678
679 return 0;
680}

static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
	if (fp->rx_frag_size)
		skb_free_frag(data);
	else
		kfree(data);
}

static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
{
	if (fp->rx_frag_size) {
		/* GFP_KERNEL allocations are used only during initialization */
		if (unlikely(gfpflags_allow_blocking(gfp_mask)))
			return (void *)__get_free_page(gfp_mask);

		return netdev_alloc_frag(fp->rx_frag_size);
	}

	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
}
702
703#ifdef CONFIG_INET
704static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
705{
706 const struct iphdr *iph = ip_hdr(skb);
707 struct tcphdr *th;
708
709 skb_set_transport_header(skb, sizeof(struct iphdr));
710 th = tcp_hdr(skb);
711
712 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
713 iph->saddr, iph->daddr, 0);
714}
715
716static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
717{
718 struct ipv6hdr *iph = ipv6_hdr(skb);
719 struct tcphdr *th;
720
721 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
722 th = tcp_hdr(skb);
723
724 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
725 &iph->saddr, &iph->daddr, 0);
726}
727
728static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
729 void (*gro_func)(struct bnx2x*, struct sk_buff*))
730{
731 skb_set_network_header(skb, 0);
732 gro_func(bp, skb);
733 tcp_gro_complete(skb);
734}
735#endif
736
737static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
738 struct sk_buff *skb)
739{
740#ifdef CONFIG_INET
741 if (skb_shinfo(skb)->gso_size) {
742 switch (be16_to_cpu(skb->protocol)) {
743 case ETH_P_IP:
744 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
745 break;
746 case ETH_P_IPV6:
747 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
748 break;
749 default:
750 BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
751 be16_to_cpu(skb->protocol));
752 }
753 }
754#endif
755 skb_record_rx_queue(skb, fp->rx_queue);
756 napi_gro_receive(&fp->napi, skb);
757}
758
759static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
760 struct bnx2x_agg_info *tpa_info,
761 u16 pages,
762 struct eth_end_agg_rx_cqe *cqe,
763 u16 cqe_idx)
764{
765 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
766 u8 pad = tpa_info->placement_offset;
767 u16 len = tpa_info->len_on_bd;
768 struct sk_buff *skb = NULL;
769 u8 *new_data, *data = rx_buf->data;
770 u8 old_tpa_state = tpa_info->tpa_state;
771
772 tpa_info->tpa_state = BNX2X_TPA_STOP;
773
774
775
776
777 if (old_tpa_state == BNX2X_TPA_ERROR)
778 goto drop;
779
780
781 new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
782
783
784
785 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
786 fp->rx_buf_size, DMA_FROM_DEVICE);
787 if (likely(new_data))
788 skb = build_skb(data, fp->rx_frag_size);
789
790 if (likely(skb)) {
791#ifdef BNX2X_STOP_ON_ERROR
792 if (pad + len > fp->rx_buf_size) {
793 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
794 pad, len, fp->rx_buf_size);
795 bnx2x_panic();
796 return;
797 }
798#endif
799
800 skb_reserve(skb, pad + NET_SKB_PAD);
801 skb_put(skb, len);
802 skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
803
804 skb->protocol = eth_type_trans(skb, bp->dev);
805 skb->ip_summed = CHECKSUM_UNNECESSARY;
806
807 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
808 skb, cqe, cqe_idx)) {
809 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
810 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
811 bnx2x_gro_receive(bp, fp, skb);
812 } else {
813 DP(NETIF_MSG_RX_STATUS,
814 "Failed to allocate new pages - dropping packet!\n");
815 dev_kfree_skb_any(skb);
816 }
817
818
819 rx_buf->data = new_data;
820
821 return;
822 }
823 if (new_data)
824 bnx2x_frag_free(fp, new_data);
825drop:
826
827 DP(NETIF_MSG_RX_STATUS,
828 "Failed to allocate or map a new skb - dropping packet!\n");
829 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
830}
831
832static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
833 u16 index, gfp_t gfp_mask)
834{
835 u8 *data;
836 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
837 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
838 dma_addr_t mapping;
839
840 data = bnx2x_frag_alloc(fp, gfp_mask);
841 if (unlikely(data == NULL))
842 return -ENOMEM;
843
844 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
845 fp->rx_buf_size,
846 DMA_FROM_DEVICE);
847 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
848 bnx2x_frag_free(fp, data);
849 BNX2X_ERR("Can't map rx data\n");
850 return -ENOMEM;
851 }
852
853 rx_buf->data = data;
854 dma_unmap_addr_set(rx_buf, mapping, mapping);
855
856 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
857 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
858
859 return 0;
860}

static
void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
			 struct bnx2x_fastpath *fp,
			 struct bnx2x_eth_q_stats *qstats)
{
	/* Do nothing if no L4 csum validation was done.
	 * We do not check whether IP csum was validated. For IPv4 we assume
	 * that if the card got as far as validating the L4 csum, it also
	 * validated the IP csum. IPv6 has no IP csum.
	 */
	if (cqe->fast_path_cqe.status_flags &
	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
		return;

	/* If L4 validation was done, check if an error was found. */
	if (cqe->fast_path_cqe.type_error_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		qstats->hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}
885
886static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
887{
888 struct bnx2x *bp = fp->bp;
889 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
890 u16 sw_comp_cons, sw_comp_prod;
891 int rx_pkt = 0;
892 union eth_rx_cqe *cqe;
893 struct eth_fast_path_rx_cqe *cqe_fp;
894
895#ifdef BNX2X_STOP_ON_ERROR
896 if (unlikely(bp->panic))
897 return 0;
898#endif
899 if (budget <= 0)
900 return rx_pkt;
901
902 bd_cons = fp->rx_bd_cons;
903 bd_prod = fp->rx_bd_prod;
904 bd_prod_fw = bd_prod;
905 sw_comp_cons = fp->rx_comp_cons;
906 sw_comp_prod = fp->rx_comp_prod;
907
908 comp_ring_cons = RCQ_BD(sw_comp_cons);
909 cqe = &fp->rx_comp_ring[comp_ring_cons];
910 cqe_fp = &cqe->fast_path_cqe;
911
912 DP(NETIF_MSG_RX_STATUS,
913 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
914
915 while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
916 struct sw_rx_bd *rx_buf = NULL;
917 struct sk_buff *skb;
918 u8 cqe_fp_flags;
919 enum eth_rx_cqe_type cqe_fp_type;
920 u16 len, pad, queue;
921 u8 *data;
922 u32 rxhash;
923 enum pkt_hash_types rxhash_type;
924
925#ifdef BNX2X_STOP_ON_ERROR
926 if (unlikely(bp->panic))
927 return 0;
928#endif
929
930 bd_prod = RX_BD(bd_prod);
931 bd_cons = RX_BD(bd_cons);
932
933
934
935
936
937
938
939
940
941
942
943 rmb();
944
945 cqe_fp_flags = cqe_fp->type_error_flags;
946 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
947
948 DP(NETIF_MSG_RX_STATUS,
949 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
950 CQE_TYPE(cqe_fp_flags),
951 cqe_fp_flags, cqe_fp->status_flags,
952 le32_to_cpu(cqe_fp->rss_hash_result),
953 le16_to_cpu(cqe_fp->vlan_tag),
954 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
955
956
957 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
958 bnx2x_sp_event(fp, cqe);
959 goto next_cqe;
960 }
961
962 rx_buf = &fp->rx_buf_ring[bd_cons];
963 data = rx_buf->data;
964
965 if (!CQE_TYPE_FAST(cqe_fp_type)) {
966 struct bnx2x_agg_info *tpa_info;
967 u16 frag_size, pages;
968#ifdef BNX2X_STOP_ON_ERROR
969
970 if (fp->mode == TPA_MODE_DISABLED &&
971 (CQE_TYPE_START(cqe_fp_type) ||
972 CQE_TYPE_STOP(cqe_fp_type)))
973 BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
974 CQE_TYPE(cqe_fp_type));
975#endif
976
977 if (CQE_TYPE_START(cqe_fp_type)) {
978 u16 queue = cqe_fp->queue_index;
979 DP(NETIF_MSG_RX_STATUS,
980 "calling tpa_start on queue %d\n",
981 queue);
982
983 bnx2x_tpa_start(fp, queue,
984 bd_cons, bd_prod,
985 cqe_fp);
986
987 goto next_rx;
988 }
989 queue = cqe->end_agg_cqe.queue_index;
990 tpa_info = &fp->tpa_info[queue];
991 DP(NETIF_MSG_RX_STATUS,
992 "calling tpa_stop on queue %d\n",
993 queue);
994
995 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
996 tpa_info->len_on_bd;
997
998 if (fp->mode == TPA_MODE_GRO)
999 pages = (frag_size + tpa_info->full_page - 1) /
1000 tpa_info->full_page;
1001 else
1002 pages = SGE_PAGE_ALIGN(frag_size) >>
1003 SGE_PAGE_SHIFT;
1004
1005 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
1006 &cqe->end_agg_cqe, comp_ring_cons);
1007#ifdef BNX2X_STOP_ON_ERROR
1008 if (bp->panic)
1009 return 0;
1010#endif
1011
1012 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
1013 goto next_cqe;
1014 }
1015
1016 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
1017 pad = cqe_fp->placement_offset;
1018 dma_sync_single_for_cpu(&bp->pdev->dev,
1019 dma_unmap_addr(rx_buf, mapping),
1020 pad + RX_COPY_THRESH,
1021 DMA_FROM_DEVICE);
1022 pad += NET_SKB_PAD;
1023 prefetch(data + pad);
1024
1025 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1026 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1027 "ERROR flags %x rx packet %u\n",
1028 cqe_fp_flags, sw_comp_cons);
1029 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
1030 goto reuse_rx;
1031 }
1032
1033
1034
1035
1036 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1037 (len <= RX_COPY_THRESH)) {
1038 skb = napi_alloc_skb(&fp->napi, len);
1039 if (skb == NULL) {
1040 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1041 "ERROR packet dropped because of alloc failure\n");
1042 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1043 goto reuse_rx;
1044 }
1045 memcpy(skb->data, data + pad, len);
1046 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1047 } else {
1048 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1049 GFP_ATOMIC) == 0)) {
1050 dma_unmap_single(&bp->pdev->dev,
1051 dma_unmap_addr(rx_buf, mapping),
1052 fp->rx_buf_size,
1053 DMA_FROM_DEVICE);
1054 skb = build_skb(data, fp->rx_frag_size);
1055 if (unlikely(!skb)) {
1056 bnx2x_frag_free(fp, data);
1057 bnx2x_fp_qstats(bp, fp)->
1058 rx_skb_alloc_failed++;
1059 goto next_rx;
1060 }
1061 skb_reserve(skb, pad);
1062 } else {
1063 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1064 "ERROR packet dropped because of alloc failure\n");
1065 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1066reuse_rx:
1067 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1068 goto next_rx;
1069 }
1070 }
1071
1072 skb_put(skb, len);
1073 skb->protocol = eth_type_trans(skb, bp->dev);
1074
1075
1076 rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1077 skb_set_hash(skb, rxhash, rxhash_type);
1078
1079 skb_checksum_none_assert(skb);
1080
1081 if (bp->dev->features & NETIF_F_RXCSUM)
1082 bnx2x_csum_validate(skb, cqe, fp,
1083 bnx2x_fp_qstats(bp, fp));
1084
1085 skb_record_rx_queue(skb, fp->rx_queue);
1086
1087
1088 if (unlikely(cqe->fast_path_cqe.type_error_flags &
1089 (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
1090 bnx2x_set_rx_ts(bp, skb);
1091
1092 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1093 PARSING_FLAGS_VLAN)
1094 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1095 le16_to_cpu(cqe_fp->vlan_tag));
1096
1097 skb_mark_napi_id(skb, &fp->napi);
1098
1099 if (bnx2x_fp_ll_polling(fp))
1100 netif_receive_skb(skb);
1101 else
1102 napi_gro_receive(&fp->napi, skb);
1103next_rx:
1104 rx_buf->data = NULL;
1105
1106 bd_cons = NEXT_RX_IDX(bd_cons);
1107 bd_prod = NEXT_RX_IDX(bd_prod);
1108 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1109 rx_pkt++;
1110next_cqe:
1111 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1112 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1113
1114
1115 BNX2X_SEED_CQE(cqe_fp);
1116
1117 if (rx_pkt == budget)
1118 break;
1119
1120 comp_ring_cons = RCQ_BD(sw_comp_cons);
1121 cqe = &fp->rx_comp_ring[comp_ring_cons];
1122 cqe_fp = &cqe->fast_path_cqe;
1123 }
1124
1125 fp->rx_bd_cons = bd_cons;
1126 fp->rx_bd_prod = bd_prod_fw;
1127 fp->rx_comp_cons = sw_comp_cons;
1128 fp->rx_comp_prod = sw_comp_prod;
1129
1130
1131 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1132 fp->rx_sge_prod);
1133
1134 fp->rx_pkt += rx_pkt;
1135 fp->rx_calls++;
1136
1137 return rx_pkt;
1138}
1139
1140static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1141{
1142 struct bnx2x_fastpath *fp = fp_cookie;
1143 struct bnx2x *bp = fp->bp;
1144 u8 cos;
1145
1146 DP(NETIF_MSG_INTR,
1147 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1148 fp->index, fp->fw_sb_id, fp->igu_sb_id);
1149
1150 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1151
1152#ifdef BNX2X_STOP_ON_ERROR
1153 if (unlikely(bp->panic))
1154 return IRQ_HANDLED;
1155#endif
1156
1157
1158 for_each_cos_in_tx_queue(fp, cos)
1159 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1160
1161 prefetch(&fp->sb_running_index[SM_RX_ID]);
1162 napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
1163
1164 return IRQ_HANDLED;
1165}
1166
1167
1168void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1169{
1170 mutex_lock(&bp->port.phy_mutex);
1171
1172 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1173}
1174
1175void bnx2x_release_phy_lock(struct bnx2x *bp)
1176{
1177 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1178
1179 mutex_unlock(&bp->port.phy_mutex);
1180}
1181
1182
1183u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1184{
1185 u16 line_speed = bp->link_vars.line_speed;
1186 if (IS_MF(bp)) {
1187 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1188 bp->mf_config[BP_VN(bp)]);
1189
1190
1191
1192
1193 if (IS_MF_PERCENT_BW(bp))
1194 line_speed = (line_speed * maxCfg) / 100;
1195 else {
1196 u16 vn_max_rate = maxCfg * 100;
1197
1198 if (vn_max_rate < line_speed)
1199 line_speed = vn_max_rate;
1200 }
1201 }
1202
1203 return line_speed;
1204}
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214static void bnx2x_fill_report_data(struct bnx2x *bp,
1215 struct bnx2x_link_report_data *data)
1216{
1217 memset(data, 0, sizeof(*data));
1218
1219 if (IS_PF(bp)) {
1220
1221 data->line_speed = bnx2x_get_mf_speed(bp);
1222
1223
1224 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1225 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1226 &data->link_report_flags);
1227
1228 if (!BNX2X_NUM_ETH_QUEUES(bp))
1229 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1230 &data->link_report_flags);
1231
1232
1233 if (bp->link_vars.duplex == DUPLEX_FULL)
1234 __set_bit(BNX2X_LINK_REPORT_FD,
1235 &data->link_report_flags);
1236
1237
1238 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1239 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1240 &data->link_report_flags);
1241
1242
1243 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1244 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1245 &data->link_report_flags);
1246 } else {
1247 *data = bp->vf_link_vars;
1248 }
1249}
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261void bnx2x_link_report(struct bnx2x *bp)
1262{
1263 bnx2x_acquire_phy_lock(bp);
1264 __bnx2x_link_report(bp);
1265 bnx2x_release_phy_lock(bp);
1266}
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276void __bnx2x_link_report(struct bnx2x *bp)
1277{
1278 struct bnx2x_link_report_data cur_data;
1279
1280
1281 if (IS_PF(bp) && !CHIP_IS_E1(bp))
1282 bnx2x_read_mf_cfg(bp);
1283
1284
1285 bnx2x_fill_report_data(bp, &cur_data);
1286
1287
1288 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1289 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1290 &bp->last_reported_link.link_report_flags) &&
1291 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1292 &cur_data.link_report_flags)))
1293 return;
1294
1295 bp->link_cnt++;
1296
1297
1298
1299
1300 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1301
1302
1303 if (IS_PF(bp))
1304 bnx2x_iov_link_update(bp);
1305
1306 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1307 &cur_data.link_report_flags)) {
1308 netif_carrier_off(bp->dev);
1309 netdev_err(bp->dev, "NIC Link is Down\n");
1310 return;
1311 } else {
1312 const char *duplex;
1313 const char *flow;
1314
1315 netif_carrier_on(bp->dev);
1316
1317 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1318 &cur_data.link_report_flags))
1319 duplex = "full";
1320 else
1321 duplex = "half";
1322
1323
1324
1325
1326
1327 if (cur_data.link_report_flags) {
1328 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1329 &cur_data.link_report_flags)) {
1330 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1331 &cur_data.link_report_flags))
1332 flow = "ON - receive & transmit";
1333 else
1334 flow = "ON - receive";
1335 } else {
1336 flow = "ON - transmit";
1337 }
1338 } else {
1339 flow = "none";
1340 }
1341 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1342 cur_data.line_speed, duplex, flow);
1343 }
1344}
1345
1346static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1347{
1348 int i;
1349
1350 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1351 struct eth_rx_sge *sge;
1352
1353 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1354 sge->addr_hi =
1355 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1356 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1357
1358 sge->addr_lo =
1359 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1360 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1361 }
1362}
1363
1364static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1365 struct bnx2x_fastpath *fp, int last)
1366{
1367 int i;
1368
1369 for (i = 0; i < last; i++) {
1370 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1371 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1372 u8 *data = first_buf->data;
1373
1374 if (data == NULL) {
1375 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1376 continue;
1377 }
1378 if (tpa_info->tpa_state == BNX2X_TPA_START)
1379 dma_unmap_single(&bp->pdev->dev,
1380 dma_unmap_addr(first_buf, mapping),
1381 fp->rx_buf_size, DMA_FROM_DEVICE);
1382 bnx2x_frag_free(fp, data);
1383 first_buf->data = NULL;
1384 }
1385}
1386
1387void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1388{
1389 int j;
1390
1391 for_each_rx_queue_cnic(bp, j) {
1392 struct bnx2x_fastpath *fp = &bp->fp[j];
1393
1394 fp->rx_bd_cons = 0;
1395
1396
1397
1398
1399
1400
1401 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1402 fp->rx_sge_prod);
1403 }
1404}
1405
1406void bnx2x_init_rx_rings(struct bnx2x *bp)
1407{
1408 int func = BP_FUNC(bp);
1409 u16 ring_prod;
1410 int i, j;
1411
1412
1413 for_each_eth_queue(bp, j) {
1414 struct bnx2x_fastpath *fp = &bp->fp[j];
1415
1416 DP(NETIF_MSG_IFUP,
1417 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1418
1419 if (fp->mode != TPA_MODE_DISABLED) {
1420
1421 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1422 struct bnx2x_agg_info *tpa_info =
1423 &fp->tpa_info[i];
1424 struct sw_rx_bd *first_buf =
1425 &tpa_info->first_buf;
1426
1427 first_buf->data =
1428 bnx2x_frag_alloc(fp, GFP_KERNEL);
1429 if (!first_buf->data) {
1430 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1431 j);
1432 bnx2x_free_tpa_pool(bp, fp, i);
1433 fp->mode = TPA_MODE_DISABLED;
1434 break;
1435 }
1436 dma_unmap_addr_set(first_buf, mapping, 0);
1437 tpa_info->tpa_state = BNX2X_TPA_STOP;
1438 }
1439
1440
1441 bnx2x_set_next_page_sgl(fp);
1442
1443
1444 bnx2x_init_sge_ring_bit_mask(fp);
1445
1446
1447 for (i = 0, ring_prod = 0;
1448 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1449
1450 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1451 GFP_KERNEL) < 0) {
1452 BNX2X_ERR("was only able to allocate %d rx sges\n",
1453 i);
1454 BNX2X_ERR("disabling TPA for queue[%d]\n",
1455 j);
1456
1457 bnx2x_free_rx_sge_range(bp, fp,
1458 ring_prod);
1459 bnx2x_free_tpa_pool(bp, fp,
1460 MAX_AGG_QS(bp));
1461 fp->mode = TPA_MODE_DISABLED;
1462 ring_prod = 0;
1463 break;
1464 }
1465 ring_prod = NEXT_SGE_IDX(ring_prod);
1466 }
1467
1468 fp->rx_sge_prod = ring_prod;
1469 }
1470 }
1471
1472 for_each_eth_queue(bp, j) {
1473 struct bnx2x_fastpath *fp = &bp->fp[j];
1474
1475 fp->rx_bd_cons = 0;
1476
1477
1478
1479
1480
1481
1482 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1483 fp->rx_sge_prod);
1484
1485 if (j != 0)
1486 continue;
1487
1488 if (CHIP_IS_E1(bp)) {
1489 REG_WR(bp, BAR_USTRORM_INTMEM +
1490 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1491 U64_LO(fp->rx_comp_mapping));
1492 REG_WR(bp, BAR_USTRORM_INTMEM +
1493 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1494 U64_HI(fp->rx_comp_mapping));
1495 }
1496 }
1497}
1498
1499static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1500{
1501 u8 cos;
1502 struct bnx2x *bp = fp->bp;
1503
1504 for_each_cos_in_tx_queue(fp, cos) {
1505 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1506 unsigned pkts_compl = 0, bytes_compl = 0;
1507
1508 u16 sw_prod = txdata->tx_pkt_prod;
1509 u16 sw_cons = txdata->tx_pkt_cons;
1510
1511 while (sw_cons != sw_prod) {
1512 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1513 &pkts_compl, &bytes_compl);
1514 sw_cons++;
1515 }
1516
1517 netdev_tx_reset_queue(
1518 netdev_get_tx_queue(bp->dev,
1519 txdata->txq_index));
1520 }
1521}
1522
1523static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1524{
1525 int i;
1526
1527 for_each_tx_queue_cnic(bp, i) {
1528 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1529 }
1530}
1531
1532static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1533{
1534 int i;
1535
1536 for_each_eth_queue(bp, i) {
1537 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1538 }
1539}
1540
1541static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1542{
1543 struct bnx2x *bp = fp->bp;
1544 int i;
1545
1546
1547 if (fp->rx_buf_ring == NULL)
1548 return;
1549
1550 for (i = 0; i < NUM_RX_BD; i++) {
1551 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1552 u8 *data = rx_buf->data;
1553
1554 if (data == NULL)
1555 continue;
1556 dma_unmap_single(&bp->pdev->dev,
1557 dma_unmap_addr(rx_buf, mapping),
1558 fp->rx_buf_size, DMA_FROM_DEVICE);
1559
1560 rx_buf->data = NULL;
1561 bnx2x_frag_free(fp, data);
1562 }
1563}
1564
1565static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1566{
1567 int j;
1568
1569 for_each_rx_queue_cnic(bp, j) {
1570 bnx2x_free_rx_bds(&bp->fp[j]);
1571 }
1572}
1573
1574static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1575{
1576 int j;
1577
1578 for_each_eth_queue(bp, j) {
1579 struct bnx2x_fastpath *fp = &bp->fp[j];
1580
1581 bnx2x_free_rx_bds(fp);
1582
1583 if (fp->mode != TPA_MODE_DISABLED)
1584 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1585 }
1586}
1587
1588static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1589{
1590 bnx2x_free_tx_skbs_cnic(bp);
1591 bnx2x_free_rx_skbs_cnic(bp);
1592}
1593
1594void bnx2x_free_skbs(struct bnx2x *bp)
1595{
1596 bnx2x_free_tx_skbs(bp);
1597 bnx2x_free_rx_skbs(bp);
1598}
1599
1600void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1601{
1602
1603 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1604
1605 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1606
1607 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1608
1609
1610 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1611 & FUNC_MF_CFG_MAX_BW_MASK;
1612
1613 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1614 }
1615}
1616
1617
1618
1619
1620
1621
1622
1623static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1624{
1625 int i, offset = 0;
1626
1627 if (nvecs == offset)
1628 return;
1629
1630
1631 if (IS_PF(bp)) {
1632 free_irq(bp->msix_table[offset].vector, bp->dev);
1633 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1634 bp->msix_table[offset].vector);
1635 offset++;
1636 }
1637
1638 if (CNIC_SUPPORT(bp)) {
1639 if (nvecs == offset)
1640 return;
1641 offset++;
1642 }
1643
1644 for_each_eth_queue(bp, i) {
1645 if (nvecs == offset)
1646 return;
1647 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1648 i, bp->msix_table[offset].vector);
1649
1650 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1651 }
1652}
1653
1654void bnx2x_free_irq(struct bnx2x *bp)
1655{
1656 if (bp->flags & USING_MSIX_FLAG &&
1657 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1658 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1659
1660
1661 if (IS_PF(bp))
1662 nvecs++;
1663
1664 bnx2x_free_msix_irqs(bp, nvecs);
1665 } else {
1666 free_irq(bp->dev->irq, bp->dev);
1667 }
1668}
1669
1670int bnx2x_enable_msix(struct bnx2x *bp)
1671{
1672 int msix_vec = 0, i, rc;
1673
1674
1675 if (IS_PF(bp)) {
1676 bp->msix_table[msix_vec].entry = msix_vec;
1677 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1678 bp->msix_table[0].entry);
1679 msix_vec++;
1680 }
1681
1682
1683 if (CNIC_SUPPORT(bp)) {
1684 bp->msix_table[msix_vec].entry = msix_vec;
1685 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1686 msix_vec, bp->msix_table[msix_vec].entry);
1687 msix_vec++;
1688 }
1689
1690
1691 for_each_eth_queue(bp, i) {
1692 bp->msix_table[msix_vec].entry = msix_vec;
1693 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1694 msix_vec, msix_vec, i);
1695 msix_vec++;
1696 }
1697
1698 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1699 msix_vec);
1700
1701 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1702 BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
1703
1704
1705
1706
1707 if (rc == -ENOSPC) {
1708
1709 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1710 if (rc < 0) {
1711 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1712 rc);
1713 goto no_msix;
1714 }
1715
1716 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1717 bp->flags |= USING_SINGLE_MSIX_FLAG;
1718
1719 BNX2X_DEV_INFO("set number of queues to 1\n");
1720 bp->num_ethernet_queues = 1;
1721 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1722 } else if (rc < 0) {
1723 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1724 goto no_msix;
1725 } else if (rc < msix_vec) {
1726
1727 int diff = msix_vec - rc;
1728
1729 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1730
1731
1732
1733
1734 bp->num_ethernet_queues -= diff;
1735 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1736
1737 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1738 bp->num_queues);
1739 }
1740
1741 bp->flags |= USING_MSIX_FLAG;
1742
1743 return 0;
1744
1745no_msix:
1746
1747 if (rc == -ENOMEM)
1748 bp->flags |= DISABLE_MSI_FLAG;
1749
1750 return rc;
1751}
1752
1753static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1754{
1755 int i, rc, offset = 0;
1756
1757
1758 if (IS_PF(bp)) {
1759 rc = request_irq(bp->msix_table[offset++].vector,
1760 bnx2x_msix_sp_int, 0,
1761 bp->dev->name, bp->dev);
1762 if (rc) {
1763 BNX2X_ERR("request sp irq failed\n");
1764 return -EBUSY;
1765 }
1766 }
1767
1768 if (CNIC_SUPPORT(bp))
1769 offset++;
1770
1771 for_each_eth_queue(bp, i) {
1772 struct bnx2x_fastpath *fp = &bp->fp[i];
1773 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1774 bp->dev->name, i);
1775
1776 rc = request_irq(bp->msix_table[offset].vector,
1777 bnx2x_msix_fp_int, 0, fp->name, fp);
1778 if (rc) {
1779 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1780 bp->msix_table[offset].vector, rc);
1781 bnx2x_free_msix_irqs(bp, offset);
1782 return -EBUSY;
1783 }
1784
1785 offset++;
1786 }
1787
1788 i = BNX2X_NUM_ETH_QUEUES(bp);
1789 if (IS_PF(bp)) {
1790 offset = 1 + CNIC_SUPPORT(bp);
1791 netdev_info(bp->dev,
1792 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1793 bp->msix_table[0].vector,
1794 0, bp->msix_table[offset].vector,
1795 i - 1, bp->msix_table[offset + i - 1].vector);
1796 } else {
1797 offset = CNIC_SUPPORT(bp);
1798 netdev_info(bp->dev,
1799 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1800 0, bp->msix_table[offset].vector,
1801 i - 1, bp->msix_table[offset + i - 1].vector);
1802 }
1803 return 0;
1804}
1805
1806int bnx2x_enable_msi(struct bnx2x *bp)
1807{
1808 int rc;
1809
1810 rc = pci_enable_msi(bp->pdev);
1811 if (rc) {
1812 BNX2X_DEV_INFO("MSI is not attainable\n");
1813 return -1;
1814 }
1815 bp->flags |= USING_MSI_FLAG;
1816
1817 return 0;
1818}
1819
1820static int bnx2x_req_irq(struct bnx2x *bp)
1821{
1822 unsigned long flags;
1823 unsigned int irq;
1824
1825 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1826 flags = 0;
1827 else
1828 flags = IRQF_SHARED;
1829
1830 if (bp->flags & USING_MSIX_FLAG)
1831 irq = bp->msix_table[0].vector;
1832 else
1833 irq = bp->pdev->irq;
1834
1835 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1836}
1837
1838static int bnx2x_setup_irqs(struct bnx2x *bp)
1839{
1840 int rc = 0;
1841 if (bp->flags & USING_MSIX_FLAG &&
1842 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1843 rc = bnx2x_req_msix_irqs(bp);
1844 if (rc)
1845 return rc;
1846 } else {
1847 rc = bnx2x_req_irq(bp);
1848 if (rc) {
1849 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1850 return rc;
1851 }
1852 if (bp->flags & USING_MSI_FLAG) {
1853 bp->dev->irq = bp->pdev->irq;
1854 netdev_info(bp->dev, "using MSI IRQ %d\n",
1855 bp->dev->irq);
1856 }
1857 if (bp->flags & USING_MSIX_FLAG) {
1858 bp->dev->irq = bp->msix_table[0].vector;
1859 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1860 bp->dev->irq);
1861 }
1862 }
1863
1864 return 0;
1865}
1866
1867static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1868{
1869 int i;
1870
1871 for_each_rx_queue_cnic(bp, i) {
1872 bnx2x_fp_busy_poll_init(&bp->fp[i]);
1873 napi_enable(&bnx2x_fp(bp, i, napi));
1874 }
1875}
1876
1877static void bnx2x_napi_enable(struct bnx2x *bp)
1878{
1879 int i;
1880
1881 for_each_eth_queue(bp, i) {
1882 bnx2x_fp_busy_poll_init(&bp->fp[i]);
1883 napi_enable(&bnx2x_fp(bp, i, napi));
1884 }
1885}
1886
1887static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1888{
1889 int i;
1890
1891 for_each_rx_queue_cnic(bp, i) {
1892 napi_disable(&bnx2x_fp(bp, i, napi));
1893 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1894 usleep_range(1000, 2000);
1895 }
1896}
1897
1898static void bnx2x_napi_disable(struct bnx2x *bp)
1899{
1900 int i;
1901
1902 for_each_eth_queue(bp, i) {
1903 napi_disable(&bnx2x_fp(bp, i, napi));
1904 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1905 usleep_range(1000, 2000);
1906 }
1907}
1908
1909void bnx2x_netif_start(struct bnx2x *bp)
1910{
1911 if (netif_running(bp->dev)) {
1912 bnx2x_napi_enable(bp);
1913 if (CNIC_LOADED(bp))
1914 bnx2x_napi_enable_cnic(bp);
1915 bnx2x_int_enable(bp);
1916 if (bp->state == BNX2X_STATE_OPEN)
1917 netif_tx_wake_all_queues(bp->dev);
1918 }
1919}
1920
1921void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1922{
1923 bnx2x_int_disable_sync(bp, disable_hw);
1924 bnx2x_napi_disable(bp);
1925 if (CNIC_LOADED(bp))
1926 bnx2x_napi_disable_cnic(bp);
1927}
1928
1929u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1930 void *accel_priv, select_queue_fallback_t fallback)
1931{
1932 struct bnx2x *bp = netdev_priv(dev);
1933
1934 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1935 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1936 u16 ether_type = ntohs(hdr->h_proto);
1937
1938
1939 if (ether_type == ETH_P_8021Q) {
1940 struct vlan_ethhdr *vhdr =
1941 (struct vlan_ethhdr *)skb->data;
1942
1943 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1944 }
1945
1946
1947 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1948 return bnx2x_fcoe_tx(bp, txq_index);
1949 }
1950
1951
1952 return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
1953}
1954
1955void bnx2x_set_num_queues(struct bnx2x *bp)
1956{
1957
1958 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1959
1960
1961 if (IS_MF_STORAGE_ONLY(bp))
1962 bp->num_ethernet_queues = 1;
1963
1964
1965 bp->num_cnic_queues = CNIC_SUPPORT(bp);
1966 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1967
1968 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1969}
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1994{
1995 int rc, tx, rx;
1996
1997 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1998 rx = BNX2X_NUM_ETH_QUEUES(bp);
1999
2000
2001 if (include_cnic && !NO_FCOE(bp)) {
2002 rx++;
2003 tx++;
2004 }
2005
2006 rc = netif_set_real_num_tx_queues(bp->dev, tx);
2007 if (rc) {
2008 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
2009 return rc;
2010 }
2011 rc = netif_set_real_num_rx_queues(bp->dev, rx);
2012 if (rc) {
2013 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
2014 return rc;
2015 }
2016
2017 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
2018 tx, rx);
2019
2020 return rc;
2021}
2022
2023static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
2024{
2025 int i;
2026
2027 for_each_queue(bp, i) {
2028 struct bnx2x_fastpath *fp = &bp->fp[i];
2029 u32 mtu;
2030
2031
2032 if (IS_FCOE_IDX(i))
2033
2034
2035
2036
2037
2038
2039 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2040 else
2041 mtu = bp->dev->mtu;
2042 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2043 IP_HEADER_ALIGNMENT_PADDING +
2044 ETH_OVREHEAD +
2045 mtu +
2046 BNX2X_FW_RX_ALIGN_END;
2047
2048 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2049 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2050 else
2051 fp->rx_frag_size = 0;
2052 }
2053}
2054
2055static int bnx2x_init_rss(struct bnx2x *bp)
2056{
2057 int i;
2058 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2059
2060
2061
2062
2063 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2064 bp->rss_conf_obj.ind_table[i] =
2065 bp->fp->cl_id +
2066 ethtool_rxfh_indir_default(i, num_eth_queues);
2067
2068
2069
2070
2071
2072
2073
2074
2075
2076 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
2077}
2078
2079int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2080 bool config_hash, bool enable)
2081{
2082 struct bnx2x_config_rss_params params = {NULL};
2083
2084
2085
2086
2087
2088
2089
2090
2091 params.rss_obj = rss_obj;
2092
2093 __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags);
2094
2095 if (enable) {
2096 __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags);
2097
2098
2099 __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags);
2100 __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags);
2101 __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags);
2102 __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags);
2103 if (rss_obj->udp_rss_v4)
2104 __set_bit(BNX2X_RSS_IPV4_UDP, ¶ms.rss_flags);
2105 if (rss_obj->udp_rss_v6)
2106 __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags);
2107
2108 if (!CHIP_IS_E1x(bp)) {
2109
2110 __set_bit(BNX2X_RSS_IPV4_VXLAN, ¶ms.rss_flags);
2111 __set_bit(BNX2X_RSS_IPV6_VXLAN, ¶ms.rss_flags);
2112
2113
2114 __set_bit(BNX2X_RSS_TUNN_INNER_HDRS, ¶ms.rss_flags);
2115 }
2116 } else {
2117 __set_bit(BNX2X_RSS_MODE_DISABLED, ¶ms.rss_flags);
2118 }
2119
2120
2121 params.rss_result_mask = MULTI_MASK;
2122
2123 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
2124
2125 if (config_hash) {
2126
2127 netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
2128 __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags);
2129 }
2130
2131 if (IS_PF(bp))
2132 return bnx2x_config_rss(bp, ¶ms);
2133 else
2134 return bnx2x_vfpf_config_rss(bp, ¶ms);
2135}
2136
2137static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2138{
2139 struct bnx2x_func_state_params func_params = {NULL};
2140
2141
2142 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2143
2144 func_params.f_obj = &bp->func_obj;
2145 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2146
2147 func_params.params.hw_init.load_phase = load_code;
2148
2149 return bnx2x_func_state_change(bp, &func_params);
2150}
2151
2152
2153
2154
2155
2156void bnx2x_squeeze_objects(struct bnx2x *bp)
2157{
2158 int rc;
2159 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2160 struct bnx2x_mcast_ramrod_params rparam = {NULL};
2161 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2162
2163
2164
2165
2166 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2167
2168 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2169
2170
2171 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2172 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2173 &ramrod_flags);
2174 if (rc != 0)
2175 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2176
2177
2178 vlan_mac_flags = 0;
2179 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2180 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2181 &ramrod_flags);
2182 if (rc != 0)
2183 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2184
2185
2186 rparam.mcast_obj = &bp->mcast_obj;
2187 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2188
2189
2190
2191
2192
2193 netif_addr_lock_bh(bp->dev);
2194 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2195 if (rc < 0)
2196 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2197 rc);
2198
2199
2200 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2201 while (rc != 0) {
2202 if (rc < 0) {
2203 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2204 rc);
2205 netif_addr_unlock_bh(bp->dev);
2206 return;
2207 }
2208
2209 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2210 }
2211 netif_addr_unlock_bh(bp->dev);
2212}
2213
2214#ifndef BNX2X_STOP_ON_ERROR
2215#define LOAD_ERROR_EXIT(bp, label) \
2216 do { \
2217 (bp)->state = BNX2X_STATE_ERROR; \
2218 goto label; \
2219 } while (0)
2220
2221#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2222 do { \
2223 bp->cnic_loaded = false; \
2224 goto label; \
2225 } while (0)
2226#else
2227#define LOAD_ERROR_EXIT(bp, label) \
2228 do { \
2229 (bp)->state = BNX2X_STATE_ERROR; \
2230 (bp)->panic = 1; \
2231 return -EBUSY; \
2232 } while (0)
2233#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2234 do { \
2235 bp->cnic_loaded = false; \
2236 (bp)->panic = 1; \
2237 return -EBUSY; \
2238 } while (0)
2239#endif
2240
2241static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2242{
2243 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2244 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2245 return;
2246}
2247
2248static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2249{
2250 int num_groups, vf_headroom = 0;
2251 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2252
2253
2254 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2255
2256
2257
2258
2259
2260
2261 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2262
2263
2264
2265
2266
2267
2268 if (IS_SRIOV(bp))
2269 vf_headroom = bnx2x_vf_headroom(bp);
2270
2271
2272
2273
2274
2275
2276 num_groups =
2277 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2278 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2279 1 : 0));
2280
2281 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2282 bp->fw_stats_num, vf_headroom, num_groups);
2283 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2284 num_groups * sizeof(struct stats_query_cmd_group);
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2295 sizeof(struct per_pf_stats) +
2296 sizeof(struct fcoe_statistics_params) +
2297 sizeof(struct per_queue_stats) * num_queue_stats +
2298 sizeof(struct stats_counter);
2299
2300 bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2301 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2302 if (!bp->fw_stats)
2303 goto alloc_mem_err;
2304
2305
2306 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2307 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2308 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2309 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2310 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2311 bp->fw_stats_req_sz;
2312
2313 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2314 U64_HI(bp->fw_stats_req_mapping),
2315 U64_LO(bp->fw_stats_req_mapping));
2316 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2317 U64_HI(bp->fw_stats_data_mapping),
2318 U64_LO(bp->fw_stats_data_mapping));
2319 return 0;
2320
2321alloc_mem_err:
2322 bnx2x_free_fw_stats_mem(bp);
2323 BNX2X_ERR("Can't allocate FW stats memory\n");
2324 return -ENOMEM;
2325}
2326
2327
2328static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2329{
2330 u32 param;
2331
2332
2333 bp->fw_seq =
2334 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2335 DRV_MSG_SEQ_NUMBER_MASK);
2336 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2337
2338
2339 bp->fw_drv_pulse_wr_seq =
2340 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2341 DRV_PULSE_SEQ_MASK);
2342 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2343
2344 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2345
2346 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2347 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2348
2349
2350 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2351
2352
2353 if (!(*load_code)) {
2354 BNX2X_ERR("MCP response failure, aborting\n");
2355 return -EBUSY;
2356 }
2357
2358
2359
2360
2361 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2362 BNX2X_ERR("MCP refused load request, aborting\n");
2363 return -EBUSY;
2364 }
2365 return 0;
2366}
2367
2368
2369
2370
2371
2372int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
2373{
2374
2375 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2376 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2377
2378 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2379 (BCM_5710_FW_MINOR_VERSION << 8) +
2380 (BCM_5710_FW_REVISION_VERSION << 16) +
2381 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2382
2383
2384 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2385
2386 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2387 loaded_fw, my_fw);
2388
2389
2390 if (my_fw != loaded_fw) {
2391 if (print_err)
2392 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2393 loaded_fw, my_fw);
2394 else
2395 BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2396 loaded_fw, my_fw);
2397 return -EBUSY;
2398 }
2399 }
2400 return 0;
2401}
2402
2403
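/* Load accounting when no MCP is present: the first function loaded on a path
 * performs the COMMON init, the first one on a port performs the PORT init,
 * and every later function only does FUNCTION-level init.
 */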
2404static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2405{
2406 int path = BP_PATH(bp);
2407
2408 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2409 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2410 bnx2x_load_count[path][2]);
2411 bnx2x_load_count[path][0]++;
2412 bnx2x_load_count[path][1 + port]++;
2413 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2414 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2415 bnx2x_load_count[path][2]);
2416 if (bnx2x_load_count[path][0] == 1)
2417 return FW_MSG_CODE_DRV_LOAD_COMMON;
2418 else if (bnx2x_load_count[path][1 + port] == 1)
2419 return FW_MSG_CODE_DRV_LOAD_PORT;
2420 else
2421 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2422}
2423
2424
2425static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2426{
2427 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2428 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2429 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2430 bp->port.pmf = 1;
2431
2432
2433
2434
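/* Barrier to make the update of bp->port.pmf visible before any subsequent
 * reader (e.g. the periodic task) can observe the new value.
 */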
2435 smp_mb();
2436 } else {
2437 bp->port.pmf = 0;
2438 }
2439
2440 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2441}
2442
2443static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2444{
2445 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2446 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2447 (bp->common.shmem2_base)) {
2448 if (SHMEM2_HAS(bp, dcc_support))
2449 SHMEM2_WR(bp, dcc_support,
2450 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2451 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2452 if (SHMEM2_HAS(bp, afex_driver_support))
2453 SHMEM2_WR(bp, afex_driver_support,
2454 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2455 }
2456
2457
2458 bp->afex_def_vlan_tag = -1;
2459}
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469
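/* bnx2x_bz_fp - zero the contents of a fastpath structure.
 *
 * Clears the given fastpath while preserving the members that are allocated
 * only once (napi context and tpa_info array), then restores the back-pointer,
 * index, max_cos, txdata pointers and TPA mode.
 */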
2470static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2471{
2472 struct bnx2x_fastpath *fp = &bp->fp[index];
2473 int cos;
2474 struct napi_struct orig_napi = fp->napi;
2475 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2476
2477
2478 if (fp->tpa_info)
2479 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2480 sizeof(struct bnx2x_agg_info));
2481 memset(fp, 0, sizeof(*fp));
2482
2483
2484 fp->napi = orig_napi;
2485 fp->tpa_info = orig_tpa_info;
2486 fp->bp = bp;
2487 fp->index = index;
2488 if (IS_ETH_FP(fp))
2489 fp->max_cos = bp->max_cos;
2490 else
2491
2492 fp->max_cos = 1;
2493
2494
2495 if (IS_FCOE_FP(fp))
2496 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2497 if (IS_ETH_FP(fp))
2498 for_each_cos_in_tx_queue(fp, cos)
2499 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2500 BNX2X_NUM_ETH_QUEUES(bp) + index];
2501
2502
2503
2504
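/* Pick the aggregation mode: LRO when enabled, otherwise GRO if the MTU
 * allows it, otherwise no aggregation at all.
 */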
2505 if (bp->dev->features & NETIF_F_LRO)
2506 fp->mode = TPA_MODE_LRO;
2507 else if (bp->dev->features & NETIF_F_GRO &&
2508 bnx2x_mtu_allows_gro(bp->dev->mtu))
2509 fp->mode = TPA_MODE_GRO;
2510 else
2511 fp->mode = TPA_MODE_DISABLED;
2512
2513
2514
2515
2516 if (bp->disable_tpa || IS_FCOE_FP(fp))
2517 fp->mode = TPA_MODE_DISABLED;
2518}
2519
2520void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
2521{
2522 u32 cur;
2523
2524 if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
2525 return;
2526
2527 cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
2528 DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
2529 cur, state);
2530
2531 SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
2532}
2533
2534int bnx2x_load_cnic(struct bnx2x *bp)
2535{
2536 int i, rc, port = BP_PORT(bp);
2537
2538 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2539
2540 mutex_init(&bp->cnic_mutex);
2541
2542 if (IS_PF(bp)) {
2543 rc = bnx2x_alloc_mem_cnic(bp);
2544 if (rc) {
2545 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2546 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2547 }
2548 }
2549
2550 rc = bnx2x_alloc_fp_mem_cnic(bp);
2551 if (rc) {
2552 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2553 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2554 }
2555
2556
2557 rc = bnx2x_set_real_num_queues(bp, 1);
2558 if (rc) {
2559 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2560 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2561 }
2562
2563
2564 bnx2x_add_all_napi_cnic(bp);
2565 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2566 bnx2x_napi_enable_cnic(bp);
2567
2568 rc = bnx2x_init_hw_func_cnic(bp);
2569 if (rc)
2570 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2571
2572 bnx2x_nic_init_cnic(bp);
2573
2574 if (IS_PF(bp)) {
2575
2576 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2577
2578
2579 for_each_cnic_queue(bp, i) {
2580 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2581 if (rc) {
2582 BNX2X_ERR("Queue setup failed\n");
2583 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2584 }
2585 }
2586 }
2587
2588
2589 bnx2x_set_rx_mode_inner(bp);
2590
2591
2592 bnx2x_get_iscsi_info(bp);
2593 bnx2x_setup_cnic_irq_info(bp);
2594 bnx2x_setup_cnic_info(bp);
2595 bp->cnic_loaded = true;
2596 if (bp->state == BNX2X_STATE_OPEN)
2597 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2598
2599 DP(NETIF_MSG_IFUP, "CNIC-related load completed successfully\n");
2600
2601 return 0;
2602
2603#ifndef BNX2X_STOP_ON_ERROR
2604load_error_cnic2:
2605
2606 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2607
2608load_error_cnic1:
2609 bnx2x_napi_disable_cnic(bp);
2610
2611 if (bnx2x_set_real_num_queues(bp, 0))
2612 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2613load_error_cnic0:
2614 BNX2X_ERR("CNIC-related load failed\n");
2615 bnx2x_free_fp_mem_cnic(bp);
2616 bnx2x_free_mem_cnic(bp);
2617 return rc;
2618#endif
2619}
2620
2621
2622int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2623{
2624 int port = BP_PORT(bp);
2625 int i, rc = 0, load_code = 0;
2626
2627 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2628 DP(NETIF_MSG_IFUP,
2629 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2630
2631#ifdef BNX2X_STOP_ON_ERROR
2632 if (unlikely(bp->panic)) {
2633 BNX2X_ERR("Can't load NIC when there is panic\n");
2634 return -EPERM;
2635 }
2636#endif
2637
2638 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2639
2640
2641 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2642 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2643 &bp->last_reported_link.link_report_flags);
2644
2645 if (IS_PF(bp))
2646
2647 bnx2x_ilt_set_info(bp);
2648
2649
2650
2651
2652
2653
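/* Zero the fastpath structures while keeping the once-only allocations
 * (napi, tpa_info) intact, and reset the per-queue tx data.
 */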
2654 DP(NETIF_MSG_IFUP, "num queues: %d\n", bp->num_queues);
2655 for_each_queue(bp, i)
2656 bnx2x_bz_fp(bp, i);
2657 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2658 bp->num_cnic_queues) *
2659 sizeof(struct bnx2x_fp_txdata));
2660
2661 bp->fcoe_init = false;
2662
2663
2664 bnx2x_set_rx_buf_size(bp);
2665
2666 if (IS_PF(bp)) {
2667 rc = bnx2x_alloc_mem(bp);
2668 if (rc) {
2669 BNX2X_ERR("Unable to allocate bp memory\n");
2670 return rc;
2671 }
2672 }
2673
2674
2675
2676
2677 rc = bnx2x_alloc_fp_mem(bp);
2678 if (rc) {
2679 BNX2X_ERR("Unable to allocate memory for fps\n");
2680 LOAD_ERROR_EXIT(bp, load_error0);
2681 }
2682
2683
2684 if (bnx2x_alloc_fw_stats_mem(bp))
2685 LOAD_ERROR_EXIT(bp, load_error0);
2686
2687
2688 if (IS_VF(bp)) {
2689 rc = bnx2x_vfpf_init(bp);
2690 if (rc)
2691 LOAD_ERROR_EXIT(bp, load_error0);
2692 }
2693
2694
2695
2696
2697
2698 rc = bnx2x_set_real_num_queues(bp, 0);
2699 if (rc) {
2700 BNX2X_ERR("Unable to set real_num_queues\n");
2701 LOAD_ERROR_EXIT(bp, load_error0);
2702 }
2703
2704
2705
2706
2707
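/* Configure the default CoS-to-queue mapping in the stack; a later DCBX
 * negotiation result may override it.
 */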
2708 bnx2x_setup_tc(bp->dev, bp->max_cos);
2709
2710
2711 bnx2x_add_all_napi(bp);
2712 DP(NETIF_MSG_IFUP, "napi added\n");
2713 bnx2x_napi_enable(bp);
2714
2715 if (IS_PF(bp)) {
2716
2717 bnx2x_set_pf_load(bp);
2718
2719
2720 if (!BP_NOMCP(bp)) {
2721
2722 rc = bnx2x_nic_load_request(bp, &load_code);
2723 if (rc)
2724 LOAD_ERROR_EXIT(bp, load_error1);
2725
2726
2727 rc = bnx2x_compare_fw_ver(bp, load_code, true);
2728 if (rc) {
2729 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2730 LOAD_ERROR_EXIT(bp, load_error2);
2731 }
2732 } else {
2733 load_code = bnx2x_nic_load_no_mcp(bp, port);
2734 }
2735
2736
2737 bnx2x_nic_load_pmf(bp, load_code);
2738
2739
2740 bnx2x__init_func_obj(bp);
2741
2742
2743 rc = bnx2x_init_hw(bp, load_code);
2744 if (rc) {
2745 BNX2X_ERR("HW init failed, aborting\n");
2746 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2747 LOAD_ERROR_EXIT(bp, load_error2);
2748 }
2749 }
2750
2751 bnx2x_pre_irq_nic_init(bp);
2752
2753
2754 rc = bnx2x_setup_irqs(bp);
2755 if (rc) {
2756 BNX2X_ERR("setup irqs failed\n");
2757 if (IS_PF(bp))
2758 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2759 LOAD_ERROR_EXIT(bp, load_error2);
2760 }
2761
2762
2763 if (IS_PF(bp)) {
2764
2765 bnx2x_post_irq_nic_init(bp, load_code);
2766
2767 bnx2x_init_bp_objs(bp);
2768 bnx2x_iov_nic_init(bp);
2769
2770
2771 bp->afex_def_vlan_tag = -1;
2772 bnx2x_nic_load_afex_dcc(bp, load_code);
2773 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2774 rc = bnx2x_func_start(bp);
2775 if (rc) {
2776 BNX2X_ERR("Function start failed!\n");
2777 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2778
2779 LOAD_ERROR_EXIT(bp, load_error3);
2780 }
2781
2782
2783 if (!BP_NOMCP(bp)) {
2784 load_code = bnx2x_fw_command(bp,
2785 DRV_MSG_CODE_LOAD_DONE, 0);
2786 if (!load_code) {
2787 BNX2X_ERR("MCP response failure, aborting\n");
2788 rc = -EBUSY;
2789 LOAD_ERROR_EXIT(bp, load_error3);
2790 }
2791 }
2792
2793
2794 bnx2x_update_coalesce(bp);
2795 }
2796
2797
2798 rc = bnx2x_setup_leading(bp);
2799 if (rc) {
2800 BNX2X_ERR("Setup leading failed!\n");
2801 LOAD_ERROR_EXIT(bp, load_error3);
2802 }
2803
2804
2805 for_each_nondefault_eth_queue(bp, i) {
2806 if (IS_PF(bp))
2807 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2808 else
2809 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2810 if (rc) {
2811 BNX2X_ERR("Queue %d setup failed\n", i);
2812 LOAD_ERROR_EXIT(bp, load_error3);
2813 }
2814 }
2815
2816
2817 rc = bnx2x_init_rss(bp);
2818 if (rc) {
2819 BNX2X_ERR("PF RSS init failed\n");
2820 LOAD_ERROR_EXIT(bp, load_error3);
2821 }
2822
2823
2824 bp->state = BNX2X_STATE_OPEN;
2825
2826
2827 if (IS_PF(bp))
2828 rc = bnx2x_set_eth_mac(bp, true);
2829 else
2830 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2831 true);
2832 if (rc) {
2833 BNX2X_ERR("Setting Ethernet MAC failed\n");
2834 LOAD_ERROR_EXIT(bp, load_error3);
2835 }
2836
2837 if (IS_PF(bp) && bp->pending_max) {
2838 bnx2x_update_max_mf_config(bp, bp->pending_max);
2839 bp->pending_max = 0;
2840 }
2841
2842 if (bp->port.pmf) {
2843 rc = bnx2x_initial_phy_init(bp, load_mode);
2844 if (rc)
2845 LOAD_ERROR_EXIT(bp, load_error3);
2846 }
2847 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2848
2849
2850
2851
2852 rc = bnx2x_vlan_reconfigure_vid(bp);
2853 if (rc)
2854 LOAD_ERROR_EXIT(bp, load_error3);
2855
2856
2857 bnx2x_set_rx_mode_inner(bp);
2858
2859 if (bp->flags & PTP_SUPPORTED) {
2860 bnx2x_init_ptp(bp);
2861 bnx2x_configure_ptp_filters(bp);
2862 }
2863
2864 switch (load_mode) {
2865 case LOAD_NORMAL:
2866
2867 netif_tx_wake_all_queues(bp->dev);
2868 break;
2869
2870 case LOAD_OPEN:
2871 netif_tx_start_all_queues(bp->dev);
2872 smp_mb__after_atomic();
2873 break;
2874
2875 case LOAD_DIAG:
2876 case LOAD_LOOPBACK_EXT:
2877 bp->state = BNX2X_STATE_DIAG;
2878 break;
2879
2880 default:
2881 break;
2882 }
2883
2884 if (bp->port.pmf)
2885 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2886 else
2887 bnx2x__link_status_update(bp);
2888
2889
2890 mod_timer(&bp->timer, jiffies + bp->current_interval);
2891
2892 if (CNIC_ENABLED(bp))
2893 bnx2x_load_cnic(bp);
2894
2895 if (IS_PF(bp))
2896 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2897
2898 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2899
2900 u32 val;
2901 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2902 val &= ~DRV_FLAGS_MTU_MASK;
2903 val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
2904 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2905 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2906 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2907 }
2908
2909
2910 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2911 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2912 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2913 return -EBUSY;
2914 }
2915
2916
2917 if (IS_PF(bp))
2918 bnx2x_update_mfw_dump(bp);
2919
2920
2921 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2922 bnx2x_dcbx_init(bp, false);
2923
2924 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2925 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
2926
2927 DP(NETIF_MSG_IFUP, "NIC load completed successfully\n");
2928
2929 return 0;
2930
2931#ifndef BNX2X_STOP_ON_ERROR
2932load_error3:
2933 if (IS_PF(bp)) {
2934 bnx2x_int_disable_sync(bp, 1);
2935
2936
2937 bnx2x_squeeze_objects(bp);
2938 }
2939
2940
2941 bnx2x_free_skbs(bp);
2942 for_each_rx_queue(bp, i)
2943 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2944
2945
2946 bnx2x_free_irq(bp);
2947load_error2:
2948 if (IS_PF(bp) && !BP_NOMCP(bp)) {
2949 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2950 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2951 }
2952
2953 bp->port.pmf = 0;
2954load_error1:
2955 bnx2x_napi_disable(bp);
2956 bnx2x_del_all_napi(bp);
2957
2958
2959 if (IS_PF(bp))
2960 bnx2x_clear_pf_load(bp);
2961load_error0:
2962 bnx2x_free_fw_stats_mem(bp);
2963 bnx2x_free_fp_mem(bp);
2964 bnx2x_free_mem(bp);
2965
2966 return rc;
2967#endif
2968}
2969
2970int bnx2x_drain_tx_queues(struct bnx2x *bp)
2971{
2972 int rc = 0, cos, i;
2973
2974
2975 for_each_tx_queue(bp, i) {
2976 struct bnx2x_fastpath *fp = &bp->fp[i];
2977
2978 for_each_cos_in_tx_queue(fp, cos)
2979 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2980 if (rc)
2981 return rc;
2982 }
2983 return 0;
2984}
2985
2986
2987int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2988{
2989 int i;
2990 bool global = false;
2991
2992 DP(NETIF_MSG_IFDOWN, "Starting NIC unload\n");
2993
2994 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2995 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
2996
2997
2998 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2999 u32 val;
3000 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
3001 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
3002 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
3003 }
3004
3005 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
3006 (bp->state == BNX2X_STATE_CLOSED ||
3007 bp->state == BNX2X_STATE_ERROR)) {
3008
3009
3010
3011
3012
3013
3014
3015 bp->recovery_state = BNX2X_RECOVERY_DONE;
3016 bp->is_leader = 0;
3017 bnx2x_release_leader_lock(bp);
3018 smp_mb();
3019
3020 DP(NETIF_MSG_IFDOWN, "Releasing leadership...\n");
3021 BNX2X_ERR("Can't unload in closed or error state\n");
3022 return -EINVAL;
3023 }
3024
3025
3026
3027
3028
3029
3030
3031 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
3032 return 0;
3033
3034
3035
3036
3037
3038 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
3039 smp_mb();
3040
3041
3042 bnx2x_iov_channel_down(bp);
3043
3044 if (CNIC_LOADED(bp))
3045 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
3046
3047
3048 bnx2x_tx_disable(bp);
3049 netdev_reset_tc(bp->dev);
3050
3051 bp->rx_mode = BNX2X_RX_MODE_NONE;
3052
3053 del_timer_sync(&bp->timer);
3054
3055 if (IS_PF(bp)) {
3056
3057 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3058 bnx2x_drv_pulse(bp);
3059 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
3060 bnx2x_save_statistics(bp);
3061 }
3062
3063
3064 bnx2x_drain_tx_queues(bp);
3065
3066
3067
3068
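/* Close the device: a VF does it through the PF channel, a PF either performs
 * the full chip cleanup or - in the recovery flow - only sends the unload
 * request and quiesces without touching the chip.
 */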
3069 if (IS_VF(bp))
3070 bnx2x_vfpf_close_vf(bp);
3071 else if (unload_mode != UNLOAD_RECOVERY)
3072
3073 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
3074 else {
3075
3076 bnx2x_send_unload_req(bp, unload_mode);
3077
3078
3079
3080
3081
3082
3083
3084 if (!CHIP_IS_E1x(bp))
3085 bnx2x_pf_disable(bp);
3086
3087
3088 bnx2x_netif_stop(bp, 1);
3089
3090 bnx2x_del_all_napi(bp);
3091 if (CNIC_LOADED(bp))
3092 bnx2x_del_all_napi_cnic(bp);
3093
3094 bnx2x_free_irq(bp);
3095
3096
3097 bnx2x_send_unload_done(bp, false);
3098 }
3099
3100
3101
3102
3103
3104 if (IS_PF(bp))
3105 bnx2x_squeeze_objects(bp);
3106
3107
3108 bp->sp_state = 0;
3109
3110 bp->port.pmf = 0;
3111
3112
3113 bp->sp_rtnl_state = 0;
3114 smp_mb();
3115
3116
3117 bnx2x_free_skbs(bp);
3118 if (CNIC_LOADED(bp))
3119 bnx2x_free_skbs_cnic(bp);
3120 for_each_rx_queue(bp, i)
3121 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
3122
3123 bnx2x_free_fp_mem(bp);
3124 if (CNIC_LOADED(bp))
3125 bnx2x_free_fp_mem_cnic(bp);
3126
3127 if (IS_PF(bp)) {
3128 if (CNIC_LOADED(bp))
3129 bnx2x_free_mem_cnic(bp);
3130 }
3131 bnx2x_free_mem(bp);
3132
3133 bp->state = BNX2X_STATE_CLOSED;
3134 bp->cnic_loaded = false;
3135
3136
3137 if (IS_PF(bp))
3138 bnx2x_update_mng_version(bp);
3139
3140
3141
3142
3143 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
3144 bnx2x_set_reset_in_progress(bp);
3145
3146
3147 if (global)
3148 bnx2x_set_reset_global(bp);
3149 }
3150
3151
3152
3153
3154 if (IS_PF(bp) &&
3155 !bnx2x_clear_pf_load(bp) &&
3156 bnx2x_reset_is_done(bp, BP_PATH(bp)))
3157 bnx2x_disable_close_the_gate(bp);
3158
3159 DP(NETIF_MSG_IFDOWN, "NIC unload completed\n");
3160
3161 return 0;
3162}
3163
3164int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3165{
3166 u16 pmcsr;
3167
3168
3169 if (!bp->pdev->pm_cap) {
3170 BNX2X_DEV_INFO("No power capability. Breaking.\n");
3171 return 0;
3172 }
3173
3174 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3175
3176 switch (state) {
3177 case PCI_D0:
3178 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3179 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3180 PCI_PM_CTRL_PME_STATUS));
3181
3182 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3183
3184 msleep(20);
3185 break;
3186
3187 case PCI_D3hot:
3188
3189
3190 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3191 return 0;
3192
3193 if (CHIP_REV_IS_SLOW(bp))
3194 return 0;
3195
3196 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3197 pmcsr |= 3;
3198
3199 if (bp->wol)
3200 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3201
3202 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3203 pmcsr);
3204
3205
3206
3207
3208 break;
3209
3210 default:
3211 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3212 return -EINVAL;
3213 }
3214 return 0;
3215}
3216
3217
3218
3219
3220static int bnx2x_poll(struct napi_struct *napi, int budget)
3221{
3222 int work_done = 0;
3223 u8 cos;
3224 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3225 napi);
3226 struct bnx2x *bp = fp->bp;
3227
3228 while (1) {
3229#ifdef BNX2X_STOP_ON_ERROR
3230 if (unlikely(bp->panic)) {
3231 napi_complete(napi);
3232 return 0;
3233 }
3234#endif
3235 if (!bnx2x_fp_lock_napi(fp))
3236 return budget;
3237
3238 for_each_cos_in_tx_queue(fp, cos)
3239 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3240 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3241
3242 if (bnx2x_has_rx_work(fp)) {
3243 work_done += bnx2x_rx_int(fp, budget - work_done);
3244
3245
3246 if (work_done >= budget) {
3247 bnx2x_fp_unlock_napi(fp);
3248 break;
3249 }
3250 }
3251
3252 bnx2x_fp_unlock_napi(fp);
3253
3254
3255 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3256
3257
3258
3259
3260
3261 if (IS_FCOE_FP(fp)) {
3262 napi_complete(napi);
3263 break;
3264 }
3265 bnx2x_update_fpsb_idx(fp);
3266
3267
3268
3269
3270
3271
3272
3273
3274
3275
3276
3277
3278
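/* bnx2x_has_rx_work() reads the status block, so the rmb() below ensures the
 * index update done in bnx2x_update_fpsb_idx() is observed before the
 * re-check; otherwise interrupts could be re-enabled with work still pending.
 */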
3279 rmb();
3280
3281 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3282 napi_complete(napi);
3283
3284 DP(NETIF_MSG_RX_STATUS,
3285 "Update index to %d\n", fp->fp_hc_idx);
3286 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3287 le16_to_cpu(fp->fp_hc_idx),
3288 IGU_INT_ENABLE, 1);
3289 break;
3290 }
3291 }
3292 }
3293
3294 return work_done;
3295}
3296
3297#ifdef CONFIG_NET_RX_BUSY_POLL
3298
3299int bnx2x_low_latency_recv(struct napi_struct *napi)
3300{
3301 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3302 napi);
3303 struct bnx2x *bp = fp->bp;
3304 int found = 0;
3305
3306 if ((bp->state == BNX2X_STATE_CLOSED) ||
3307 (bp->state == BNX2X_STATE_ERROR) ||
3308 (bp->dev->features & (NETIF_F_LRO | NETIF_F_GRO)))
3309 return LL_FLUSH_FAILED;
3310
3311 if (!bnx2x_fp_lock_poll(fp))
3312 return LL_FLUSH_BUSY;
3313
3314 if (bnx2x_has_rx_work(fp))
3315 found = bnx2x_rx_int(fp, 4);
3316
3317 bnx2x_fp_unlock_poll(fp);
3318
3319 return found;
3320}
3321#endif
3322
3323
3324
3325
3326
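/* bnx2x_tx_split - split the first BD of a TSO packet into a header-only BD
 * and a data BD so the headers land in a BD of their own; returns the updated
 * BD producer index.
 */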
3327static u16 bnx2x_tx_split(struct bnx2x *bp,
3328 struct bnx2x_fp_txdata *txdata,
3329 struct sw_tx_bd *tx_buf,
3330 struct eth_tx_start_bd **tx_bd, u16 hlen,
3331 u16 bd_prod)
3332{
3333 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3334 struct eth_tx_bd *d_tx_bd;
3335 dma_addr_t mapping;
3336 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3337
3338
3339 h_tx_bd->nbytes = cpu_to_le16(hlen);
3340
3341 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3342 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3343
3344
3345
3346 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3347 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3348
3349 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3350 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3351
3352 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3353 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3354 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3355
3356
3357 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3358
3359 DP(NETIF_MSG_TX_QUEUED,
3360 "TSO split data size is %d (%x:%x)\n",
3361 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3362
3363
3364 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3365
3366 return bd_prod;
3367}
3368
3369#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3370#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
3371static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3372{
3373 __sum16 tsum = (__force __sum16) csum;
3374
3375 if (fix > 0)
3376 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3377 csum_partial(t_header - fix, fix, 0)));
3378
3379 else if (fix < 0)
3380 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3381 csum_partial(t_header, -fix, 0)));
3382
3383 return bswab16(tsum);
3384}
3385
3386static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3387{
3388 u32 rc;
3389 __u8 prot = 0;
3390 __be16 protocol;
3391
3392 if (skb->ip_summed != CHECKSUM_PARTIAL)
3393 return XMIT_PLAIN;
3394
3395 protocol = vlan_get_protocol(skb);
3396 if (protocol == htons(ETH_P_IPV6)) {
3397 rc = XMIT_CSUM_V6;
3398 prot = ipv6_hdr(skb)->nexthdr;
3399 } else {
3400 rc = XMIT_CSUM_V4;
3401 prot = ip_hdr(skb)->protocol;
3402 }
3403
3404 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3405 if (inner_ip_hdr(skb)->version == 6) {
3406 rc |= XMIT_CSUM_ENC_V6;
3407 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3408 rc |= XMIT_CSUM_TCP;
3409 } else {
3410 rc |= XMIT_CSUM_ENC_V4;
3411 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3412 rc |= XMIT_CSUM_TCP;
3413 }
3414 }
3415 if (prot == IPPROTO_TCP)
3416 rc |= XMIT_CSUM_TCP;
3417
3418 if (skb_is_gso(skb)) {
3419 if (skb_is_gso_v6(skb)) {
3420 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3421 if (rc & XMIT_CSUM_ENC)
3422 rc |= XMIT_GSO_ENC_V6;
3423 } else {
3424 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3425 if (rc & XMIT_CSUM_ENC)
3426 rc |= XMIT_GSO_ENC_V4;
3427 }
3428 }
3429
3430 return rc;
3431}
3432
3433
3434#define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS 4
3435
3436
3437#define BNX2X_NUM_TSO_WIN_SUB_BDS 3
3438
3439#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3440
3441
3442
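/* Check whether the packet is fragmented in a way the firmware cannot fetch
 * (a single LSO window spanning more BDs than MAX_FETCH_BD allows) and
 * therefore must be linearized before transmission.
 */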
3443static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3444 u32 xmit_type)
3445{
3446 int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS;
3447 int to_copy = 0, hlen = 0;
3448
3449 if (xmit_type & XMIT_GSO_ENC)
3450 num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS;
3451
3452 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) {
3453 if (xmit_type & XMIT_GSO) {
3454 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3455 int wnd_size = MAX_FETCH_BD - num_tso_win_sub;
3456
3457 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3458 int wnd_idx = 0;
3459 int frag_idx = 0;
3460 u32 wnd_sum = 0;
3461
3462
3463 if (xmit_type & XMIT_GSO_ENC)
3464 hlen = (int)(skb_inner_transport_header(skb) -
3465 skb->data) +
3466 inner_tcp_hdrlen(skb);
3467 else
3468 hlen = (int)(skb_transport_header(skb) -
3469 skb->data) + tcp_hdrlen(skb);
3470
3471
3472 first_bd_sz = skb_headlen(skb) - hlen;
3473
3474 wnd_sum = first_bd_sz;
3475
3476
3477 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3478 wnd_sum +=
3479 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3480
3481
3482 if (first_bd_sz > 0) {
3483 if (unlikely(wnd_sum < lso_mss)) {
3484 to_copy = 1;
3485 goto exit_lbl;
3486 }
3487
3488 wnd_sum -= first_bd_sz;
3489 }
3490
3491
3492
3493 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3494 wnd_sum +=
3495 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3496
3497 if (unlikely(wnd_sum < lso_mss)) {
3498 to_copy = 1;
3499 break;
3500 }
3501 wnd_sum -=
3502 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3503 }
3504 } else {
3505
3506
3507 to_copy = 1;
3508 }
3509 }
3510
3511exit_lbl:
3512 if (unlikely(to_copy))
3513 DP(NETIF_MSG_TX_QUEUED,
3514 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
3515 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3516 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3517
3518 return to_copy;
3519}
3520#endif
3521
3522
3523
3524
3525
3526
3527
3528
3529static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3530 struct eth_tx_parse_bd_e1x *pbd,
3531 u32 xmit_type)
3532{
3533 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3534 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3535 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3536
3537 if (xmit_type & XMIT_GSO_V4) {
3538 pbd->ip_id = bswab16(ip_hdr(skb)->id);
3539 pbd->tcp_pseudo_csum =
3540 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3541 ip_hdr(skb)->daddr,
3542 0, IPPROTO_TCP, 0));
3543 } else {
3544 pbd->tcp_pseudo_csum =
3545 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3546 &ipv6_hdr(skb)->daddr,
3547 0, IPPROTO_TCP, 0));
3548 }
3549
3550 pbd->global_data |=
3551 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3552}
3553
3554
3555
3556
3557
3558
3559
3560
3561
3562
3563
3564static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3565 u32 *parsing_data, u32 xmit_type)
3566{
3567 *parsing_data |=
3568 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3569 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3570 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3571
3572 if (xmit_type & XMIT_CSUM_TCP) {
3573 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3574 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3575 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3576
3577 return skb_inner_transport_header(skb) +
3578 inner_tcp_hdrlen(skb) - skb->data;
3579 }
3580
3581
3582
3583
3584 return skb_inner_transport_header(skb) +
3585 sizeof(struct udphdr) - skb->data;
3586}
3587
3588
3589
3590
3591
3592
3593
3594
3595
3596
3597
3598static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3599 u32 *parsing_data, u32 xmit_type)
3600{
3601 *parsing_data |=
3602 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3603 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3604 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3605
3606 if (xmit_type & XMIT_CSUM_TCP) {
3607 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3608 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3609 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3610
3611 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3612 }
3613
3614
3615
3616 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3617}
3618
3619
3620static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3621 struct eth_tx_start_bd *tx_start_bd,
3622 u32 xmit_type)
3623{
3624 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3625
3626 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3627 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3628
3629 if (!(xmit_type & XMIT_CSUM_TCP))
3630 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3631}
3632
3633
3634
3635
3636
3637
3638
3639
3640
3641static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3642 struct eth_tx_parse_bd_e1x *pbd,
3643 u32 xmit_type)
3644{
3645 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3646
3647
3648 pbd->global_data =
3649 cpu_to_le16(hlen |
3650 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3651 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3652
3653 pbd->ip_hlen_w = (skb_transport_header(skb) -
3654 skb_network_header(skb)) >> 1;
3655
3656 hlen += pbd->ip_hlen_w;
3657
3658
3659 if (xmit_type & XMIT_CSUM_TCP)
3660 hlen += tcp_hdrlen(skb) / 2;
3661 else
3662 hlen += sizeof(struct udphdr) / 2;
3663
3664 pbd->total_hlen_w = cpu_to_le16(hlen);
3665 hlen = hlen*2;
3666
3667 if (xmit_type & XMIT_CSUM_TCP) {
3668 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3669
3670 } else {
3671 s8 fix = SKB_CS_OFF(skb);
3672
3673 DP(NETIF_MSG_TX_QUEUED,
3674 "hlen %d fix %d csum before fix %x\n",
3675 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3676
3677
3678 pbd->tcp_pseudo_csum =
3679 bnx2x_csum_fix(skb_transport_header(skb),
3680 SKB_CS(skb), fix);
3681
3682 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3683 pbd->tcp_pseudo_csum);
3684 }
3685
3686 return hlen;
3687}
3688
3689static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3690 struct eth_tx_parse_bd_e2 *pbd_e2,
3691 struct eth_tx_parse_2nd_bd *pbd2,
3692 u16 *global_data,
3693 u32 xmit_type)
3694{
3695 u16 hlen_w = 0;
3696 u8 outerip_off, outerip_len = 0;
3697
3698
3699 hlen_w = (skb_inner_transport_header(skb) -
3700 skb_network_header(skb)) >> 1;
3701
3702
3703 hlen_w += inner_tcp_hdrlen(skb) >> 1;
3704
3705 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3706
3707
3708 if (xmit_type & XMIT_CSUM_V4) {
3709 struct iphdr *iph = ip_hdr(skb);
3710 u32 csum = (__force u32)(~iph->check) -
3711 (__force u32)iph->tot_len -
3712 (__force u32)iph->frag_off;
3713
3714 outerip_len = iph->ihl << 1;
3715
3716 pbd2->fw_ip_csum_wo_len_flags_frag =
3717 bswab16(csum_fold((__force __wsum)csum));
3718 } else {
3719 pbd2->fw_ip_hdr_to_payload_w =
3720 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3721 pbd_e2->data.tunnel_data.flags |=
3722 ETH_TUNNEL_DATA_IPV6_OUTER;
3723 }
3724
3725 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3726
3727 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3728
3729
3730 if (xmit_type & XMIT_CSUM_ENC_V4) {
3731 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3732
3733 pbd_e2->data.tunnel_data.pseudo_csum =
3734 bswab16(~csum_tcpudp_magic(
3735 inner_ip_hdr(skb)->saddr,
3736 inner_ip_hdr(skb)->daddr,
3737 0, IPPROTO_TCP, 0));
3738 } else {
3739 pbd_e2->data.tunnel_data.pseudo_csum =
3740 bswab16(~csum_ipv6_magic(
3741 &inner_ipv6_hdr(skb)->saddr,
3742 &inner_ipv6_hdr(skb)->daddr,
3743 0, IPPROTO_TCP, 0));
3744 }
3745
3746 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3747
3748 *global_data |=
3749 outerip_off |
3750 (outerip_len <<
3751 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3752 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3753 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3754
3755 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3756 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3757 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3758 }
3759}
3760
3761static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
3762 u32 xmit_type)
3763{
3764 struct ipv6hdr *ipv6;
3765
3766 if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
3767 return;
3768
3769 if (xmit_type & XMIT_GSO_ENC_V6)
3770 ipv6 = inner_ipv6_hdr(skb);
3771 else
3772 ipv6 = ipv6_hdr(skb);
3773
3774 if (ipv6->nexthdr == NEXTHDR_IPV6)
3775 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3776}
3777
3778
3779
3780
3781
3782netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3783{
3784 struct bnx2x *bp = netdev_priv(dev);
3785
3786 struct netdev_queue *txq;
3787 struct bnx2x_fp_txdata *txdata;
3788 struct sw_tx_bd *tx_buf;
3789 struct eth_tx_start_bd *tx_start_bd, *first_bd;
3790 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3791 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3792 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3793 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3794 u32 pbd_e2_parsing_data = 0;
3795 u16 pkt_prod, bd_prod;
3796 int nbd, txq_index;
3797 dma_addr_t mapping;
3798 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3799 int i;
3800 u8 hlen = 0;
3801 __le16 pkt_size = 0;
3802 struct ethhdr *eth;
3803 u8 mac_type = UNICAST_ADDRESS;
3804
3805#ifdef BNX2X_STOP_ON_ERROR
3806 if (unlikely(bp->panic))
3807 return NETDEV_TX_BUSY;
3808#endif
3809
3810 txq_index = skb_get_queue_mapping(skb);
3811 txq = netdev_get_tx_queue(dev, txq_index);
3812
3813 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3814
3815 txdata = &bp->bnx2x_txq[txq_index];
3816
3817
3818
3819
3820
3821
3822
3823
3824
3825
3826 if (unlikely(bnx2x_tx_avail(bp, txdata) <
3827 skb_shinfo(skb)->nr_frags +
3828 BDS_PER_TX_PKT +
3829 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3830
3831 if (txdata->tx_ring_size == 0) {
3832 struct bnx2x_eth_q_stats *q_stats =
3833 bnx2x_fp_qstats(bp, txdata->parent_fp);
3834 q_stats->driver_filtered_tx_pkt++;
3835 dev_kfree_skb(skb);
3836 return NETDEV_TX_OK;
3837 }
3838 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3839 netif_tx_stop_queue(txq);
3840 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3841
3842 return NETDEV_TX_BUSY;
3843 }
3844
3845 DP(NETIF_MSG_TX_QUEUED,
3846 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
3847 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3848 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3849 skb->len);
3850
3851 eth = (struct ethhdr *)skb->data;
3852
3853
3854 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3855 if (is_broadcast_ether_addr(eth->h_dest))
3856 mac_type = BROADCAST_ADDRESS;
3857 else
3858 mac_type = MULTICAST_ADDRESS;
3859 }
3860
3861#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3862
3863
3864
3865 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3866
3867 bp->lin_cnt++;
3868 if (skb_linearize(skb) != 0) {
3869 DP(NETIF_MSG_TX_QUEUED,
3870 "SKB linearization failed - silently dropping this SKB\n");
3871 dev_kfree_skb_any(skb);
3872 return NETDEV_TX_OK;
3873 }
3874 }
3875#endif
3876
3877 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3878 skb_headlen(skb), DMA_TO_DEVICE);
3879 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3880 DP(NETIF_MSG_TX_QUEUED,
3881 "SKB mapping failed - silently dropping this SKB\n");
3882 dev_kfree_skb_any(skb);
3883 return NETDEV_TX_OK;
3884 }
3885
3886
3887
3888
3889
3890
3891
3892
3893
3894
3895
3896
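/* BD layout for a packet: a start BD comes first, then the parsing BD(s)
 * used for checksum/TSO offload, and only then the remaining data BDs.
 * Parsing BD header lengths are expressed in 16-bit words.
 */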
3897 pkt_prod = txdata->tx_pkt_prod;
3898 bd_prod = TX_BD(txdata->tx_bd_prod);
3899
3900
3901
3902
3903
3904 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3905 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3906 first_bd = tx_start_bd;
3907
3908 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3909
3910 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
3911 if (!(bp->flags & TX_TIMESTAMPING_EN)) {
3912 BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
3913 } else if (bp->ptp_tx_skb) {
3914 BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
3915 } else {
3916 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3917
3918 bp->ptp_tx_skb = skb_get(skb);
3919 bp->ptp_tx_start = jiffies;
3920 schedule_work(&bp->ptp_task);
3921 }
3922 }
3923
3924
3925 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3926
3927
3928 tx_buf->first_bd = txdata->tx_bd_prod;
3929 tx_buf->skb = skb;
3930 tx_buf->flags = 0;
3931
3932 DP(NETIF_MSG_TX_QUEUED,
3933 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3934 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3935
3936 if (skb_vlan_tag_present(skb)) {
3937 tx_start_bd->vlan_or_ethertype =
3938 cpu_to_le16(skb_vlan_tag_get(skb));
3939 tx_start_bd->bd_flags.as_bitfield |=
3940 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3941 } else {
3942
3943
3944
3945#ifndef BNX2X_STOP_ON_ERROR
3946 if (IS_VF(bp))
3947#endif
3948 tx_start_bd->vlan_or_ethertype =
3949 cpu_to_le16(ntohs(eth->h_proto));
3950#ifndef BNX2X_STOP_ON_ERROR
3951 else
3952
3953 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3954#endif
3955 }
3956
3957 nbd = 2;
3958
3959
3960 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3961
3962 if (xmit_type & XMIT_CSUM)
3963 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3964
3965 if (!CHIP_IS_E1x(bp)) {
3966 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3967 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3968
3969 if (xmit_type & XMIT_CSUM_ENC) {
3970 u16 global_data = 0;
3971
3972
3973 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3974 &pbd_e2_parsing_data,
3975 xmit_type);
3976
3977
3978 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3979
3980 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3981
3982 memset(pbd2, 0, sizeof(*pbd2));
3983
3984 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3985 (skb_inner_network_header(skb) -
3986 skb->data) >> 1;
3987
3988 if (xmit_type & XMIT_GSO_ENC)
3989 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3990 &global_data,
3991 xmit_type);
3992
3993 pbd2->global_data = cpu_to_le16(global_data);
3994
3995
3996 SET_FLAG(tx_start_bd->general_data,
3997 ETH_TX_START_BD_PARSE_NBDS, 1);
3998
3999 SET_FLAG(tx_start_bd->general_data,
4000 ETH_TX_START_BD_TUNNEL_EXIST, 1);
4001
4002 tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
4003
4004 nbd++;
4005 } else if (xmit_type & XMIT_CSUM) {
4006
4007 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
4008 &pbd_e2_parsing_data,
4009 xmit_type);
4010 }
4011
4012 bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
4013
4014
4015
4016 if (IS_VF(bp)) {
4017
4018 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
4019 &pbd_e2->data.mac_addr.src_mid,
4020 &pbd_e2->data.mac_addr.src_lo,
4021 eth->h_source);
4022
4023 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
4024 &pbd_e2->data.mac_addr.dst_mid,
4025 &pbd_e2->data.mac_addr.dst_lo,
4026 eth->h_dest);
4027 } else {
4028 if (bp->flags & TX_SWITCHING)
4029 bnx2x_set_fw_mac_addr(
4030 &pbd_e2->data.mac_addr.dst_hi,
4031 &pbd_e2->data.mac_addr.dst_mid,
4032 &pbd_e2->data.mac_addr.dst_lo,
4033 eth->h_dest);
4034#ifdef BNX2X_STOP_ON_ERROR
4035
4036
4037
4038 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
4039 &pbd_e2->data.mac_addr.src_mid,
4040 &pbd_e2->data.mac_addr.src_lo,
4041 eth->h_source);
4042#endif
4043 }
4044
4045 SET_FLAG(pbd_e2_parsing_data,
4046 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
4047 } else {
4048 u16 global_data = 0;
4049 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
4050 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
4051
4052 if (xmit_type & XMIT_CSUM)
4053 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
4054
4055 SET_FLAG(global_data,
4056 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
4057 pbd_e1x->global_data |= cpu_to_le16(global_data);
4058 }
4059
4060
4061 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4062 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4063 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
4064 pkt_size = tx_start_bd->nbytes;
4065
4066 DP(NETIF_MSG_TX_QUEUED,
4067 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
4068 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
4069 le16_to_cpu(tx_start_bd->nbytes),
4070 tx_start_bd->bd_flags.as_bitfield,
4071 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
4072
4073 if (xmit_type & XMIT_GSO) {
4074
4075 DP(NETIF_MSG_TX_QUEUED,
4076 "TSO packet len %d hlen %d total len %d tso size %d\n",
4077 skb->len, hlen, skb_headlen(skb),
4078 skb_shinfo(skb)->gso_size);
4079
4080 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
4081
4082 if (unlikely(skb_headlen(skb) > hlen)) {
4083 nbd++;
4084 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
4085 &tx_start_bd, hlen,
4086 bd_prod);
4087 }
4088 if (!CHIP_IS_E1x(bp))
4089 pbd_e2_parsing_data |=
4090 (skb_shinfo(skb)->gso_size <<
4091 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
4092 ETH_TX_PARSE_BD_E2_LSO_MSS;
4093 else
4094 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
4095 }
4096
4097
4098
4099
4100 if (pbd_e2_parsing_data)
4101 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
4102
4103 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
4104
4105
4106 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4107 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4108
4109 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
4110 skb_frag_size(frag), DMA_TO_DEVICE);
4111 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
4112 unsigned int pkts_compl = 0, bytes_compl = 0;
4113
4114 DP(NETIF_MSG_TX_QUEUED,
4115 "Unable to map page - dropping packet...\n");
4116
4117
4118
4119
4120
4121
4122 first_bd->nbd = cpu_to_le16(nbd);
4123 bnx2x_free_tx_pkt(bp, txdata,
4124 TX_BD(txdata->tx_pkt_prod),
4125 &pkts_compl, &bytes_compl);
4126 return NETDEV_TX_OK;
4127 }
4128
4129 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4130 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4131 if (total_pkt_bd == NULL)
4132 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4133
4134 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4135 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4136 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4137 le16_add_cpu(&pkt_size, skb_frag_size(frag));
4138 nbd++;
4139
4140 DP(NETIF_MSG_TX_QUEUED,
4141 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
4142 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4143 le16_to_cpu(tx_data_bd->nbytes));
4144 }
4145
4146 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4147
4148
4149 first_bd->nbd = cpu_to_le16(nbd);
4150
4151 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4152
4153
4154
4155
4156 if (TX_BD_POFF(bd_prod) < nbd)
4157 nbd++;
4158
4159
4160
4161
4162
4163
4164
4165
4166 if (total_pkt_bd != NULL)
4167 total_pkt_bd->total_pkt_bytes = pkt_size;
4168
4169 if (pbd_e1x)
4170 DP(NETIF_MSG_TX_QUEUED,
4171 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
4172 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4173 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4174 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4175 le16_to_cpu(pbd_e1x->total_hlen_w));
4176 if (pbd_e2)
4177 DP(NETIF_MSG_TX_QUEUED,
4178 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
4179 pbd_e2,
4180 pbd_e2->data.mac_addr.dst_hi,
4181 pbd_e2->data.mac_addr.dst_mid,
4182 pbd_e2->data.mac_addr.dst_lo,
4183 pbd_e2->data.mac_addr.src_hi,
4184 pbd_e2->data.mac_addr.src_mid,
4185 pbd_e2->data.mac_addr.src_lo,
4186 pbd_e2->parsing_data);
4187 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
4188
4189 netdev_tx_sent_queue(txq, skb->len);
4190
4191 skb_tx_timestamp(skb);
4192
4193 txdata->tx_pkt_prod++;
4194
4195
4196
4197
4198
4199
4200
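/* Make sure the BD contents are written out before the producer is updated,
 * since the firmware may fetch the BD as soon as it sees the new producer.
 */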
4201 wmb();
4202
4203 txdata->tx_db.data.prod += nbd;
4204 barrier();
4205
4206 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
4207
4208 mmiowb();
4209
4210 txdata->tx_bd_prod += nbd;
4211
4212 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
4213 netif_tx_stop_queue(txq);
4214
4215
4216
4217
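/* Pairs with the barrier in bnx2x_tx_int(): the stopped-queue bit must be
 * visible before re-reading the ring occupancy, otherwise the queue could
 * remain stopped even though descriptors were just freed.
 */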
4218 smp_mb();
4219
4220 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
4221 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4222 netif_tx_wake_queue(txq);
4223 }
4224 txdata->tx_pkt++;
4225
4226 return NETDEV_TX_OK;
4227}
4228
4229void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
4230{
4231 int mfw_vn = BP_FW_MB_IDX(bp);
4232 u32 tmp;
4233
4234
4235 if (!IS_MF_BD(bp)) {
4236 int i;
4237
4238 for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
4239 c2s_map[i] = i;
4240 *c2s_default = 0;
4241
4242 return;
4243 }
4244
4245 tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
4246 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4247 c2s_map[0] = tmp & 0xff;
4248 c2s_map[1] = (tmp >> 8) & 0xff;
4249 c2s_map[2] = (tmp >> 16) & 0xff;
4250 c2s_map[3] = (tmp >> 24) & 0xff;
4251
4252 tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
4253 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4254 c2s_map[4] = tmp & 0xff;
4255 c2s_map[5] = (tmp >> 8) & 0xff;
4256 c2s_map[6] = (tmp >> 16) & 0xff;
4257 c2s_map[7] = (tmp >> 24) & 0xff;
4258
4259 tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
4260 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4261 *c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
4262}
4263
4264
4265
4266
4267
4268
4269
4270
4271
4272int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4273{
4274 struct bnx2x *bp = netdev_priv(dev);
4275 u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
4276 int cos, prio, count, offset;
4277
4278
4279 ASSERT_RTNL();
4280
4281
4282 if (!num_tc) {
4283 netdev_reset_tc(dev);
4284 return 0;
4285 }
4286
4287
4288 if (num_tc > bp->max_cos) {
4289 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4290 num_tc, bp->max_cos);
4291 return -EINVAL;
4292 }
4293
4294
4295 if (netdev_set_num_tc(dev, num_tc)) {
4296 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4297 return -EINVAL;
4298 }
4299
4300 bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
4301
4302
4303 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4304 int outer_prio = c2s_map[prio];
4305
4306 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
4307 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4308 "mapping priority %d to tc %d\n",
4309 outer_prio, bp->prio_to_cos[outer_prio]);
4310 }
4311
4312
4313
4314
4315
4316
4317
4318
4319
4320
4321
4322
4323 for (cos = 0; cos < bp->max_cos; cos++) {
4324 count = BNX2X_NUM_ETH_QUEUES(bp);
4325 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4326 netdev_set_tc_queue(dev, cos, count, offset);
4327 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4328 "mapping tc %d to offset %d count %d\n",
4329 cos, offset, count);
4330 }
4331
4332 return 0;
4333}
4334
4335
4336int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4337{
4338 struct sockaddr *addr = p;
4339 struct bnx2x *bp = netdev_priv(dev);
4340 int rc = 0;
4341
4342 if (!is_valid_ether_addr(addr->sa_data)) {
4343 BNX2X_ERR("Requested MAC address is not valid\n");
4344 return -EINVAL;
4345 }
4346
4347 if (IS_MF_STORAGE_ONLY(bp)) {
4348 BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
4349 return -EINVAL;
4350 }
4351
4352 if (netif_running(dev)) {
4353 rc = bnx2x_set_eth_mac(bp, false);
4354 if (rc)
4355 return rc;
4356 }
4357
4358 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4359
4360 if (netif_running(dev))
4361 rc = bnx2x_set_eth_mac(bp, true);
4362
4363 if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4364 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4365
4366 return rc;
4367}
4368
4369static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4370{
4371 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4372 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4373 u8 cos;
4374
4375
4376
4377 if (IS_FCOE_IDX(fp_index)) {
4378 memset(sb, 0, sizeof(union host_hc_status_block));
4379 fp->status_blk_mapping = 0;
4380 } else {
4381
4382 if (!CHIP_IS_E1x(bp))
4383 BNX2X_PCI_FREE(sb->e2_sb,
4384 bnx2x_fp(bp, fp_index,
4385 status_blk_mapping),
4386 sizeof(struct host_hc_status_block_e2));
4387 else
4388 BNX2X_PCI_FREE(sb->e1x_sb,
4389 bnx2x_fp(bp, fp_index,
4390 status_blk_mapping),
4391 sizeof(struct host_hc_status_block_e1x));
4392 }
4393
4394
4395 if (!skip_rx_queue(bp, fp_index)) {
4396 bnx2x_free_rx_bds(fp);
4397
4398
4399 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4400 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4401 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4402 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4403
4404 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4405 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4406 sizeof(struct eth_fast_path_rx_cqe) *
4407 NUM_RCQ_BD);
4408
4409
4410 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4411 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4412 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4413 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4414 }
4415
4416
4417 if (!skip_tx_queue(bp, fp_index)) {
4418
4419 for_each_cos_in_tx_queue(fp, cos) {
4420 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4421
4422 DP(NETIF_MSG_IFDOWN,
4423 "freeing tx memory of fp %d cos %d cid %d\n",
4424 fp_index, cos, txdata->cid);
4425
4426 BNX2X_FREE(txdata->tx_buf_ring);
4427 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4428 txdata->tx_desc_mapping,
4429 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4430 }
4431 }
4432
4433}
4434
4435static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4436{
4437 int i;
4438 for_each_cnic_queue(bp, i)
4439 bnx2x_free_fp_mem_at(bp, i);
4440}
4441
4442void bnx2x_free_fp_mem(struct bnx2x *bp)
4443{
4444 int i;
4445 for_each_eth_queue(bp, i)
4446 bnx2x_free_fp_mem_at(bp, i);
4447}
4448
4449static void set_sb_shortcuts(struct bnx2x *bp, int index)
4450{
4451 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4452 if (!CHIP_IS_E1x(bp)) {
4453 bnx2x_fp(bp, index, sb_index_values) =
4454 (__le16 *)status_blk.e2_sb->sb.index_values;
4455 bnx2x_fp(bp, index, sb_running_index) =
4456 (__le16 *)status_blk.e2_sb->sb.running_index;
4457 } else {
4458 bnx2x_fp(bp, index, sb_index_values) =
4459 (__le16 *)status_blk.e1x_sb->sb.index_values;
4460 bnx2x_fp(bp, index, sb_running_index) =
4461 (__le16 *)status_blk.e1x_sb->sb.running_index;
4462 }
4463}
4464
4465
4466static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4467 int rx_ring_size)
4468{
4469 struct bnx2x *bp = fp->bp;
4470 u16 ring_prod, cqe_ring_prod;
4471 int i, failure_cnt = 0;
4472
4473 fp->rx_comp_cons = 0;
4474 cqe_ring_prod = ring_prod = 0;
4475
4476
4477
4478
4479 for (i = 0; i < rx_ring_size; i++) {
4480 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
4481 failure_cnt++;
4482 continue;
4483 }
4484 ring_prod = NEXT_RX_IDX(ring_prod);
4485 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4486 WARN_ON(ring_prod <= (i - failure_cnt));
4487 }
4488
4489 if (failure_cnt)
4490 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4491 i - failure_cnt, fp->index);
4492
4493 fp->rx_bd_prod = ring_prod;
4494
4495 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4496 cqe_ring_prod);
4497 fp->rx_pkt = fp->rx_calls = 0;
4498
4499 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4500
4501 return i - failure_cnt;
4502}
4503
4504static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4505{
4506 int i;
4507
4508 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4509 struct eth_rx_cqe_next_page *nextpg;
4510
4511 nextpg = (struct eth_rx_cqe_next_page *)
4512 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4513 nextpg->addr_hi =
4514 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4515 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4516 nextpg->addr_lo =
4517 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4518 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4519 }
4520}
4521
4522static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4523{
4524 union host_hc_status_block *sb;
4525 struct bnx2x_fastpath *fp = &bp->fp[index];
4526 int ring_size = 0;
4527 u8 cos;
4528 int rx_ring_size = 0;
4529
4530 if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
4531 rx_ring_size = MIN_RX_SIZE_NONTPA;
4532 bp->rx_ring_size = rx_ring_size;
4533 } else if (!bp->rx_ring_size) {
4534 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4535
4536 if (CHIP_IS_E3(bp)) {
4537 u32 cfg = SHMEM_RD(bp,
4538 dev_info.port_hw_config[BP_PORT(bp)].
4539 default_cfg);
4540
4541
4542 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4543 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4544 rx_ring_size /= 10;
4545 }
4546
4547
4548 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4549 MIN_RX_SIZE_TPA, rx_ring_size);
4550
4551 bp->rx_ring_size = rx_ring_size;
4552 } else
4553 rx_ring_size = bp->rx_ring_size;
4554
4555 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4556
4557
4558 sb = &bnx2x_fp(bp, index, status_blk);
4559
4560 if (!IS_FCOE_IDX(index)) {
4561
4562 if (!CHIP_IS_E1x(bp)) {
4563 sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4564 sizeof(struct host_hc_status_block_e2));
4565 if (!sb->e2_sb)
4566 goto alloc_mem_err;
4567 } else {
4568 sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4569 sizeof(struct host_hc_status_block_e1x));
4570 if (!sb->e1x_sb)
4571 goto alloc_mem_err;
4572 }
4573 }
4574
4575
4576
4577
4578 if (!IS_FCOE_IDX(index))
4579 set_sb_shortcuts(bp, index);
4580
4581
4582 if (!skip_tx_queue(bp, index)) {
4583
4584 for_each_cos_in_tx_queue(fp, cos) {
4585 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4586
4587 DP(NETIF_MSG_IFUP,
4588 "allocating tx memory of fp %d cos %d\n",
4589 index, cos);
4590
4591 txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4592 sizeof(struct sw_tx_bd),
4593 GFP_KERNEL);
4594 if (!txdata->tx_buf_ring)
4595 goto alloc_mem_err;
4596 txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4597 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4598 if (!txdata->tx_desc_ring)
4599 goto alloc_mem_err;
4600 }
4601 }
4602
4603
4604 if (!skip_rx_queue(bp, index)) {
4605
4606 bnx2x_fp(bp, index, rx_buf_ring) =
4607 kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4608 if (!bnx2x_fp(bp, index, rx_buf_ring))
4609 goto alloc_mem_err;
4610 bnx2x_fp(bp, index, rx_desc_ring) =
4611 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4612 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4613 if (!bnx2x_fp(bp, index, rx_desc_ring))
4614 goto alloc_mem_err;
4615
4616
4617 bnx2x_fp(bp, index, rx_comp_ring) =
4618 BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4619 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4620 if (!bnx2x_fp(bp, index, rx_comp_ring))
4621 goto alloc_mem_err;
4622
4623
4624 bnx2x_fp(bp, index, rx_page_ring) =
4625 kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4626 GFP_KERNEL);
4627 if (!bnx2x_fp(bp, index, rx_page_ring))
4628 goto alloc_mem_err;
4629 bnx2x_fp(bp, index, rx_sge_ring) =
4630 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4631 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4632 if (!bnx2x_fp(bp, index, rx_sge_ring))
4633 goto alloc_mem_err;
4634
4635 bnx2x_set_next_page_rx_bd(fp);
4636
4637
4638 bnx2x_set_next_page_rx_cq(fp);
4639
4640
4641 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4642 if (ring_size < rx_ring_size)
4643 goto alloc_mem_err;
4644 }
4645
4646 return 0;
4647
4648
4649alloc_mem_err:
4650 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4651 index, ring_size);
4652
4653
4654
4655
4656 if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
4657 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4658
4659 bnx2x_free_fp_mem_at(bp, index);
4660 return -ENOMEM;
4661 }
4662 return 0;
4663}
4664
4665static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4666{
4667 if (!NO_FCOE(bp))
4668
4669 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4670
4671
4672
4673 return -ENOMEM;
4674
4675 return 0;
4676}
4677
4678static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4679{
4680 int i;
4681
4682
4683
4684
4685
4686
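/* Allocation strategy: the leading queue allocation must succeed or the load
 * fails; for the remaining RSS queues allocate as many as memory allows and
 * shrink the queue count on failure.
 */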
4687 if (bnx2x_alloc_fp_mem_at(bp, 0))
4688 return -ENOMEM;
4689
4690
4691 for_each_nondefault_eth_queue(bp, i)
4692 if (bnx2x_alloc_fp_mem_at(bp, i))
4693 break;
4694
4695
4696 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4697 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4698
4699 WARN_ON(delta < 0);
4700 bnx2x_shrink_eth_fp(bp, delta);
4701 if (CNIC_SUPPORT(bp))
4702
4703
4704
4705
4706
4707
4708 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4709 bp->num_ethernet_queues -= delta;
4710 bp->num_queues = bp->num_ethernet_queues +
4711 bp->num_cnic_queues;
4712 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4713 bp->num_queues + delta, bp->num_queues);
4714 }
4715
4716 return 0;
4717}
4718
4719void bnx2x_free_mem_bp(struct bnx2x *bp)
4720{
4721 int i;
4722
4723 for (i = 0; i < bp->fp_array_size; i++)
4724 kfree(bp->fp[i].tpa_info);
4725 kfree(bp->fp);
4726 kfree(bp->sp_objs);
4727 kfree(bp->fp_stats);
4728 kfree(bp->bnx2x_txq);
4729 kfree(bp->msix_table);
4730 kfree(bp->ilt);
4731}
4732
4733int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4734{
4735 struct bnx2x_fastpath *fp;
4736 struct msix_entry *tbl;
4737 struct bnx2x_ilt *ilt;
4738 int msix_table_size = 0;
4739 int fp_array_size, txq_array_size;
4740 int i;
4741
4742
4743
4744
4745
4746 msix_table_size = bp->igu_sb_cnt;
4747 if (IS_PF(bp))
4748 msix_table_size++;
4749 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
4750
4751
4752 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
4753 bp->fp_array_size = fp_array_size;
4754 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
4755
4756 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
4757 if (!fp)
4758 goto alloc_err;
4759 for (i = 0; i < bp->fp_array_size; i++) {
4760 fp[i].tpa_info =
4761 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4762 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4763 if (!(fp[i].tpa_info))
4764 goto alloc_err;
4765 }
4766
4767 bp->fp = fp;
4768
4769 /* allocate the slow-path objects array, one set per fastpath */
4770 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
4771 GFP_KERNEL);
4772 if (!bp->sp_objs)
4773 goto alloc_err;
4774
4775 /* allocate the per-queue statistics array */
4776 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
4777 GFP_KERNEL);
4778 if (!bp->fp_stats)
4779 goto alloc_err;
4780
4781 /* Allocate memory for the transmission queues array */
4782 txq_array_size =
4783 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4784 BNX2X_DEV_INFO("txq_array_size %d\n", txq_array_size);
4785
4786 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4787 GFP_KERNEL);
4788 if (!bp->bnx2x_txq)
4789 goto alloc_err;
4790
4791 /* MSI-X table */
4792 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
4793 if (!tbl)
4794 goto alloc_err;
4795 bp->msix_table = tbl;
4796
4797 /* ILT */
4798 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4799 if (!ilt)
4800 goto alloc_err;
4801 bp->ilt = ilt;
4802
4803 return 0;
4804alloc_err:
4805 bnx2x_free_mem_bp(bp);
4806 return -ENOMEM;
4807}
4808
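/* Unload and reload the NIC if the interface is currently running */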
4809int bnx2x_reload_if_running(struct net_device *dev)
4810{
4811 struct bnx2x *bp = netdev_priv(dev);
4812
4813 if (unlikely(!netif_running(dev)))
4814 return 0;
4815
4816 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
4817 return bnx2x_nic_load(bp, LOAD_NORMAL);
4818}
4819
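/* Return the index of the currently active PHY: the internal PHY when only
 * one PHY is present, otherwise EXT_PHY1 or EXT_PHY2 depending on the link
 * state and the configured PHY selection.
 */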
4820int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4821{
4822 u32 sel_phy_idx = 0;
4823 if (bp->link_params.num_phys <= 1)
4824 return INT_PHY;
4825
4826 if (bp->link_vars.link_up) {
4827 sel_phy_idx = EXT_PHY1;
4828 /* in case the link is SERDES, check whether EXT_PHY2 is the active one */
4829 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4830 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4831 sel_phy_idx = EXT_PHY2;
4832 } else {
4833 /* link is down: select the PHY from the configured selection priority */
4834 switch (bnx2x_phy_selection(&bp->link_params)) {
4835 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4836 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4837 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4838 sel_phy_idx = EXT_PHY1;
4839 break;
4840 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4841 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4842 sel_phy_idx = EXT_PHY2;
4843 break;
4844 }
4845 }
4846
4847 return sel_phy_idx;
4848}
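
/* Return the link configuration index of the active PHY, accounting for PHY
 * swapping when it is enabled.
 */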
4849int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4850{
4851 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4852 /*
4853  * The selected active PHY is always the one after swapping (when PHY
4854  * swapping is enabled), so when swapping is enabled we need to
4855  * reverse the configuration index.
4856  */
4857
4858 if (bp->link_params.multi_phy_config &
4859 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4860 if (sel_phy_idx == EXT_PHY1)
4861 sel_phy_idx = EXT_PHY2;
4862 else if (sel_phy_idx == EXT_PHY2)
4863 sel_phy_idx = EXT_PHY1;
4864 }
4865 return LINK_CONFIG_IDX(sel_phy_idx);
4866}
4867
4868#ifdef NETDEV_FCOE_WWNN
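/* Report the FCoE world wide node/port name exposed by the CNIC device */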
4869int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4870{
4871 struct bnx2x *bp = netdev_priv(dev);
4872 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4873
4874 switch (type) {
4875 case NETDEV_FCOE_WWNN:
4876 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4877 cp->fcoe_wwn_node_name_lo);
4878 break;
4879 case NETDEV_FCOE_WWPN:
4880 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4881 cp->fcoe_wwn_port_name_lo);
4882 break;
4883 default:
4884 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4885 return -EINVAL;
4886 }
4887
4888 return 0;
4889}
4890#endif
4891
4892/* called with rtnl_lock */
4893int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4894{
4895 struct bnx2x *bp = netdev_priv(dev);
4896
4897 if (pci_num_vf(bp->pdev)) {
4898 DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
4899 return -EPERM;
4900 }
4901
4902 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4903 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
4904 return -EAGAIN;
4905 }
4906
4907 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
4908 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4909 BNX2X_ERR("Can't support requested MTU size\n");
4910 return -EINVAL;
4911 }
4912
4913 /* This does not race with packet allocation
4914  * because the actual allocation size is
4915  * only updated as part of device load.
4916  */
4917 dev->mtu = new_mtu;
4918
4919 if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4920 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4921
4922 return bnx2x_reload_if_running(dev);
4923}
4924
4925netdev_features_t bnx2x_fix_features(struct net_device *dev,
4926 netdev_features_t features)
4927{
4928 struct bnx2x *bp = netdev_priv(dev);
4929
4930 if (pci_num_vf(bp->pdev)) {
4931 netdev_features_t changed = dev->features ^ features;
4932
4933 /* Revert the requested changes in features if they would
4934  * require an internal reload of the PF in bnx2x_set_features().
4935  */
4936 if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
4937 features &= ~NETIF_F_RXCSUM;
4938 features |= dev->features & NETIF_F_RXCSUM;
4939 }
4940
4941 if (changed & NETIF_F_LOOPBACK) {
4942 features &= ~NETIF_F_LOOPBACK;
4943 features |= dev->features & NETIF_F_LOOPBACK;
4944 }
4945 }
4946
4947 /* TPA requires Rx CSUM offloading */
4948 if (!(features & NETIF_F_RXCSUM)) {
4949 features &= ~NETIF_F_LRO;
4950 features &= ~NETIF_F_GRO;
4951 }
4952
4953 return features;
4954}
4955
4956int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4957{
4958 struct bnx2x *bp = netdev_priv(dev);
4959 netdev_features_t changes = features ^ dev->features;
4960 bool bnx2x_reload = false;
4961 int rc;
4962
4963 /* VFs or non-SRIOV PFs should be able to change the loopback feature */
4964 if (!pci_num_vf(bp->pdev)) {
4965 if (features & NETIF_F_LOOPBACK) {
4966 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4967 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4968 bnx2x_reload = true;
4969 }
4970 } else {
4971 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4972 bp->link_params.loopback_mode = LOOPBACK_NONE;
4973 bnx2x_reload = true;
4974 }
4975 }
4976 }
4977
4978 /* if GRO is changed while LRO is enabled, don't force a reload */
4979 if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO))
4980 changes &= ~NETIF_F_GRO;
4981
4982 /* if GRO is changed while HW TPA is off, don't force a reload */
4983 if ((changes & NETIF_F_GRO) && bp->disable_tpa)
4984 changes &= ~NETIF_F_GRO;
4985
4986 if (changes)
4987 bnx2x_reload = true;
4988
4989 if (bnx2x_reload) {
4990 if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
4991 dev->features = features;
4992 rc = bnx2x_reload_if_running(dev);
4993 return rc ? rc : 1;
4994 }
4995 /* else: bnx2x_nic_load() will be called at the end of recovery */
4996 }
4997
4998 return 0;
4999}
5000
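/* .ndo_tx_timeout callback: schedule the recovery handling on the sp_rtnl
 * task instead of acting here directly.
 */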
5001void bnx2x_tx_timeout(struct net_device *dev)
5002{
5003 struct bnx2x *bp = netdev_priv(dev);
5004
5005#ifdef BNX2X_STOP_ON_ERROR
5006 if (!bp->panic)
5007 bnx2x_panic();
5008#endif
5009
5010 /* This allows the netif to be shut down gracefully before resetting */
5011 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
5012}
5013
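/* Legacy PCI power-management suspend hook: detach the netdev, unload the
 * NIC and move the device to the requested power state.
 */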
5014int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
5015{
5016 struct net_device *dev = pci_get_drvdata(pdev);
5017 struct bnx2x *bp;
5018
5019 if (!dev) {
5020 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
5021 return -ENODEV;
5022 }
5023 bp = netdev_priv(dev);
5024
5025 rtnl_lock();
5026
5027 pci_save_state(pdev);
5028
5029 if (!netif_running(dev)) {
5030 rtnl_unlock();
5031 return 0;
5032 }
5033
5034 netif_device_detach(dev);
5035
5036 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
5037
5038 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
5039
5040 rtnl_unlock();
5041
5042 return 0;
5043}
5044
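/* Legacy PCI power-management resume hook: restore PCI state, bring the
 * device back to D0 and reload the NIC if the interface was running.
 */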
5045int bnx2x_resume(struct pci_dev *pdev)
5046{
5047 struct net_device *dev = pci_get_drvdata(pdev);
5048 struct bnx2x *bp;
5049 int rc;
5050
5051 if (!dev) {
5052 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
5053 return -ENODEV;
5054 }
5055 bp = netdev_priv(dev);
5056
5057 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
5058 BNX2X_ERR("Handling parity error recovery. Try again later\n");
5059 return -EAGAIN;
5060 }
5061
5062 rtnl_lock();
5063
5064 pci_restore_state(pdev);
5065
5066 if (!netif_running(dev)) {
5067 rtnl_unlock();
5068 return 0;
5069 }
5070
5071 bnx2x_set_power_state(bp, PCI_D0);
5072 netif_device_attach(dev);
5073
5074 rc = bnx2x_nic_load(bp, LOAD_OPEN);
5075
5076 rtnl_unlock();
5077
5078 return rc;
5079}
5080
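/* Write the CDU validation values for the given connection ID into the
 * ustorm and xstorm sections of the ethernet context.
 */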
5081void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
5082 u32 cid)
5083{
5084 if (!cxt) {
5085 BNX2X_ERR("bad context pointer %p\n", cxt);
5086 return;
5087 }
5088
5089 /* ustorm context validation */
5090 cxt->ustorm_ag_context.cdu_usage =
5091 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5092 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
5093 /* xstorm context validation */
5094 cxt->xstorm_ag_context.cdu_reserved =
5095 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5096 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
5097}
5098
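/* Program the host-coalescing timeout (in ticks) for one status-block index
 * through CSTORM internal memory.
 */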
5099static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
5100 u8 fw_sb_id, u8 sb_index,
5101 u8 ticks)
5102{
5103 u32 addr = BAR_CSTRORM_INTMEM +
5104 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
5105 REG_WR8(bp, addr, ticks);
5106 DP(NETIF_MSG_IFUP,
5107 "port %x fw_sb_id %d sb_index %d ticks %d\n",
5108 port, fw_sb_id, sb_index, ticks);
5109}
5110
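/* Enable or disable host coalescing for one status-block index by updating
 * its flags in CSTORM internal memory.
 */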
5111static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
5112 u16 fw_sb_id, u8 sb_index,
5113 u8 disable)
5114{
5115 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
5116 u32 addr = BAR_CSTRORM_INTMEM +
5117 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
5118 u8 flags = REG_RD8(bp, addr);
5119
5120 flags &= ~HC_INDEX_DATA_HC_ENABLED;
5121 flags |= enable_flag;
5122 REG_WR8(bp, addr, flags);
5123 DP(NETIF_MSG_IFUP,
5124 "port %x fw_sb_id %d sb_index %d disable %d\n",
5125 port, fw_sb_id, sb_index, disable);
5126}
5127
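/* Update the interrupt coalescing timeout of one status-block index; a zero
 * timeout disables coalescing for that index.
 */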
5128void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
5129 u8 sb_index, u8 disable, u16 usec)
5130{
5131 int port = BP_PORT(bp);
5132 u8 ticks = usec / BNX2X_BTR;
5133
5134 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
5135
5136 disable = disable ? 1 : (usec ? 0 : 1);
5137 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
5138}
5139
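/* Set the requested flag in bp->sp_rtnl_state and kick the sp_rtnl delayed
 * work so the request is handled later in the sp_rtnl worker; see for example
 * the BNX2X_SP_RTNL_TX_TIMEOUT request scheduled from bnx2x_tx_timeout() above.
 */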
5140void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
5141 u32 verbose)
5142{
5143 smp_mb__before_atomic();
5144 set_bit(flag, &bp->sp_rtnl_state);
5145 smp_mb__after_atomic();
5146 DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
5147 flag);
5148 schedule_delayed_work(&bp->sp_rtnl_task, 0);
5149}
5150EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);
5151