/* bnx2x_cmn.h: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2011 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 */
#ifndef BNX2X_CMN_H
#define BNX2X_CMN_H

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>

#include "bnx2x.h"

/* This is used as a replacement for an MCP if it's not present */
extern int load_count[2][3]; /* per path: 0-common, 1-port0, 2-port1 */

extern int num_queues;

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			kfree((void *)x); \
			x = NULL; \
		} \
	} while (0)

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset((void *)x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = kzalloc(size, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
	} while (0)
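
/*
 * The allocation helpers above assume a local "bp" pointer and an
 * "alloc_mem_err" label in the calling function.  A minimal usage sketch
 * (hypothetical function, for illustration only):
 *
 *	static int example_alloc(struct bnx2x *bp)
 *	{
 *		BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
 *		return 0;
 *
 *	alloc_mem_err:
 *		BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
 *		return -ENOMEM;
 *	}
 */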

/**
 * bnx2x_send_unload_req - request unload mode from the MCP.
 *
 * @bp:			driver handle
 * @unload_mode:	requested function's unload mode
 *
 * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
 */
u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode);

/**
 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
 *
 * @bp:		driver handle
 */
void bnx2x_send_unload_done(struct bnx2x *bp);

/**
 * bnx2x_config_rss_pf - configure RSS parameters.
 *
 * @bp:			driver handle
 * @ind_table:		indirection table to configure
 * @config_hash:	re-configure RSS hash keys configuration
 */
int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash);

/**
 * bnx2x__init_func_obj - init function object
 *
 * @bp:		driver handle
 *
 * Initializes the Function Object with the appropriate parameters which
 * include a function slow path driver interface.
 */
void bnx2x__init_func_obj(struct bnx2x *bp);

/**
 * bnx2x_setup_queue - setup eth queue.
 *
 * @bp:		driver handle
 * @fp:		pointer to the fastpath structure
 * @leading:	boolean
 */
int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		      bool leading);

/**
 * bnx2x_setup_leading - bring up a leading eth queue.
 *
 * @bp:		driver handle
 */
int bnx2x_setup_leading(struct bnx2x *bp);

/**
 * bnx2x_fw_command - send the MCP a request
 *
 * @bp:		driver handle
 * @command:	request
 * @param:	request's parameter
 *
 * Blocks until the MCP response is received.
 */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);

/**
 * bnx2x_initial_phy_init - initialize link parameters structure variables.
 *
 * @bp:		driver handle
 * @load_mode:	current mode
 */
u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);

/**
 * bnx2x_link_set - configure hw according to link parameters structure.
 *
 * @bp:		driver handle
 */
void bnx2x_link_set(struct bnx2x *bp);

/**
 * bnx2x_link_test - query link status.
 *
 * @bp:		driver handle
 * @is_serdes:	bool
 *
 * Returns 0 if link is UP and in working state.
 */
u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);

/**
 * bnx2x_drv_pulse - write driver pulse to shmem
 *
 * @bp:		driver handle
 *
 * Writes the value in bp->fw_drv_pulse_wr_seq to the drv_pulse mbox
 * in the shmem.
 */
void bnx2x_drv_pulse(struct bnx2x *bp);

/**
 * bnx2x_igu_ack_sb - update IGU with current SB value
 *
 * @bp:		driver handle
 * @igu_sb_id:	SB id
 * @segment:	SB segment
 * @index:	SB index
 * @op:		SB operation
 * @update:	is HW update required
 */
void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
		      u16 index, u8 op, u8 update);

/* Disable transactions from chip to host */
void bnx2x_pf_disable(struct bnx2x *bp);

/**
 * bnx2x__link_status_update - handles link status change.
 *
 * @bp:		driver handle
 */
void bnx2x__link_status_update(struct bnx2x *bp);

/**
 * bnx2x_link_report - report link status to the upper layer.
 *
 * @bp:		driver handle
 */
void bnx2x_link_report(struct bnx2x *bp);

/* None-atomic version of bnx2x_link_report() */
void __bnx2x_link_report(struct bnx2x *bp);

/**
 * bnx2x_get_mf_speed - calculate MF speed.
 *
 * @bp:		driver handle
 *
 * Takes into account current linespeed and MF configuration.
 */
u16 bnx2x_get_mf_speed(struct bnx2x *bp);

/**
 * bnx2x_msix_sp_int - MSI-X slowpath interrupt handler
 *
 * @irq:		irq number
 * @dev_instance:	private instance
 */
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);

/**
 * bnx2x_interrupt - non MSI-X interrupt handler
 *
 * @irq:		irq number
 * @dev_instance:	private instance
 */
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);
#ifdef BCM_CNIC

/**
 * bnx2x_cnic_notify - send command to cnic driver
 *
 * @bp:		driver handle
 * @cmd:	command
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);

/**
 * bnx2x_setup_cnic_irq_info - provides cnic with IRQ information
 *
 * @bp:		driver handle
 */
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
#endif

/**
 * bnx2x_int_enable - enable HW interrupts.
 *
 * @bp:		driver handle
 */
void bnx2x_int_enable(struct bnx2x *bp);

/**
 * bnx2x_int_disable_sync - disable interrupts.
 *
 * @bp:		driver handle
 * @disable_hw:	true, disable HW interrupts.
 *
 * This function ensures that no ISRs or SP DPCs (sp_task) are running
 * after it returns.
 */
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);

/**
 * bnx2x_nic_init - init driver internals.
 *
 * @bp:		driver handle
 * @load_code:	COMMON, PORT or FUNCTION
 *
 * Initializes:
 *  - rings
 *  - status blocks
 *  - etc.
 */
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);

/**
 * bnx2x_alloc_mem - allocate driver's memory.
 *
 * @bp:		driver handle
 */
int bnx2x_alloc_mem(struct bnx2x *bp);

/**
 * bnx2x_free_mem - release driver's memory.
 *
 * @bp:		driver handle
 */
void bnx2x_free_mem(struct bnx2x *bp);

/**
 * bnx2x_set_num_queues - set number of queues according to mode.
 *
 * @bp:		driver handle
 */
void bnx2x_set_num_queues(struct bnx2x *bp);

/**
 * bnx2x_chip_cleanup - cleanup chip internals.
 *
 * @bp:			driver handle
 * @unload_mode:	COMMON, PORT, FUNCTION
 *
 * - Cleanup MAC configuration.
 * - Closes clients.
 * - etc.
 */
void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode);

/**
 * bnx2x_acquire_hw_lock - acquire HW lock.
 *
 * @bp:		driver handle
 * @resource:	resource bit which was locked
 */
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * bnx2x_release_hw_lock - release HW lock.
 *
 * @bp:		driver handle
 * @resource:	resource bit which was locked
 */
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * bnx2x_release_leader_lock - release recovery leader lock
 *
 * @bp:		driver handle
 */
int bnx2x_release_leader_lock(struct bnx2x *bp);

/**
 * bnx2x_set_eth_mac - configure eth MAC address in the HW
 *
 * @bp:		driver handle
 * @set:	set or clear
 *
 * Configures according to the value in netdev->dev_addr.
 */
int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);

/**
 * bnx2x_set_rx_mode - set MAC filtering configurations.
 *
 * @dev:	netdevice
 *
 * Called with netif_tx_lock.
 * If bp->state is OPEN, should be called with
 * netif_addr_lock_bh().
 */
void bnx2x_set_rx_mode(struct net_device *dev);

/**
 * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW.
 *
 * @bp:		driver handle
 *
 * If bp->state is OPEN, should be called with
 * netif_addr_lock_bh().
 */
void bnx2x_set_storm_rx_mode(struct bnx2x *bp);

/**
 * bnx2x_set_q_rx_mode - configures rx_mode for a single queue.
 *
 * @bp:			driver handle
 * @cl_id:		client id
 * @rx_mode_flags:	rx mode configuration
 * @rx_accept_flags:	rx accept configuration
 * @tx_accept_flags:	tx accept configuration (tx switch)
 * @ramrod_flags:	ramrod configuration
 */
void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
			 unsigned long rx_mode_flags,
			 unsigned long rx_accept_flags,
			 unsigned long tx_accept_flags,
			 unsigned long ramrod_flags);

/* Parity errors related */
void bnx2x_inc_load_cnt(struct bnx2x *bp);
u32 bnx2x_dec_load_cnt(struct bnx2x *bp);
bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print);
bool bnx2x_reset_is_done(struct bnx2x *bp, int engine);
void bnx2x_set_reset_in_progress(struct bnx2x *bp);
void bnx2x_set_reset_global(struct bnx2x *bp);
void bnx2x_disable_close_the_gate(struct bnx2x *bp);

/**
 * bnx2x_sp_event - handle ramrods completion.
 *
 * @fp:		fastpath handle for the event
 * @rr_cqe:	eth_rx_cqe
 */
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);

/**
 * bnx2x_ilt_set_info - prepare ILT configurations.
 *
 * @bp:		driver handle
 */
void bnx2x_ilt_set_info(struct bnx2x *bp);

/**
 * bnx2x_dcbx_init - initialize the dcbx protocol.
 *
 * @bp:		driver handle
 */
void bnx2x_dcbx_init(struct bnx2x *bp);

/**
 * bnx2x_set_power_state - set power state to the requested value.
 *
 * @bp:		driver handle
 * @state:	required state D0 or D3hot
 *
 * Currently only D0 and D3hot are supported.
 */
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);

/**
 * bnx2x_update_max_mf_config - update MAX part of MF configuration.
 *
 * @bp:		driver handle
 * @value:	new value
 */
void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);

void bnx2x_panic_dump(struct bnx2x *bp);

void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);

/* dev_close main block */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);

/* dev_open main block */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode);

/* hard_xmit callback */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);

/* setup_tc callback */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);

/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);

int bnx2x_reload_if_running(struct net_device *dev);

int bnx2x_change_mac_addr(struct net_device *dev, void *p);

/* NAPI poll Rx part */
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);

void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			  u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod);

/* NAPI poll Tx part */
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata);

/* suspend/resume callbacks */
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
int bnx2x_resume(struct pci_dev *pdev);

/* Release IRQ vectors */
void bnx2x_free_irq(struct bnx2x *bp);

void bnx2x_free_fp_mem(struct bnx2x *bp);
int bnx2x_alloc_fp_mem(struct bnx2x *bp);
void bnx2x_init_rx_rings(struct bnx2x *bp);
void bnx2x_free_skbs(struct bnx2x *bp);
void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
void bnx2x_netif_start(struct bnx2x *bp);

/**
 * bnx2x_enable_msix - set msix configuration.
 *
 * @bp:		driver handle
 *
 * Fills msix_table, requests vectors and updates num_queues
 * according to the number of available vectors.
 */
int bnx2x_enable_msix(struct bnx2x *bp);

/**
 * bnx2x_enable_msi - request msi mode from OS, updated internals accordingly
 *
 * @bp:		driver handle
 */
int bnx2x_enable_msi(struct bnx2x *bp);

/**
 * bnx2x_poll - NAPI callback
 *
 * @napi:	napi structure
 * @budget:	NAPI budget
 */
int bnx2x_poll(struct napi_struct *napi, int budget);

/**
 * bnx2x_alloc_mem_bp - allocate memories outside the main driver structure
 *
 * @bp:		driver handle
 */
int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp);

/**
 * bnx2x_free_mem_bp - release memories allocated outside the main driver
 * structure
 *
 * @bp:		driver handle
 */
void bnx2x_free_mem_bp(struct bnx2x *bp);

/**
 * bnx2x_change_mtu - change mtu netdev callback
 *
 * @dev:	net device
 * @new_mtu:	requested mtu
 */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu);

#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
/**
 * bnx2x_fcoe_get_wwn - return the requested WWN value for this port
 *
 * @dev:	net_device
 * @wwn:	output buffer
 * @type:	WWN type: NETDEV_FCOE_WWNN (node) or NETDEV_FCOE_WWPN (port)
 */
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type);
#endif
u32 bnx2x_fix_features(struct net_device *dev, u32 features);
int bnx2x_set_features(struct net_device *dev, u32 features);

/**
 * bnx2x_tx_timeout - tx timeout netdev callback
 *
 * @dev:	net device
 */
void bnx2x_tx_timeout(struct net_device *dev);

static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	barrier(); /* status block is written to by the chip */
	fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
}

static inline void bnx2x_update_rx_prod_gen(struct bnx2x *bp,
			struct bnx2x_fastpath *fp, u16 bd_prod,
			u16 rx_comp_prod, u16 rx_sge_prod, u32 start)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	u32 i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64.  The following barrier is also mandatory since FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(rx_prods)/4; i++)
		REG_WR(bp, start + i*4, ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
					u8 segment, u16 index, u8 op,
					u8 update, u32 igu_addr)
{
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

	DP(NETIF_MSG_HW, "write 0x%08x to IGU addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr);
	REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func,
					  u8 idu_sb_id, bool is_Pf)
{
	u32 data, ctl, cnt = 100;
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
	u32 sb_bit = 1 << (idu_sb_id%32);
	u32 func_encode = func |
			((is_Pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT);
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;

	/* Not supported in BC mode */
	if (CHIP_INT_MODE_IS_BC(bp))
		return;

	data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
			<< IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
		IGU_REGULAR_CLEANUP_SET |
		IGU_REGULAR_BCLEANUP;

	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
	      func_encode << IGU_CTRL_REG_FID_SHIFT |
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   data, igu_addr_data);
	REG_WR(bp, igu_addr_data, data);
	mmiowb();
	barrier();
	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	mmiowb();
	barrier();

	/* wait for clean up to finish */
	while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
		msleep(20);

	if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
		DP(NETIF_MSG_HW, "Unable to finish IGU cleanup: "
			  "idu_sb_id %d offset %d bit %d (cnt %d)\n",
			  idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
	}
}

static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
				   u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm,
				u16 index, u8 op, u8 update)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update);
	else {
		u8 segment;

		if (CHIP_INT_MODE_IS_BC(bp))
			segment = storm;
		else if (igu_sb_id != bp->igu_dsb_id)
			segment = IGU_SEG_ACCESS_NORM;
		else if (storm == ATTENTION_ID)
			segment = IGU_SEG_ACCESS_ATTN;
		else
			segment = IGU_SEG_ACCESS_DEF;
		bnx2x_igu_ack_sb(bp, igu_sb_id, segment, index, op, update);
	}
}
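
/*
 * Note: bnx2x_ack_sb() dispatches on the interrupt controller type --
 * older E1/E1H chips ack through the HC block, newer chips through the
 * IGU.  On the IGU path a non-default status block is acked via the
 * "normal" segment, while the default SB uses the attention or default
 * segment depending on the storm id.
 */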

static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	barrier();
	return result;
}

static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp)
{
	u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8);
	u32 result = REG_RD(bp, igu_addr);

	DP(NETIF_MSG_HW, "read 0x%08x from IGU addr 0x%x\n",
	   result, igu_addr);

	barrier();
	return result;
}

static inline u16 bnx2x_ack_int(struct bnx2x *bp)
{
	barrier();
	if (bp->common.int_block == INT_BLOCK_HC)
		return bnx2x_hc_ack_int(bp);
	else
		return bnx2x_igu_ack_int(bp);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fp_txdata *txdata)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return txdata->tx_pkt_prod != txdata->tx_pkt_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x *bp,
				 struct bnx2x_fp_txdata *txdata)
{
	s16 used;
	u16 prod;
	u16 cons;

	prod = txdata->tx_bd_prod;
	cons = txdata->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > bp->tx_ring_size);
	WARN_ON((bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(bp->tx_ring_size) - used;
}
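
/*
 * Worked example for bnx2x_tx_avail() (illustrative values): with
 * tx_bd_prod = 4100, tx_bd_cons = 4000 and NUM_TX_RINGS = 16,
 * used = 100 + 16 = 116, so tx_ring_size = 4078 leaves 4078 - 116 = 3962
 * usable BDs.  The NUM_TX_RINGS term reserves one "next-page" link BD
 * per descriptor page so the links are never handed out as capacity.
 */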

static inline int bnx2x_tx_queue_has_work(struct bnx2x_fp_txdata *txdata)
{
	u16 hw_cons;

	/* Tell compiler that status block fields can change */
	barrier();
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	return hw_cons != txdata->tx_pkt_cons;
}

static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u8 cos;

	for_each_cos_in_tx_queue(fp, cos)
		if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
			return true;
	return false;
}

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}
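
/*
 * The increment above skips the "next-page" slot at the end of each RCQ
 * page: when the status-block consumer lands on the last index of a page
 * ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT), that slot holds
 * a page-link element rather than a completion, so the index is bumped
 * past it before being compared with rx_comp_cons.
 */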

/**
 * bnx2x_tx_disable - disables tx from stack point of view
 *
 * @bp:		driver handle
 */
static inline void bnx2x_tx_disable(struct bnx2x *bp)
{
	netif_tx_disable(bp->dev);
	netif_carrier_off(bp->dev);
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_add_all_napi(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, BNX2X_NAPI_WEIGHT);
}

static inline void bnx2x_del_all_napi(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
}

static inline void bnx2x_disable_msi(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;
	} else if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
}

static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
{
	return num_queues ?
		min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
		min_t(int, num_online_cpus(), BNX2X_MAX_QUEUES(bp));
}
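
/*
 * Example: with the num_queues module parameter left at 0 on an 8-CPU
 * host, this yields min(8, BNX2X_MAX_QUEUES(bp)) RSS queues; setting
 * num_queues=4 caps it at 4.  Either way the result never exceeds what
 * the chip/function supports.
 */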

static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
			idx--;
		}
	}
}
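
/*
 * Illustration (assuming 4 KiB BD pages of 8-byte SGE entries, i.e.
 * RX_SGE_CNT == 512): the first pass clears mask bits 511 and 510, the
 * second bits 1023 and 1022, and so on -- the two trailing "next page"
 * link elements of every SGE page, which never carry data.
 */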

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> BIT_VEC64_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
				      u16 cons, u16 prod)
{
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));
	prod_rx_buf->skb = cons_rx_buf->skb;
	*prod_bd = *cons_bd;
}

/************************* Init ******************************************/

/**
 * bnx2x_func_start - send function start command
 *
 * @bp:		driver handle
 *
 * Must be called before sending CLIENT_SETUP for the first client.
 */
static inline int bnx2x_func_start(struct bnx2x *bp)
{
	struct bnx2x_func_state_params func_params = {0};
	struct bnx2x_func_start_params *start_params =
		&func_params.params.start;

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_START;

	/* Function parameters */
	start_params->mf_mode = bp->mf_mode;
	start_params->sd_vlan_tag = bp->mf_ov;
	if (CHIP_IS_E1x(bp))
		start_params->network_cos_mode = OVERRIDE_COS;
	else
		start_params->network_cos_mode = STATIC_COS;

	return bnx2x_func_state_change(bp, &func_params);
}

/**
 * bnx2x_set_fw_mac_addr - fill in a MAC address in FW format
 *
 * @fw_hi:	pointer to upper part
 * @fw_mid:	pointer to middle part
 * @fw_lo:	pointer to lower part
 * @mac:	pointer to MAC address
 */
static inline void bnx2x_set_fw_mac_addr(u16 *fw_hi, u16 *fw_mid, u16 *fw_lo,
					 u8 *mac)
{
	((u8 *)fw_hi)[0]  = mac[1];
	((u8 *)fw_hi)[1]  = mac[0];
	((u8 *)fw_mid)[0] = mac[3];
	((u8 *)fw_mid)[1] = mac[2];
	((u8 *)fw_lo)[0]  = mac[5];
	((u8 *)fw_lo)[1]  = mac[4];
}
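
/*
 * Example (illustrative): for the address 00:10:18:ab:cd:ef the helper
 * above stores fw_hi as bytes {0x10, 0x00}, fw_mid as {0xab, 0x18} and
 * fw_lo as {0xef, 0xcd}; each 16-bit word thus holds one byte pair of
 * the MAC in the byte order the firmware expects, independent of host
 * endianness.
 */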

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	if (fp->disable_tpa)
		return;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
		struct sk_buff *skb = first_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}
		if (tpa_info->tpa_state == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(first_buf, mapping),
					 fp->rx_buf_size, DMA_FROM_DEVICE);
		dev_kfree_skb(skb);
		first_buf->skb = NULL;
	}
}

static inline void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
{
	int i;

	for (i = 1; i <= NUM_TX_RINGS; i++) {
		struct eth_tx_next_bd *tx_next_bd =
			&txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

		tx_next_bd->addr_hi =
			cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		tx_next_bd->addr_lo =
			cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
	}

	SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
	txdata->tx_db.data.zero_fill1 = 0;
	txdata->tx_db.data.prod = 0;

	txdata->tx_pkt_prod = 0;
	txdata->tx_pkt_cons = 0;
	txdata->tx_bd_prod = 0;
	txdata->tx_bd_cons = 0;
	txdata->tx_pkt = 0;
}
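
/*
 * The loop above chains the TX BD pages into a ring: the last descriptor
 * of each page is a "next-page" link holding the DMA address of the
 * following page, and the final iteration (i == NUM_TX_RINGS, so
 * i % NUM_TX_RINGS == 0) points back at page 0, closing the ring.
 */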

static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
{
	int i;
	u8 cos;

	for_each_tx_queue(bp, i)
		for_each_cos_in_tx_queue(&bp->fp[i], cos)
			bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]);
}

static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_RINGS; i++) {
		struct eth_rx_bd *rx_bd;

		rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
		rx_bd->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		rx_bd->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
	}
}

static inline void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		struct eth_rx_sge *sge;

		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
		sge->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));

		sge->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
	}
}

static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
		struct eth_rx_cqe_next_page *nextpg;

		nextpg = (struct eth_rx_cqe_next_page *)
			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
		nextpg->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_comp_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		nextpg->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_comp_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
	}
}

static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
				     int rx_ring_size)
{
	struct bnx2x *bp = fp->bp;
	u16 ring_prod, cqe_ring_prod;
	int i;

	fp->rx_comp_cons = 0;
	cqe_ring_prod = ring_prod = 0;

	/* This routine is called only during init, so
	 * fp->eth_q_stats.rx_skb_alloc_failed is still 0.
	 */
	for (i = 0; i < rx_ring_size; i++) {
		if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			continue;
		}
		ring_prod = NEXT_RX_IDX(ring_prod);
		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
		WARN_ON(ring_prod <= (i - fp->eth_q_stats.rx_skb_alloc_failed));
	}

	if (fp->eth_q_stats.rx_skb_alloc_failed)
		BNX2X_ERR("was only able to allocate "
			  "%d rx skbs on queue[%d]\n",
			  (i - fp->eth_q_stats.rx_skb_alloc_failed), fp->index);

	fp->rx_bd_prod = ring_prod;
	/* Limit the CQE producer by the CQE ring size */
	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
			       cqe_ring_prod);
	fp->rx_pkt = fp->rx_calls = 0;

	return i - fp->eth_q_stats.rx_skb_alloc_failed;
}

/* Statistics IDs are global per chip/path, while Client IDs for E1x
 * are per port.
 */
static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp)
{
	if (!CHIP_IS_E1x(fp->bp))
		return fp->cl_id;
	else
		return fp->cl_id + BP_PORT(fp->bp) * FP_SB_MAX_E1x;
}

static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
					       bnx2x_obj_type obj_type)
{
	struct bnx2x *bp = fp->bp;

	/* Configure classification DBs */
	bnx2x_init_mac_obj(bp, &fp->mac_obj, fp->cl_id, fp->cid,
			   BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
			   bnx2x_sp_mapping(bp, mac_rdata),
			   BNX2X_FILTER_MAC_PENDING,
			   &bp->sp_state, obj_type,
			   &bp->macs_pool);
}

/**
 * bnx2x_get_path_func_num - get number of active functions
 *
 * @bp:		driver handle
 *
 * Calculates the number of active (not hidden) functions on the
 * current path.
 */
static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
{
	u8 func_num = 0, i;

	/* 57710 has only one function per-port */
	if (CHIP_IS_E1(bp))
		return 1;

	/* Calculate the number of functions enabled on the current
	 * PATH/PORT.
	 */
	if (CHIP_REV_IS_SLOW(bp)) {
		if (IS_MF(bp))
			func_num = 4;
		else
			func_num = 2;
	} else {
		for (i = 0; i < E1H_FUNC_MAX / 2; i++) {
			u32 func_config =
				MF_CFG_RD(bp,
					  func_mf_config[BP_PORT(bp) + 2 * i].
					  config);
			func_num +=
				((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1);
		}
	}

	WARN_ON(!func_num);

	return func_num;
}

static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
{
	/* RX_MODE controlling object */
	bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);

	/* multicast configuration controlling object */
	bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
			     BP_FUNC(bp), BP_FUNC(bp),
			     bnx2x_sp(bp, mcast_rdata),
			     bnx2x_sp_mapping(bp, mcast_rdata),
			     BNX2X_FILTER_MCAST_PENDING, &bp->sp_state,
			     BNX2X_OBJ_TYPE_RX);

	/* Setup CAM credit pools */
	bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
				   bnx2x_get_path_func_num(bp));

	/* RSS configuration object */
	bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id,
				  bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp),
				  bnx2x_sp(bp, rss_rdata),
				  bnx2x_sp_mapping(bp, rss_rdata),
				  BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state,
				  BNX2X_OBJ_TYPE_RX);
}

static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
{
	if (CHIP_IS_E1x(fp->bp))
		return fp->cl_id + BP_PORT(fp->bp) * ETH_MAX_RX_CLIENTS_E1H;
	else
		return fp->cl_id;
}

static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;

	if (!CHIP_IS_E1x(bp))
		return USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
	else
		return USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
}

static inline void bnx2x_init_txdata(struct bnx2x *bp,
	struct bnx2x_fp_txdata *txdata, u32 cid, int txq_index,
	__le16 *tx_cons_sb)
{
	txdata->cid = cid;
	txdata->txq_index = txq_index;
	txdata->tx_cons_sb = tx_cons_sb;

	DP(BNX2X_MSG_SP, "created tx data cid %d, txq %d\n",
	   txdata->cid, txdata->txq_index);
}

#ifdef BCM_CNIC
static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
{
	return bp->cnic_base_cl_id + cl_idx +
		(bp->pf_num >> 1) * BNX2X_MAX_CNIC_ETH_CL_ID_IDX;
}

static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
{
	/* the 'first' id is allocated for the cnic */
	return bp->base_fw_ndsb;
}

static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp)
{
	return bp->igu_base_sb;
}

static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
	unsigned long q_type = 0;

	bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
						     BNX2X_FCOE_ETH_CL_ID_IDX);
	/* The current BNX2X_FCOE_ETH_CID definition implies no more than
	 * 16 ETH clients per function when CNIC is enabled!
	 */
	bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID;
	bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
	bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
	bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;

	bnx2x_init_txdata(bp, &bnx2x_fcoe(bp, txdata[0]),
			  fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX);

	DP(BNX2X_MSG_SP, "created fcoe tx data (fp index %d)\n", fp->index);

	/* qZone id equals to FW (per path) client id */
	bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
	/* init shortcut */
	bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
		bnx2x_rx_ustorm_prods_offset(fp);

	/* Configure Queue State object */
	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

	/* No multi-CoS for FCoE L2 client */
	BUG_ON(fp->max_cos != 1);

	bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, &fp->cid, 1,
			     BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
			     bnx2x_sp_mapping(bp, q_rdata), q_type);

	DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d "
			   "igu_sb %d\n",
	   fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
	   fp->igu_sb_id);
}
#endif

static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
				       struct bnx2x_fp_txdata *txdata)
{
	int cnt = 1000;

	while (bnx2x_has_tx_work_unload(txdata)) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for queue[%d]: "
				  "txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n",
				  txdata->txq_index, txdata->tx_pkt_prod,
				  txdata->tx_pkt_cons);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
			return -EBUSY;
#else
			break;
#endif
		}
		cnt--;
		usleep_range(1000, 1000);
	}

	return 0;
}
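
/*
 * Timing note: 1000 passes with a ~1 ms sleep each bound the drain wait
 * at roughly one second; with BNX2X_STOP_ON_ERROR defined the driver
 * panics on timeout instead of silently giving up.
 */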

int bnx2x_get_link_cfg_idx(struct bnx2x *bp);

static inline void __storm_memset_struct(struct bnx2x *bp,
					 u32 addr, size_t size, u32 *data)
{
	int i;

	for (i = 0; i < size/4; i++)
		REG_WR(bp, addr + (i * 4), data[i]);
}
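
/*
 * __storm_memset_struct() copies a host structure into storm internal
 * memory one 32-bit word at a time, so callers must pass structures
 * whose size is a multiple of 4 bytes; the two wrappers below use FW
 * interface structures that satisfy this by construction.
 */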

static inline void storm_memset_func_cfg(struct bnx2x *bp,
				struct tstorm_eth_function_common_config *tcfg,
				u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_function_common_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}

static inline void storm_memset_cmng(struct bnx2x *bp,
				     struct cmng_struct_per_port *cmng,
				     u8 port)
{
	size_t size = sizeof(struct cmng_struct_per_port);

	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);

	__storm_memset_struct(bp, addr, size, (u32 *)cmng);
}

/**
 * bnx2x_wait_sp_comp - wait for the outstanding SP commands.
 *
 * @bp:		driver handle
 * @mask:	bits that need to be cleared
 */
static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask)
{
	int tout = 5000; /* wait for the outstanding SP commands 5 secs */

	while (tout--) {
		smp_mb();
		netif_addr_lock_bh(bp->dev);
		if (!(bp->sp_state & mask)) {
			netif_addr_unlock_bh(bp->dev);
			return true;
		}
		netif_addr_unlock_bh(bp->dev);

		usleep_range(1000, 1000);
	}

	smp_mb();

	netif_addr_lock_bh(bp->dev);
	if (bp->sp_state & mask) {
		BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, "
			  "mask 0x%lx\n", bp->sp_state, mask);
		netif_addr_unlock_bh(bp->dev);
		return false;
	}
	netif_addr_unlock_bh(bp->dev);

	return true;
}
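
/*
 * Timing note: 5000 passes with a ~1 ms sleep each bound the wait at
 * roughly five seconds.  The netif_addr lock is taken around every
 * sp_state test so the check is consistent with the paths that set and
 * clear the pending bits.
 */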

/**
 * bnx2x_set_ctx_validation - set CDU context validation values
 *
 * @bp:		driver handle
 * @cxt:	context of the connection on the host memory
 * @cid:	SW CID of the connection to be used as a base for validation
 */
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid);

void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec);
void bnx2x_acquire_phy_lock(struct bnx2x *bp);
void bnx2x_release_phy_lock(struct bnx2x *bp);

/**
 * bnx2x_extract_max_cfg - extract MAX BW part from MF configuration.
 *
 * @bp:		driver handle
 * @mf_cfg:	MF configuration
 */
static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
{
	u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
			      FUNC_MF_CFG_MAX_BW_SHIFT;

	if (!max_cfg) {
		DP(NETIF_MSG_LINK,
		   "Max BW configured to 0 - using 100 instead\n");
		max_cfg = 100;
	}
	return max_cfg;
}
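
/*
 * Example (illustrative; assumes FUNC_MF_CFG_MAX_BW_MASK covers bits
 * 16-23): an mf_cfg word of 0x00320000 yields max_cfg = 0x32 = 50, i.e.
 * the function is capped at 50% of the link bandwidth, while a zero
 * field means "not configured" and is normalized to 100.
 */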

#endif /* BNX2X_CMN_H */