/* bnx2x_cmn.h: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */
#ifndef BNX2X_CMN_H
#define BNX2X_CMN_H

#include <linux/types.h>
#include <linux/netdevice.h>

#include "bnx2x.h"

extern int num_queues;

/*********************** Interfaces ****************************
 *  Functions that need to be implemented by each driver version
 */

/**
 * bnx2x_initial_phy_init - initialize link parameters structure variables.
 *
 * @bp:		driver handle
 * @load_mode:	current mode
 */
u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);

/**
 * bnx2x_link_set - configure HW according to link parameters structure.
 *
 * @bp:		driver handle
 */
void bnx2x_link_set(struct bnx2x *bp);

/**
 * bnx2x_link_test - query link status.
 *
 * @bp:		driver handle
 * @is_serdes:	true if a SerDes link should be tested
 *
 * Returns 0 if link is UP, non-zero otherwise.
 */
u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);

/**
 * bnx2x__link_status_update - handles link status change.
 *
 * @bp:		driver handle
 */
void bnx2x__link_status_update(struct bnx2x *bp);

/**
 * bnx2x_link_report - report link status to the upper layer.
 *
 * @bp:		driver handle
 */
void bnx2x_link_report(struct bnx2x *bp);

/**
 * bnx2x_get_mf_speed - calculate MF speed.
 *
 * @bp:		driver handle
 *
 * Takes into account the current linespeed and MF configuration.
 */
u16 bnx2x_get_mf_speed(struct bnx2x *bp);

/**
 * bnx2x_msix_sp_int - MSI-X slowpath interrupt handler.
 *
 * @irq:		irq number
 * @dev_instance:	private instance
 */
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);

/**
 * bnx2x_interrupt - non MSI-X interrupt handler.
 *
 * @irq:		irq number
 * @dev_instance:	private instance
 */
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);
#ifdef BCM_CNIC

/**
 * bnx2x_cnic_notify - send command to CNIC driver.
 *
 * @bp:		driver handle
 * @cmd:	command
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);

/**
 * bnx2x_setup_cnic_irq_info - provides the CNIC driver with IRQ information.
 *
 * @bp:		driver handle
 */
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
#endif

/**
 * bnx2x_int_enable - enable HW interrupts.
 *
 * @bp:		driver handle
 */
void bnx2x_int_enable(struct bnx2x *bp);

/**
 * bnx2x_int_disable_sync - disable interrupts.
 *
 * @bp:		driver handle
 * @disable_hw:	true, disable HW interrupts.
 *
 * This function ensures that there are no
 * ISRs or SP DPCs (sp_task) are running after it returns.
 */
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);

/**
 * bnx2x_init_firmware - loads device firmware.
 *
 * @bp:		driver handle
 */
int bnx2x_init_firmware(struct bnx2x *bp);

/**
 * bnx2x_init_hw - init HW blocks according to current initialization stage.
 *
 * @bp:		driver handle
 * @load_code:	COMMON, PORT or FUNCTION
 */
int bnx2x_init_hw(struct bnx2x *bp, u32 load_code);

/**
 * bnx2x_nic_init - init driver internals.
 *
 * @bp:		driver handle
 * @load_code:	COMMON, PORT or FUNCTION
 *
 * Initializes:
 *  - rings
 *  - status blocks
 *  - etc.
 */
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);

/**
 * bnx2x_alloc_mem - allocate driver's memory.
 *
 * @bp:		driver handle
 */
int bnx2x_alloc_mem(struct bnx2x *bp);

/**
 * bnx2x_free_mem - release driver's memory.
 *
 * @bp:		driver handle
 */
void bnx2x_free_mem(struct bnx2x *bp);

/**
 * bnx2x_setup_client - setup eth client.
 *
 * @bp:		driver handle
 * @fp:		pointer to the fastpath structure
 * @is_leading:	boolean
 */
int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		       int is_leading);

/**
 * bnx2x_set_num_queues - set number of queues according to mode.
 *
 * @bp:		driver handle
 */
void bnx2x_set_num_queues(struct bnx2x *bp);

/**
 * bnx2x_chip_cleanup - cleanup chip internals.
 *
 * @bp:			driver handle
 * @unload_mode:	COMMON, PORT, FUNCTION
 *
 * - Cleanup MAC configuration.
 * - Closes clients.
 * - etc.
 */
void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode);

/**
 * bnx2x_acquire_hw_lock - acquire HW lock.
 *
 * @bp:		driver handle
 * @resource:	resource bit which was locked
 */
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * bnx2x_release_hw_lock - release HW lock.
 *
 * @bp:		driver handle
 * @resource:	resource bit which was locked
 */
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * bnx2x_set_eth_mac - configure eth MAC address in the HW.
 *
 * @bp:		driver handle
 * @set:	set or clear
 *
 * Configures according to the value in netdev->dev_addr.
 */
void bnx2x_set_eth_mac(struct bnx2x *bp, int set);

#ifdef BCM_CNIC
/**
 * bnx2x_set_fip_eth_mac_addr - Set/Clear FIP MAC(s)
 *
 * @bp:		driver handle
 * @set:	set or clear the CAM entry
 *
 * Uses the next entries in the CAM after the ETH MAC(s).
 * This function will wait until the ramrod completion returns.
 * Returns -EBUSY if the ramrod completion timed out.
 */
int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set);

/**
 * bnx2x_set_all_enode_macs - Set/Clear ALL_ENODE mcast MAC.
 *
 * @bp:		driver handle
 * @set:	set or clear
 */
int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set);
#endif

/**
 * bnx2x_set_rx_mode - set MAC filtering policies.
 *
 * @dev:	netdevice
 *
 * If bp->state is OPEN, should be called with
 * netif_addr_lock_bh().
 */
void bnx2x_set_rx_mode(struct net_device *dev);

/**
 * bnx2x_set_storm_rx_mode - configure MAC filtering rules in the FW.
 *
 * @bp:		driver handle
 */
void bnx2x_set_storm_rx_mode(struct bnx2x *bp);

/* Parity errors and recovery related functions */
void bnx2x_inc_load_cnt(struct bnx2x *bp);
u32 bnx2x_dec_load_cnt(struct bnx2x *bp);
bool bnx2x_chk_parity_attn(struct bnx2x *bp);
bool bnx2x_reset_is_done(struct bnx2x *bp);
void bnx2x_disable_close_the_gate(struct bnx2x *bp);

/**
 * bnx2x_stats_handle - perform statistics handling according to event.
 *
 * @bp:		driver handle
 * @event:	bnx2x_stats_event
 */
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/**
 * bnx2x_sp_event - handle ramrods completion.
 *
 * @fp:		fastpath handle for the event
 * @rr_cqe:	eth_rx_cqe
 */
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);

/**
 * bnx2x_func_start - init function.
 *
 * @bp:		driver handle
 *
 * Must be called before sending CLIENT_SETUP for the first client.
 */
int bnx2x_func_start(struct bnx2x *bp);

/**
 * bnx2x_ilt_set_info - prepare ILT configurations.
 *
 * @bp:		driver handle
 */
void bnx2x_ilt_set_info(struct bnx2x *bp);

/**
 * bnx2x_dcbx_init - initialize the DCBX protocol.
 *
 * @bp:		driver handle
 */
void bnx2x_dcbx_init(struct bnx2x *bp);

/**
 * bnx2x_set_power_state - set power state to the requested value.
 *
 * @bp:		driver handle
 * @state:	required state D0 or D3hot
 *
 * Currently only D0 and D3hot are supported.
 */
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);

/**
 * bnx2x_update_max_mf_config - update MAX part of MF configuration.
 *
 * @bp:		driver handle
 * @value:	new value
 */
void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);

/* dev_close main block */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);

/* dev_open main block */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode);

/* hard_xmit callback */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);

/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);

int bnx2x_change_mac_addr(struct net_device *dev, void *p);

/* NAPI poll Rx part */
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);

/* NAPI poll Tx part */
int bnx2x_tx_int(struct bnx2x_fastpath *fp);

/* suspend/resume callbacks */
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
int bnx2x_resume(struct pci_dev *pdev);

/* Release IRQ vectors */
void bnx2x_free_irq(struct bnx2x *bp);

void bnx2x_init_rx_rings(struct bnx2x *bp);
void bnx2x_free_skbs(struct bnx2x *bp);
void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
void bnx2x_netif_start(struct bnx2x *bp);

/**
 * bnx2x_enable_msix - set msix configuration.
 *
 * @bp:		driver handle
 *
 * Fills msix_table, requests vectors and updates num_queues
 * according to the number of available vectors.
 */
int bnx2x_enable_msix(struct bnx2x *bp);

/**
 * bnx2x_enable_msi - request msi mode from OS, updated internals accordingly.
 *
 * @bp:		driver handle
 */
int bnx2x_enable_msi(struct bnx2x *bp);

/**
 * bnx2x_poll - NAPI callback.
 *
 * @napi:	napi structure
 * @budget:	max number of packets to process in this call
 */
int bnx2x_poll(struct napi_struct *napi, int budget);

/**
 * bnx2x_alloc_mem_bp - allocate memories outside the main driver structure.
 *
 * @bp:		driver handle
 */
int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp);

/**
 * bnx2x_free_mem_bp - release memories outside the main driver structure.
 *
 * @bp:		driver handle
 */
void bnx2x_free_mem_bp(struct bnx2x *bp);

/**
 * bnx2x_change_mtu - change mtu netdev callback.
 *
 * @dev:	net device
 * @new_mtu:	requested mtu
 */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu);

/**
 * bnx2x_tx_timeout - tx timeout netdev callback.
 *
 * @dev:	net device
 */
void bnx2x_tx_timeout(struct net_device *dev);

#ifdef BCM_VLAN
/**
 * bnx2x_vlan_rx_register - VLAN group registration netdev callback.
 *
 * @dev:	net device
 * @vlgrp:	vlan group
 */
void bnx2x_vlan_rx_register(struct net_device *dev,
			    struct vlan_group *vlgrp);

#endif

static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	barrier(); /* status block is written to by the chip */
	fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
}

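/**
 * bnx2x_update_rx_prod - write RX producers to the chip.
 *
 * Publishes the BD, CQE and SGE producers to the ustorm internal
 * memory so the FW can use the newly posted buffers.
 */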
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW assumes
	 * BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp,
		       BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

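/**
 * bnx2x_igu_ack_sb_gen - acknowledge a status block through the IGU.
 *
 * Builds an IGU ack command from the segment, index, operation and
 * update flag and writes it to the given IGU command address.
 */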
static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
					u8 segment, u16 index, u8 op,
					u8 update, u32 igu_addr)
{
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

	DP(NETIF_MSG_HW, "write 0x%08x to IGU addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr);
	REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

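/**
 * bnx2x_igu_clear_sb_gen - trigger IGU status block cleanup and wait for it.
 *
 * Writes the cleanup command to the IGU via GRC and polls the
 * cleanup-done bit; not supported in backward compatible mode.
 */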
static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp,
					  u8 idu_sb_id, bool is_Pf)
{
	u32 data, ctl, cnt = 100;
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
	u32 sb_bit = 1 << (idu_sb_id%32);
	u32 func_encode = BP_FUNC(bp) |
			((is_Pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT);
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;

	/* Not supported in BC mode */
	if (CHIP_INT_MODE_IS_BC(bp))
		return;

	data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
			<< IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
		IGU_REGULAR_CLEANUP_SET |
		IGU_REGULAR_BCLEANUP;

	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
	      func_encode << IGU_CTRL_REG_FID_SHIFT |
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
			 data, igu_addr_data);
	REG_WR(bp, igu_addr_data, data);
	mmiowb();
	barrier();
	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
			 ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	mmiowb();
	barrier();

	/* wait for clean up to finish */
	while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
		msleep(20);

	if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
		DP(NETIF_MSG_HW, "Unable to finish IGU cleanup: "
			  "idu_sb_id %d offset %d bit %d (cnt %d)\n",
			  idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
	}
}

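/**
 * bnx2x_hc_ack_sb - acknowledge a status block through the HC block.
 *
 * Legacy path used on chips where the HC (host coalescing) block
 * rather than the IGU handles interrupts.
 */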
static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
				   u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

static inline void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
				    u16 index, u8 op, u8 update)
{
	u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;

	bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
			     igu_addr);
}

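/**
 * bnx2x_ack_sb - acknowledge a status block via the HC or the IGU.
 *
 * Dispatches to bnx2x_hc_ack_sb() or bnx2x_igu_ack_sb() according to
 * the interrupt block the chip uses, translating the storm id into an
 * IGU access segment where needed.
 */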
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm,
				u16 index, u8 op, u8 update)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update);
	else {
		u8 segment;

		if (CHIP_INT_MODE_IS_BC(bp))
			segment = storm;
		else if (igu_sb_id != bp->igu_dsb_id)
			segment = IGU_SEG_ACCESS_DEF;
		else if (storm == ATTENTION_ID)
			segment = IGU_SEG_ACCESS_ATTN;
		else
			segment = IGU_SEG_ACCESS_NORM;
		bnx2x_igu_ack_sb(bp, igu_sb_id, segment, index, op, update);
	}
}

static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	barrier();
	return result;
}

static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp)
{
	u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8);
	u32 result = REG_RD(bp, igu_addr);

	DP(NETIF_MSG_HW, "read 0x%08x from IGU addr 0x%x\n",
	   result, igu_addr);

	barrier();
	return result;
}

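/**
 * bnx2x_ack_int - read and acknowledge the pending interrupt status.
 *
 * Reads from the HC or the IGU according to the chip's interrupt block.
 */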
static inline u16 bnx2x_ack_int(struct bnx2x *bp)
{
	barrier();
	if (bp->common.int_block == INT_BLOCK_HC)
		return bnx2x_hc_ack_int(bp);
	else
		return bnx2x_igu_ack_int(bp);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return fp->tx_pkt_prod != fp->tx_pkt_cons;
}

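/**
 * bnx2x_tx_avail - number of available TX BDs.
 *
 * The NUM_TX_RINGS "next page" entries are accounted as used so that
 * they are never handed out to the stack.
 */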
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 hw_cons;

	/* Tell compiler that status block fields can change */
	barrier();
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	return hw_cons != fp->tx_pkt_cons;
}

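/**
 * bnx2x_has_rx_work - check for new RCQ completions.
 *
 * Skips over the "next page" element of the RCQ ring when the consumer
 * from the status block points at it.
 */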
static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}

/**
 * bnx2x_tx_disable - disable TX from the stack's point of view.
 *
 * @bp:		driver handle
 */
static inline void bnx2x_tx_disable(struct bnx2x *bp)
{
	netif_tx_disable(bp->dev);
	netif_carrier_off(bp->dev);
}

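/**
 * bnx2x_free_rx_sge - unmap and free the page of an RX SGE ring entry
 * and clear the entry itself.
 */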
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_add_all_napi(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_napi_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, BNX2X_NAPI_WEIGHT);
}

static inline void bnx2x_del_all_napi(struct bnx2x *bp)
{
	int i;

	for_each_napi_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
}

static inline void bnx2x_disable_msi(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;
	} else if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
}

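/**
 * bnx2x_calc_num_queues - number of RSS queues to use.
 *
 * Uses the num_queues module parameter if set, otherwise one queue per
 * online CPU; both are capped by the HW maximum.
 */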
static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
{
	return num_queues ?
	       min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
	       min_t(int, num_online_cpus(), BNX2X_MAX_QUEUES(bp));
}

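/**
 * bnx2x_clear_sge_mask_next_elems - clear the mask bits of the "next page"
 * SGE elements, which never carry data and must not enter the accounting.
 */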
static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

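/**
 * bnx2x_alloc_rx_sge - allocate and map a page for an RX SGE ring entry.
 *
 * Returns -ENOMEM if the page allocation or the DMA mapping fails.
 */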
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

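/**
 * bnx2x_alloc_rx_skb - allocate and map an skb for an RX BD.
 *
 * Returns -ENOMEM if the skb allocation or the DMA mapping fails.
 */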
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod;
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
				      u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	dma_sync_single_for_device(&bp->pdev->dev,
				   dma_unmap_addr(cons_rx_buf, mapping),
				   RX_COPY_THRESH, DMA_FROM_DEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, DMA_FROM_DEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

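/**
 * bnx2x_init_tx_rings - link the TX ring pages through their "next page"
 * BDs and reset the doorbell data and all ring indices.
 */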
static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
{
	int i, j;

	for_each_tx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_next_bd *tx_next_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

			tx_next_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_next_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		SET_FLAG(fp->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
		fp->tx_db.data.zero_fill1 = 0;
		fp->tx_db.data.prod = 0;

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_pkt = 0;
	}
}

static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_RINGS; i++) {
		struct eth_rx_bd *rx_bd;

		rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
		rx_bd->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		rx_bd->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
	}
}

static inline void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		struct eth_rx_sge *sge;

		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
		sge->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		sge->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
	}
}

static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
		struct eth_rx_cqe_next_page *nextpg;

		nextpg = (struct eth_rx_cqe_next_page *)
			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
		nextpg->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_comp_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		nextpg->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_comp_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
	}
}

#ifdef BCM_CNIC
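/**
 * bnx2x_init_fcoe_fp - initialize the FCoE L2 client fastpath structure:
 * client id, cid, status block ids and the rx producers offset.
 */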
static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
{
	bnx2x_fcoe(bp, cl_id) = BNX2X_FCOE_ETH_CL_ID +
		BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
	bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID;
	bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
	bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
	bnx2x_fcoe(bp, bp) = bp;
	bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
	bnx2x_fcoe(bp, index) = FCOE_IDX;
	bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
	bnx2x_fcoe(bp, tx_cons_sb) = BNX2X_FCOE_L2_TX_INDEX;

	/* qZone id equals to FW (per path) client id */
	bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fcoe(bp, cl_id) +
		BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
				ETH_MAX_RX_CLIENTS_E1H);

	/* init shortcut to the rx producers location */
	bnx2x_fcoe(bp, ustorm_rx_prods_offset) = CHIP_IS_E2(bp) ?
	    USTORM_RX_PRODS_E2_OFFSET(bnx2x_fcoe(bp, cl_qzone_id)) :
	    USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), bnx2x_fcoe_fp(bp)->cl_id);
}
#endif

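/**
 * __storm_memset_struct - copy a structure into storm internal memory
 * as a sequence of 32-bit writes.
 */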
static inline void __storm_memset_struct(struct bnx2x *bp,
					 u32 addr, size_t size, u32 *data)
{
	int i;

	for (i = 0; i < size/4; i++)
		REG_WR(bp, addr + (i * 4), data[i]);
}

static inline void storm_memset_mac_filters(struct bnx2x *bp,
			struct tstorm_eth_mac_filter_config *mac_filters,
			u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_mac_filter_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_MAC_FILTER_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
}

static inline void storm_memset_cmng(struct bnx2x *bp,
				     struct cmng_struct_per_port *cmng,
				     u8 port)
{
	size_t size = sizeof(struct cmng_struct_per_port);

	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);

	__storm_memset_struct(bp, addr, size, (u32 *)cmng);
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp);
void bnx2x_release_phy_lock(struct bnx2x *bp);

/**
 * bnx2x_extract_max_cfg - extract the MAX BW part from MF configuration.
 *
 * @bp:		driver handle
 * @mf_cfg:	MF configuration
 */
static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
{
	u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
			      FUNC_MF_CFG_MAX_BW_SHIFT;
	if (!max_cfg) {
		BNX2X_ERR("Illegal configuration detected for Max BW - "
			  "using 100 instead\n");
		max_cfg = 100;
	}
	return max_cfg;
}

#endif /* BNX2X_CMN_H */