#ifndef BNX2X_CMN_H
#define BNX2X_CMN_H

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/irq.h>

#include "bnx2x.h"
#include "bnx2x_sriov.h"

extern int bnx2x_load_count[2][3];
extern int bnx2x_num_queues;

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			kfree((void *)x); \
			x = NULL; \
		} \
	} while (0)

#define BNX2X_PCI_ALLOC(y, size) \
({ \
	void *x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
	if (x) \
		DP(NETIF_MSG_HW, \
		   "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \
		   (unsigned long long)(*y), x); \
	x; \
})

#define BNX2X_PCI_FALLOC(y, size) \
({ \
	void *x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
	if (x) { \
		memset(x, 0xff, size); \
		DP(NETIF_MSG_HW, \
		   "BNX2X_PCI_FALLOC: Physical %Lx Virtual %p\n", \
		   (unsigned long long)(*y), x); \
	} \
	x; \
})

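/* Request an unload mode from the management firmware (MCP); returns the
 * unload mode granted by the MCP.
 */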
u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode);

void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link);

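/* Configure RSS through the given RSS configuration object.
 * @config_hash selects whether the hash configuration is (re)written,
 * @enable turns RSS on or off for this object.
 */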
int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
	      bool config_hash, bool enable);

void bnx2x__init_func_obj(struct bnx2x *bp);

int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		      bool leading);

int bnx2x_setup_leading(struct bnx2x *bp);

u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);

int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);

void bnx2x_link_set(struct bnx2x *bp);

void bnx2x_force_link_reset(struct bnx2x *bp);

u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);

void bnx2x_drv_pulse(struct bnx2x *bp);

void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
		      u16 index, u8 op, u8 update);

void bnx2x_pf_disable(struct bnx2x *bp);
int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val);

void bnx2x__link_status_update(struct bnx2x *bp);

void bnx2x_link_report(struct bnx2x *bp);

void __bnx2x_link_report(struct bnx2x *bp);

u16 bnx2x_get_mf_speed(struct bnx2x *bp);

irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);

irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);

int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);

void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);

void bnx2x_setup_cnic_info(struct bnx2x *bp);

void bnx2x_int_enable(struct bnx2x *bp);

void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);

void bnx2x_nic_init_cnic(struct bnx2x *bp);

void bnx2x_pre_irq_nic_init(struct bnx2x *bp);

void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code);

int bnx2x_alloc_mem_cnic(struct bnx2x *bp);

int bnx2x_alloc_mem(struct bnx2x *bp);

void bnx2x_free_mem_cnic(struct bnx2x *bp);

void bnx2x_free_mem(struct bnx2x *bp);

void bnx2x_set_num_queues(struct bnx2x *bp);

void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link);

int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);

int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);

int bnx2x_release_leader_lock(struct bnx2x *bp);

int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);

void bnx2x_set_rx_mode_inner(struct bnx2x *bp);

void bnx2x_set_pf_load(struct bnx2x *bp);
bool bnx2x_clear_pf_load(struct bnx2x *bp);
bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print);
bool bnx2x_reset_is_done(struct bnx2x *bp, int engine);
void bnx2x_set_reset_in_progress(struct bnx2x *bp);
void bnx2x_set_reset_global(struct bnx2x *bp);
void bnx2x_disable_close_the_gate(struct bnx2x *bp);
int bnx2x_init_hw_func_cnic(struct bnx2x *bp);

void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);

void bnx2x_ilt_set_info(struct bnx2x *bp);

void bnx2x_ilt_set_info_cnic(struct bnx2x *bp);

void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem);

int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);

void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);

void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);

int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link);

int bnx2x_nic_load(struct bnx2x *bp, int load_mode);

netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);

int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);
int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type,
		     void *type_data);

int bnx2x_get_vf_config(struct net_device *dev, int vf,
			struct ifla_vf_info *ivi);
int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
		      __be16 vlan_proto);
int bnx2x_set_vf_spoofchk(struct net_device *dev, int idx, bool val);

u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
		       struct net_device *sb_dev,
		       select_queue_fallback_t fallback);

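/* Write the RX BD, CQE and SGE producer values to this queue's producers
 * area in ustorm internal memory. The wmb() ensures rx_prods is fully
 * written before the chip can observe the producer update.
 */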
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	u32 i;

	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	wmb();

	for (i = 0; i < sizeof(rx_prods)/4; i++)
		REG_WR_RELAXED(bp, fp->ustorm_rx_prods_offset + i * 4,
			       ((u32 *)&rx_prods)[i]);

	mmiowb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

int bnx2x_reload_if_running(struct net_device *dev);

int bnx2x_change_mac_addr(struct net_device *dev, void *p);

int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata);

int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
int bnx2x_resume(struct pci_dev *pdev);

void bnx2x_free_irq(struct bnx2x *bp);

void bnx2x_free_fp_mem(struct bnx2x *bp);
void bnx2x_init_rx_rings(struct bnx2x *bp);
void bnx2x_init_rx_rings_cnic(struct bnx2x *bp);
void bnx2x_free_skbs(struct bnx2x *bp);
void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
void bnx2x_netif_start(struct bnx2x *bp);
int bnx2x_load_cnic(struct bnx2x *bp);

int bnx2x_enable_msix(struct bnx2x *bp);

int bnx2x_enable_msi(struct bnx2x *bp);

int bnx2x_alloc_mem_bp(struct bnx2x *bp);

void bnx2x_free_mem_bp(struct bnx2x *bp);

int bnx2x_change_mtu(struct net_device *dev, int new_mtu);

#ifdef NETDEV_FCOE_WWNN
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type);
#endif

netdev_features_t bnx2x_fix_features(struct net_device *dev,
				     netdev_features_t features);
int bnx2x_set_features(struct net_device *dev, netdev_features_t features);

void bnx2x_tx_timeout(struct net_device *dev);

void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default);

static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	barrier();
	fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
}

static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
					u8 segment, u16 index, u8 op,
					u8 update, u32 igu_addr)
{
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

	DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr);
	REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);

	mmiowb();
	barrier();
}

static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
				   u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	mmiowb();
	barrier();
}

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm,
				u16 index, u8 op, u8 update)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update);
	else {
		u8 segment;

		if (CHIP_INT_MODE_IS_BC(bp))
			segment = storm;
		else if (igu_sb_id != bp->igu_dsb_id)
			segment = IGU_SEG_ACCESS_DEF;
		else if (storm == ATTENTION_ID)
			segment = IGU_SEG_ACCESS_ATTN;
		else
			segment = IGU_SEG_ACCESS_DEF;
		bnx2x_igu_ack_sb(bp, igu_sb_id, segment, index, op, update);
	}
}

static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	barrier();
	return result;
}

static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp)
{
	u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8);
	u32 result = REG_RD(bp, igu_addr);

	DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n",
	   result, igu_addr);

	barrier();
	return result;
}

static inline u16 bnx2x_ack_int(struct bnx2x *bp)
{
	barrier();
	if (bp->common.int_block == INT_BLOCK_HC)
		return bnx2x_hc_ack_int(bp);
	else
		return bnx2x_igu_ack_int(bp);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fp_txdata *txdata)
{
	barrier();
	return txdata->tx_pkt_prod != txdata->tx_pkt_cons;
}

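/* Number of TX BDs still available on the ring, computed from the software
 * BD producer and consumer indices.
 */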
static inline u16 bnx2x_tx_avail(struct bnx2x *bp,
				 struct bnx2x_fp_txdata *txdata)
{
	s16 used;
	u16 prod;
	u16 cons;

	prod = txdata->tx_bd_prod;
	cons = txdata->tx_bd_cons;

	used = SUB_S16(prod, cons);

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > txdata->tx_ring_size);
	WARN_ON((txdata->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(txdata->tx_ring_size) - used;
}

static inline int bnx2x_tx_queue_has_work(struct bnx2x_fp_txdata *txdata)
{
	u16 hw_cons;

	barrier();
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	return hw_cons != txdata->tx_pkt_cons;
}

static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u8 cos;

	for_each_cos_in_tx_queue(fp, cos)
		if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
			return true;
	return false;
}

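/* RX CQEs are seeded by the driver with a non-zero marker; the chip
 * overwrites the CQE when completing it, so a zero marker means a
 * completion is ready to be processed.
 */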
#define BNX2X_IS_CQE_COMPLETED(cqe_fp) (cqe_fp->marker == 0x0)
#define BNX2X_SEED_CQE(cqe_fp) (cqe_fp->marker = 0xFFFFFFFF)
static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 cons;
	union eth_rx_cqe *cqe;
	struct eth_fast_path_rx_cqe *cqe_fp;

	cons = RCQ_BD(fp->rx_comp_cons);
	cqe = &fp->rx_comp_ring[cons];
	cqe_fp = &cqe->fast_path_cqe;
	return BNX2X_IS_CQE_COMPLETED(cqe_fp);
}

static inline void bnx2x_tx_disable(struct bnx2x *bp)
{
	netif_tx_disable(bp->dev);
	netif_carrier_off(bp->dev);
}

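/* Unmap and release the page attached to an RX SGE ring entry, then clear
 * the corresponding software buffer and hardware descriptor.
 */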
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	if (!page)
		return;

	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE, DMA_FROM_DEVICE);

	put_page(page);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue_cnic(bp, i) {
		napi_hash_del(&bnx2x_fp(bp, i, napi));
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	}
}

static inline void bnx2x_del_all_napi(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i) {
		napi_hash_del(&bnx2x_fp(bp, i, napi));
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	}
}

int bnx2x_set_int_mode(struct bnx2x *bp);

static inline void bnx2x_disable_msi(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		pci_disable_msix(bp->pdev);
		bp->flags &= ~(USING_MSIX_FLAG | USING_SINGLE_MSIX_FLAG);
	} else if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
}

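/* The last two entries of each SGE ring page do not carry data (they chain
 * to the next page), so their bits are kept cleared in the SGE mask.
 */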
static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
			idx--;
		}
	}
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));

	bnx2x_clear_sge_mask_next_elems(fp);
}

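/* Move an RX buffer (data pointer, DMA mapping and BD contents) from the
 * consumer slot to the producer slot so it can be reused as-is.
 */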
static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp,
				       u16 cons, u16 prod)
{
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));
	prod_rx_buf->data = cons_rx_buf->data;
	*prod_bd = *cons_bd;
}

static inline int func_by_vn(struct bnx2x *bp, int vn)
{
	return 2 * vn + BP_PORT(bp);
}

static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash)
{
	return bnx2x_rss(bp, &bp->rss_conf_obj, config_hash, true);
}

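/* Send the FUNCTION_START ramrod to the firmware: configures MF mode, outer
 * VLAN ethertype, CoS mode, tunnel destination ports and inner RSS, and
 * waits for the ramrod to complete.
 */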
static inline int bnx2x_func_start(struct bnx2x *bp)
{
	struct bnx2x_func_state_params func_params = {NULL};
	struct bnx2x_func_start_params *start_params =
		&func_params.params.start;
	u16 port;

	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_START;

	start_params->mf_mode = bp->mf_mode;
	start_params->sd_vlan_tag = bp->mf_ov;

	if (IS_MF_BD(bp)) {
		DP(NETIF_MSG_IFUP, "Configuring ethertype 0x88a8 for BD\n");
		start_params->sd_vlan_eth_type = ETH_P_8021AD;
		REG_WR(bp, PRS_REG_VLAN_TYPE_0, ETH_P_8021AD);
		REG_WR(bp, PBF_REG_VLAN_TYPE_0, ETH_P_8021AD);
		REG_WR(bp, NIG_REG_LLH_E1HOV_TYPE_1, ETH_P_8021AD);

		bnx2x_get_c2s_mapping(bp, start_params->c2s_pri,
				      &start_params->c2s_pri_default);
		start_params->c2s_pri_valid = 1;

		DP(NETIF_MSG_IFUP,
		   "Inner-to-Outer priority: %02x %02x %02x %02x %02x %02x %02x %02x [Default %02x]\n",
		   start_params->c2s_pri[0], start_params->c2s_pri[1],
		   start_params->c2s_pri[2], start_params->c2s_pri[3],
		   start_params->c2s_pri[4], start_params->c2s_pri[5],
		   start_params->c2s_pri[6], start_params->c2s_pri[7],
		   start_params->c2s_pri_default);
	}

	if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp))
		start_params->network_cos_mode = STATIC_COS;
	else
		start_params->network_cos_mode = FW_WRR;

	if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count) {
		port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].dst_port;
		start_params->vxlan_dst_port = port;
	}
	if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count) {
		port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].dst_port;
		start_params->geneve_dst_port = port;
	}

	start_params->inner_rss = 1;

	if (IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
		start_params->class_fail_ethtype = ETH_P_FIP;
		start_params->class_fail = 1;
		start_params->no_added_tags = 1;
	}

	return bnx2x_func_state_change(bp, &func_params);
}

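/* Pack a 6-byte MAC address into the three 16-bit words (hi/mid/lo) used by
 * the firmware, swapping the two bytes within each word.
 */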
static inline void bnx2x_set_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
					 __le16 *fw_lo, u8 *mac)
{
	((u8 *)fw_hi)[0] = mac[1];
	((u8 *)fw_hi)[1] = mac[0];
	((u8 *)fw_mid)[0] = mac[3];
	((u8 *)fw_mid)[1] = mac[2];
	((u8 *)fw_lo)[0] = mac[5];
	((u8 *)fw_lo)[1] = mac[4];
}

static inline void bnx2x_free_rx_mem_pool(struct bnx2x *bp,
					  struct bnx2x_alloc_pool *pool)
{
	if (!pool->page)
		return;

	put_page(pool->page);

	pool->page = NULL;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	if (fp->mode == TPA_MODE_DISABLED)
		return;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);

	bnx2x_free_rx_mem_pool(bp, &fp->page_pool);
}

static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_RINGS; i++) {
		struct eth_rx_bd *rx_bd;

		rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
		rx_bd->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		rx_bd->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
	}
}

static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;

	if (!CHIP_IS_E1x(bp)) {
		if (IS_FCOE_FP(fp))
			return bp->cnic_base_cl_id + (bp->pf_num >> 1);
		return fp->cl_id;
	}
	return fp->cl_id + BP_PORT(bp) * FP_SB_MAX_E1x;
}

static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
					       bnx2x_obj_type obj_type)
{
	struct bnx2x *bp = fp->bp;

	bnx2x_init_mac_obj(bp, &bnx2x_sp_obj(bp, fp).mac_obj, fp->cl_id,
			   fp->cid, BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
			   bnx2x_sp_mapping(bp, mac_rdata),
			   BNX2X_FILTER_MAC_PENDING,
			   &bp->sp_state, obj_type,
			   &bp->macs_pool);

	if (!CHIP_IS_E1x(bp))
		bnx2x_init_vlan_obj(bp, &bnx2x_sp_obj(bp, fp).vlan_obj,
				    fp->cl_id, fp->cid, BP_FUNC(bp),
				    bnx2x_sp(bp, vlan_rdata),
				    bnx2x_sp_mapping(bp, vlan_rdata),
				    BNX2X_FILTER_VLAN_PENDING,
				    &bp->sp_state, obj_type,
				    &bp->vlans_pool);
}

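/* Return the number of functions present on this engine, derived from the
 * chip type and the MF configuration in shmem.
 */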
static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
{
	u8 func_num = 0, i;

	if (CHIP_IS_E1(bp))
		return 1;

	if (CHIP_REV_IS_SLOW(bp)) {
		if (IS_MF(bp))
			func_num = 4;
		else
			func_num = 2;
	} else {
		for (i = 0; i < E1H_FUNC_MAX / 2; i++) {
			u32 func_config =
				MF_CFG_RD(bp,
					  func_mf_config[BP_PORT(bp) + 2 * i].
					  config);
			func_num +=
				((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1);
		}
	}

	WARN_ON(!func_num);

	return func_num;
}

static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
{
	bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);

	bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
			     BP_FUNC(bp), BP_FUNC(bp),
			     bnx2x_sp(bp, mcast_rdata),
			     bnx2x_sp_mapping(bp, mcast_rdata),
			     BNX2X_FILTER_MCAST_PENDING, &bp->sp_state,
			     BNX2X_OBJ_TYPE_RX);

	bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
				   bnx2x_get_path_func_num(bp));

	bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_FUNC(bp),
				    bnx2x_get_path_func_num(bp));

	bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id,
				  bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp),
				  bnx2x_sp(bp, rss_rdata),
				  bnx2x_sp_mapping(bp, rss_rdata),
				  BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state,
				  BNX2X_OBJ_TYPE_RX);

	bp->vlan_credit = PF_VLAN_CREDIT_E2(bp, bnx2x_get_path_func_num(bp));
}

static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
{
	if (CHIP_IS_E1x(fp->bp))
		return fp->cl_id + BP_PORT(fp->bp) * ETH_MAX_RX_CLIENTS_E1H;
	else
		return fp->cl_id;
}

static inline void bnx2x_init_txdata(struct bnx2x *bp,
				     struct bnx2x_fp_txdata *txdata, u32 cid,
				     int txq_index, __le16 *tx_cons_sb,
				     struct bnx2x_fastpath *fp)
{
	txdata->cid = cid;
	txdata->txq_index = txq_index;
	txdata->tx_cons_sb = tx_cons_sb;
	txdata->parent_fp = fp;
	txdata->tx_ring_size = IS_FCOE_FP(fp) ? MAX_TX_AVAIL : bp->tx_ring_size;

	DP(NETIF_MSG_IFUP, "created tx data cid %d, txq %d\n",
	   txdata->cid, txdata->txq_index);
}

static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
{
	return bp->cnic_base_cl_id + cl_idx +
		(bp->pf_num >> 1) * BNX2X_MAX_CNIC_ETH_CL_ID_IDX;
}

static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
{
	return bp->base_fw_ndsb;
}

static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp)
{
	return bp->igu_base_sb;
}

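/* Poll (roughly one second) until the TX queue has no packets in flight;
 * on timeout either panic (BNX2X_STOP_ON_ERROR) or log an error and give up.
 */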
static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
				       struct bnx2x_fp_txdata *txdata)
{
	int cnt = 1000;

	while (bnx2x_has_tx_work_unload(txdata)) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for queue[%d]: txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n",
				  txdata->txq_index, txdata->tx_pkt_prod,
				  txdata->tx_pkt_cons);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
			return -EBUSY;
#else
			break;
#endif
		}
		cnt--;
		usleep_range(1000, 2000);
	}

	return 0;
}

int bnx2x_get_link_cfg_idx(struct bnx2x *bp);

static inline void __storm_memset_struct(struct bnx2x *bp,
					 u32 addr, size_t size, u32 *data)
{
	int i;

	for (i = 0; i < size/4; i++)
		REG_WR(bp, addr + (i * 4), data[i]);
}

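/* Wait (up to a few seconds) for the slow-path state bits in mask to clear.
 * Returns true once they cleared, false on timeout.
 */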
static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask)
{
	int tout = 5000;

	while (tout--) {
		smp_mb();
		netif_addr_lock_bh(bp->dev);
		if (!(bp->sp_state & mask)) {
			netif_addr_unlock_bh(bp->dev);
			return true;
		}
		netif_addr_unlock_bh(bp->dev);

		usleep_range(1000, 2000);
	}

	smp_mb();

	netif_addr_lock_bh(bp->dev);
	if (bp->sp_state & mask) {
		BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, mask 0x%lx\n",
			  bp->sp_state, mask);
		netif_addr_unlock_bh(bp->dev);
		return false;
	}
	netif_addr_unlock_bh(bp->dev);

	return true;
}

void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid);

void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec);
void bnx2x_acquire_phy_lock(struct bnx2x *bp);
void bnx2x_release_phy_lock(struct bnx2x *bp);

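/* Extract the maximum BW configuration from an mf_cfg word; a value of 0 is
 * treated as a misconfiguration and replaced with 100 (percent).
 */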
static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
{
	u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
		      FUNC_MF_CFG_MAX_BW_SHIFT;

	if (!max_cfg) {
		DP(NETIF_MSG_IFUP | BNX2X_MSG_ETHTOOL,
		   "Max BW configured to 0 - using 100 instead\n");
		max_cfg = 100;
	}
	return max_cfg;
}

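/* Check whether GRO aggregation is possible for the given MTU: each fragment
 * must fit in an SGE page and the total fragment count must not exceed
 * MAX_SKB_FRAGS.
 */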
static inline bool bnx2x_mtu_allows_gro(int mtu)
{
	int fpp = SGE_PAGE_SIZE / (mtu - ETH_MAX_TPA_HEADER_SIZE);

	return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
}

void bnx2x_get_iscsi_info(struct bnx2x *bp);

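/* Notify the other functions on this port of a link change by raising their
 * LINK_SYNC general attention bits.
 */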
static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
{
	int func;
	int vn;

	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
		if (vn == BP_VN(bp))
			continue;

		func = func_by_vn(bp, vn);
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
		       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
	}
}

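/* Set or clear bits in the shmem2 drv_flags word under the DRV_FLAGS HW lock,
 * if the running management firmware exposes that field.
 */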
static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
{
	if (SHMEM2_HAS(bp, drv_flags)) {
		u32 drv_flags;

		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
		drv_flags = SHMEM2_RD(bp, drv_flags);

		if (set)
			SET_FLAGS(drv_flags, flags);
		else
			RESET_FLAGS(drv_flags, flags);

		SHMEM2_WR(bp, drv_flags, drv_flags);
		DP(NETIF_MSG_IFUP, "drv_flags 0x%08x\n", drv_flags);
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
	}
}

void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len);

int bnx2x_drain_tx_queues(struct bnx2x *bp);
void bnx2x_squeeze_objects(struct bnx2x *bp);

void bnx2x_schedule_sp_rtnl(struct bnx2x*, enum sp_rtnl_flag,
			    u32 verbose);

void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state);

int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
		     int buf_size);

#endif