1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include <linux/module.h>
20#include <linux/crc32.h>
21#include <linux/netdevice.h>
22#include <linux/etherdevice.h>
23#include <linux/crc32c.h>
24#include "bnx2x.h"
25#include "bnx2x_cmn.h"
26#include "bnx2x_sp.h"
27
/* Cap on multicast entries when running on emulation — presumably consumed
 * by the multicast code later in this file; TODO confirm usage.
 */
#define BNX2X_MAX_EMUL_MULTI		16
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
44 struct bnx2x_exe_queue_obj *o,
45 int exe_len,
46 union bnx2x_qable_obj *owner,
47 exe_q_validate validate,
48 exe_q_optimize optimize,
49 exe_q_execute exec,
50 exe_q_get get)
51{
52 memset(o, 0, sizeof(*o));
53
54 INIT_LIST_HEAD(&o->exe_queue);
55 INIT_LIST_HEAD(&o->pending_comp);
56
57 spin_lock_init(&o->lock);
58
59 o->exe_chunk_len = exe_len;
60 o->owner = owner;
61
62
63 o->validate = validate;
64 o->optimize = optimize;
65 o->execute = exec;
66 o->get = get;
67
68 DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk "
69 "length of %d\n", exe_len);
70}
71
/* Free an execution queue element previously obtained from
 * bnx2x_exe_queue_alloc_elem().
 */
static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
					     struct bnx2x_exeq_elem *elem)
{
	DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
	kfree(elem);
}
78
79static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
80{
81 struct bnx2x_exeq_elem *elem;
82 int cnt = 0;
83
84 spin_lock_bh(&o->lock);
85
86 list_for_each_entry(elem, &o->exe_queue, link)
87 cnt++;
88
89 spin_unlock_bh(&o->lock);
90
91 return cnt;
92}
93
94
95
96
97
98
99
100
101
102
103
104static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
105 struct bnx2x_exe_queue_obj *o,
106 struct bnx2x_exeq_elem *elem,
107 bool restore)
108{
109 int rc;
110
111 spin_lock_bh(&o->lock);
112
113 if (!restore) {
114
115 rc = o->optimize(bp, o->owner, elem);
116 if (rc)
117 goto free_and_exit;
118
119
120 rc = o->validate(bp, o->owner, elem);
121 if (rc) {
122 BNX2X_ERR("Preamble failed: %d\n", rc);
123 goto free_and_exit;
124 }
125 }
126
127
128 list_add_tail(&elem->link, &o->exe_queue);
129
130 spin_unlock_bh(&o->lock);
131
132 return 0;
133
134free_and_exit:
135 bnx2x_exe_queue_free_elem(bp, elem);
136
137 spin_unlock_bh(&o->lock);
138
139 return rc;
140
141}
142
143static inline void __bnx2x_exe_queue_reset_pending(
144 struct bnx2x *bp,
145 struct bnx2x_exe_queue_obj *o)
146{
147 struct bnx2x_exeq_elem *elem;
148
149 while (!list_empty(&o->pending_comp)) {
150 elem = list_first_entry(&o->pending_comp,
151 struct bnx2x_exeq_elem, link);
152
153 list_del(&elem->link);
154 bnx2x_exe_queue_free_elem(bp, elem);
155 }
156}
157
158static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
159 struct bnx2x_exe_queue_obj *o)
160{
161
162 spin_lock_bh(&o->lock);
163
164 __bnx2x_exe_queue_reset_pending(bp, o);
165
166 spin_unlock_bh(&o->lock);
167
168}
169
170
171
172
173
174
175
176
177
178
/**
 * bnx2x_exe_queue_step - execute one chunk of queued commands
 *
 * @bp:           device handle
 * @o:            execution queue to run
 * @ramrod_flags: RAMROD_XX flags for this run
 *
 * Moves up to o->exe_chunk_len worth of commands from exe_queue to
 * pending_comp and hands them to o->execute(). Returns a negative errno
 * on failure, 0 if the chunk completed synchronously, and a positive
 * value when completion is asynchronous or a previous chunk is still
 * pending.
 */
static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
				       struct bnx2x_exe_queue_obj *o,
				       unsigned long *ramrod_flags)
{
	struct bnx2x_exeq_elem *elem, spacer;
	int cur_len = 0, rc;

	memset(&spacer, 0, sizeof(spacer));

	spin_lock_bh(&o->lock);

	/* Next step may not be performed until the current one finishes,
	 * unless RAMROD_DRV_CLR_ONLY is set: in that case we only want to
	 * clear object internals without sending anything to the FW, which
	 * also implies no completion will ever clear the pending list.
	 */
	if (!list_empty(&o->pending_comp)) {
		if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
			DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: "
			   "resetting pending_comp\n");
			__bnx2x_exe_queue_reset_pending(bp, o);
		} else {
			spin_unlock_bh(&o->lock);
			return 1;
		}
	}

	/* Walk the pending commands and build the next execution chunk:
	 * stop before the first element that would exceed exe_chunk_len.
	 */
	while (!list_empty(&o->exe_queue)) {
		elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
					link);
		WARN_ON(!elem->cmd_len);

		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
			cur_len += elem->cmd_len;
			/* Insert a spacer so that the two lists are never
			 * both empty while an element moves between them;
			 * this lets bnx2x_exe_queue_empty() run locklessly.
			 * The mb() pairs with the one in that function.
			 */
			list_add_tail(&spacer.link, &o->pending_comp);
			mb();
			list_del(&elem->link);
			list_add_tail(&elem->link, &o->pending_comp);
			list_del(&spacer.link);
		} else
			break;
	}

	/* Nothing fit into the chunk - nothing to do */
	if (!cur_len) {
		spin_unlock_bh(&o->lock);
		return 0;
	}

	rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
	if (rc < 0)
		/* On error, return the commands to the head of the
		 * execution queue (pending_comp becomes empty again).
		 */
		list_splice_init(&o->pending_comp, &o->exe_queue);
	else if (!rc)
		/* Zero means no outstanding completions - the pending
		 * list may be dismissed right away.
		 */
		__bnx2x_exe_queue_reset_pending(bp, o);

	spin_unlock_bh(&o->lock);
	return rc;
}
256
/* Lockless check that both the execution and the pending-completion lists
 * are empty. The mb() pairs with the barrier in bnx2x_exe_queue_step(),
 * whose spacer element guarantees the two lists are never simultaneously
 * empty while a command is in flight between them.
 */
static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
{
	bool empty = list_empty(&o->exe_queue);

	/* Don't let the two list reads be reordered */
	mb();

	return empty && list_empty(&o->pending_comp);
}
266
267static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
268 struct bnx2x *bp)
269{
270 DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
271 return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
272}
273
274
275static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
276{
277 return !!test_bit(o->state, o->pstate);
278}
279
/* Clear the object's pending-state bit with full memory ordering around
 * the atomic op (legacy smp_mb__before/after_clear_bit() barrier API).
 */
static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
{
	smp_mb__before_clear_bit();
	clear_bit(o->state, o->pstate);
	smp_mb__after_clear_bit();
}
286
/* Set the object's pending-state bit. NOTE(review): the *_clear_bit()
 * barrier flavors are used around set_bit() as well - in this kernel era
 * they were the generic full barriers for non-returning atomic bitops.
 */
static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
{
	smp_mb__before_clear_bit();
	set_bit(o->state, o->pstate);
	smp_mb__after_clear_bit();
}
293
294
295
296
297
298
299
300
301
/**
 * bnx2x_state_wait - poll until @state bit is cleared in @pstate
 *
 * @bp:     device handle
 * @state:  bit number to wait on (cleared by the completion handler)
 * @pstate: bitmap holding the state bit
 *
 * Sleeps in ~1ms steps for up to ~5s (20x longer on emulation).
 * Returns 0 on success, -EIO if the driver paniced meanwhile,
 * -EBUSY on timeout (panics if BNX2X_STOP_ON_ERROR is configured).
 */
static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
				   unsigned long *pstate)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	/* Emulation is far slower than real silicon */
	if (CHIP_REV_IS_EMUL(bp))
		cnt *= 20;

	DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);

	might_sleep();
	while (cnt--) {
		if (!test_bit(state, pstate)) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(BNX2X_MSG_SP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		usleep_range(1000, 1000);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout waiting for state %d\n", state);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
337
/* Wait for the raw object's pending bit to clear (ramrod completion). */
static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
{
	return bnx2x_state_wait(bp, raw->state, raw->pstate);
}
342
343
344
345static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
346{
347 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
348
349 WARN_ON(!mp);
350
351 return mp->get_entry(mp, offset);
352}
353
354static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
355{
356 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
357
358 WARN_ON(!mp);
359
360 return mp->get(mp, 1);
361}
362
363static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
364{
365 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
366
367 WARN_ON(!vp);
368
369 return vp->get_entry(vp, offset);
370}
371
372static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
373{
374 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
375
376 WARN_ON(!vp);
377
378 return vp->get(vp, 1);
379}
380
381static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
382{
383 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
384 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
385
386 if (!mp->get(mp, 1))
387 return false;
388
389 if (!vp->get(vp, 1)) {
390 mp->put(mp, 1);
391 return false;
392 }
393
394 return true;
395}
396
397static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
398{
399 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
400
401 return mp->put_entry(mp, offset);
402}
403
404static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
405{
406 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
407
408 return mp->put(mp, 1);
409}
410
411static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
412{
413 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
414
415 return vp->put_entry(vp, offset);
416}
417
418static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
419{
420 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
421
422 return vp->put(vp, 1);
423}
424
425static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
426{
427 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
428 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
429
430 if (!mp->put(mp, 1))
431 return false;
432
433 if (!vp->put(vp, 1)) {
434 mp->get(mp, 1);
435 return false;
436 }
437
438 return true;
439}
440
441
442static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o,
443 union bnx2x_classification_ramrod_data *data)
444{
445 struct bnx2x_vlan_mac_registry_elem *pos;
446
447 if (!is_valid_ether_addr(data->mac.mac))
448 return -EINVAL;
449
450
451 list_for_each_entry(pos, &o->head, link)
452 if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
453 return -EEXIST;
454
455 return 0;
456}
457
458static int bnx2x_check_vlan_add(struct bnx2x_vlan_mac_obj *o,
459 union bnx2x_classification_ramrod_data *data)
460{
461 struct bnx2x_vlan_mac_registry_elem *pos;
462
463 list_for_each_entry(pos, &o->head, link)
464 if (data->vlan.vlan == pos->u.vlan.vlan)
465 return -EEXIST;
466
467 return 0;
468}
469
470static int bnx2x_check_vlan_mac_add(struct bnx2x_vlan_mac_obj *o,
471 union bnx2x_classification_ramrod_data *data)
472{
473 struct bnx2x_vlan_mac_registry_elem *pos;
474
475 list_for_each_entry(pos, &o->head, link)
476 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
477 (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
478 ETH_ALEN)))
479 return -EEXIST;
480
481 return 0;
482}
483
484
485
486static struct bnx2x_vlan_mac_registry_elem *
487 bnx2x_check_mac_del(struct bnx2x_vlan_mac_obj *o,
488 union bnx2x_classification_ramrod_data *data)
489{
490 struct bnx2x_vlan_mac_registry_elem *pos;
491
492 list_for_each_entry(pos, &o->head, link)
493 if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
494 return pos;
495
496 return NULL;
497}
498
499static struct bnx2x_vlan_mac_registry_elem *
500 bnx2x_check_vlan_del(struct bnx2x_vlan_mac_obj *o,
501 union bnx2x_classification_ramrod_data *data)
502{
503 struct bnx2x_vlan_mac_registry_elem *pos;
504
505 list_for_each_entry(pos, &o->head, link)
506 if (data->vlan.vlan == pos->u.vlan.vlan)
507 return pos;
508
509 return NULL;
510}
511
512static struct bnx2x_vlan_mac_registry_elem *
513 bnx2x_check_vlan_mac_del(struct bnx2x_vlan_mac_obj *o,
514 union bnx2x_classification_ramrod_data *data)
515{
516 struct bnx2x_vlan_mac_registry_elem *pos;
517
518 list_for_each_entry(pos, &o->head, link)
519 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
520 (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
521 ETH_ALEN)))
522 return pos;
523
524 return NULL;
525}
526
527
528static bool bnx2x_check_move(struct bnx2x_vlan_mac_obj *src_o,
529 struct bnx2x_vlan_mac_obj *dst_o,
530 union bnx2x_classification_ramrod_data *data)
531{
532 struct bnx2x_vlan_mac_registry_elem *pos;
533 int rc;
534
535
536
537
538 pos = src_o->check_del(src_o, data);
539
540
541 rc = dst_o->check_add(dst_o, data);
542
543
544
545
546 if (rc || !pos)
547 return false;
548
549 return true;
550}
551
/* check_move callback that unconditionally refuses a MOVE - used by
 * objects that do not support moving entries between queues.
 */
static bool bnx2x_check_move_always_err(
	struct bnx2x_vlan_mac_obj *src_o,
	struct bnx2x_vlan_mac_obj *dst_o,
	union bnx2x_classification_ramrod_data *data)
{
	return false;
}
559
560
561static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
562{
563 struct bnx2x_raw_obj *raw = &o->raw;
564 u8 rx_tx_flag = 0;
565
566 if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
567 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
568 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
569
570 if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
571 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
572 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
573
574 return rx_tx_flag;
575}
576
577
/* LLH (NIG) CAM line allocation used by bnx2x_set_mac_in_nig(). */
enum {
	LLH_CAM_ISCSI_ETH_LINE = 0,
	LLH_CAM_ETH_LINE,
	LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
};
583
/**
 * bnx2x_set_mac_in_nig - program a MAC into the NIG LLH CAM
 *
 * @bp:       device handle
 * @add:      true to write and enable the entry, false to disable it
 * @dev_addr: MAC address (network byte order)
 * @index:    LLH CAM line to program
 *
 * Only relevant in MF_SI mode; silently ignored otherwise or when @index
 * is out of range. NOTE(review): the bound check uses '>' so
 * index == LLH_CAM_MAX_PF_LINE is accepted - confirm that is intended.
 */
static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp,
	bool add, unsigned char *dev_addr, int index)
{
	u32 wb_data[2];
	u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
			 NIG_REG_LLH0_FUNC_MEM;

	if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
		return;

	DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
	   (add ? "ADD" : "DELETE"), index);

	if (add) {
		/* LLH_FUNC_MEM is a 64-bit wide-bus register: 8 bytes/line */
		reg_offset += 8*index;

		/* Low word: last 4 MAC bytes, high word: first 2 bytes */
		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
			      (dev_addr[4] << 8) | dev_addr[5]);
		wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);

		REG_WR_DMAE(bp, reg_offset, wb_data, 2);
	}

	/* Enable (add) or disable (delete) the entry */
	REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
				  NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
}
611
612
613
614
615
616
617
618
619
620
621
622static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
623 struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
624 struct eth_classify_cmd_header *hdr)
625{
626 struct bnx2x_raw_obj *raw = &o->raw;
627
628 hdr->client_id = raw->cl_id;
629 hdr->func_id = raw->func_id;
630
631
632 hdr->cmd_general_data |=
633 bnx2x_vlan_mac_get_rx_tx_flag(o);
634
635 if (add)
636 hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
637
638 hdr->cmd_general_data |=
639 (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
640}
641
642
643
644
645
646
647
648
649
650
651
652
653static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
654 struct eth_classify_header *hdr, int rule_cnt)
655{
656 hdr->echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT);
657 hdr->rule_cnt = (u8)rule_cnt;
658}
659
660
661
/**
 * bnx2x_set_one_mac_e2 - fill a single MAC classification rule (E2+)
 *
 * @bp:         device handle
 * @o:          vlan_mac object owning the rule
 * @elem:       queued command being programmed
 * @rule_idx:   index of this rule in the ramrod data buffer
 * @cam_offset: unused on E2 - kept for the set_one_rule() signature
 *
 * A MOVE command emits two rules: a delete on this object and an add on
 * the target object.
 */
static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
				 struct bnx2x_vlan_mac_obj *o,
				 struct bnx2x_exeq_elem *elem, int rule_idx,
				 int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
	u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;

	/* Set the LLH CAM entry: only iSCSI and ETH MACs are mirrored into
	 * the NIG, and a MOVE is not - only plain ADD/DEL touch the LLH.
	 */
	if (cmd != BNX2X_VLAN_MAC_MOVE) {
		if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
			bnx2x_set_mac_in_nig(bp, add, mac,
					LLH_CAM_ISCSI_ETH_LINE);
		else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
			bnx2x_set_mac_in_nig(bp, add, mac, LLH_CAM_ETH_LINE);
	}

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Setup a command header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
				      &rule_entry->mac.header);

	DP(BNX2X_MSG_SP, "About to %s MAC "BNX2X_MAC_FMT" for "
			 "Queue %d\n", (add ? "add" : "delete"),
	   BNX2X_MAC_PRN_LIST(mac), raw->cl_id);

	/* Set the MAC itself */
	bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
			      &rule_entry->mac.mac_mid,
			      &rule_entry->mac.mac_lsb, mac);

	/* MOVE: add a second rule that adds this MAC to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data: ADD on the target object */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					      true, CLASSIFY_RULE_OPCODE_MAC,
					      &rule_entry->mac.header);

		/* Set the MAC itself */
		bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
				      &rule_entry->mac.mac_mid,
				      &rule_entry->mac.mac_lsb, mac);
	}

	/* Set the ramrod data header.
	 * TODO(review): could be hoisted to the caller to avoid rewriting
	 * it for every rule in the chunk.
	 */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}
742
743
744
745
746
747
748
749
750
751
752
753
754static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
755 struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
756 struct mac_configuration_hdr *hdr)
757{
758 struct bnx2x_raw_obj *r = &o->raw;
759
760 hdr->length = 1;
761 hdr->offset = (u8)cam_offset;
762 hdr->client_id = 0xff;
763 hdr->echo = ((r->cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT));
764}
765
766static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
767 struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
768 u16 vlan_id, struct mac_configuration_entry *cfg_entry)
769{
770 struct bnx2x_raw_obj *r = &o->raw;
771 u32 cl_bit_vec = (1 << r->cl_id);
772
773 cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
774 cfg_entry->pf_id = r->func_id;
775 cfg_entry->vlan_id = cpu_to_le16(vlan_id);
776
777 if (add) {
778 SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
779 T_ETH_MAC_COMMAND_SET);
780 SET_FLAG(cfg_entry->flags,
781 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);
782
783
784 bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
785 &cfg_entry->middle_mac_addr,
786 &cfg_entry->lsb_mac_addr, mac);
787 } else
788 SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
789 T_ETH_MAC_COMMAND_INVALIDATE);
790}
791
792static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
793 struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
794 u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
795{
796 struct mac_configuration_entry *cfg_entry = &config->config_table[0];
797 struct bnx2x_raw_obj *raw = &o->raw;
798
799 bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
800 &config->hdr);
801 bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
802 cfg_entry);
803
804 DP(BNX2X_MSG_SP, "%s MAC "BNX2X_MAC_FMT" CLID %d CAM offset %d\n",
805 (add ? "setting" : "clearing"),
806 BNX2X_MAC_PRN_LIST(mac), raw->cl_id, cam_offset);
807}
808
809
810
811
812
813
814
815
816
817
818static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
819 struct bnx2x_vlan_mac_obj *o,
820 struct bnx2x_exeq_elem *elem, int rule_idx,
821 int cam_offset)
822{
823 struct bnx2x_raw_obj *raw = &o->raw;
824 struct mac_configuration_cmd *config =
825 (struct mac_configuration_cmd *)(raw->rdata);
826
827
828
829
830 bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
831 true : false;
832
833
834 memset(config, 0, sizeof(*config));
835
836 bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_MAC_PENDING,
837 cam_offset, add,
838 elem->cmd_data.vlan_mac.u.mac.mac, 0,
839 ETH_VLAN_FILTER_ANY_VLAN, config);
840}
841
/**
 * bnx2x_set_one_vlan_e2 - fill a single VLAN classification rule (E2+)
 *
 * @bp:         device handle
 * @o:          vlan_mac object owning the rule
 * @elem:       queued command being programmed
 * @rule_idx:   index of this rule in the ramrod data buffer
 * @cam_offset: unused on E2 - kept for the set_one_rule() signature
 *
 * A MOVE command emits a second ADD rule targeting the destination
 * object.
 */
static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o,
				  struct bnx2x_exeq_elem *elem, int rule_idx,
				  int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	int cmd = elem->cmd_data.vlan_mac.cmd;
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Set a rule header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
				      &rule_entry->vlan.header);

	DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
	   vlan);

	/* Set a VLAN itself */
	rule_entry->vlan.vlan = cpu_to_le16(vlan);

	/* MOVE: add a second rule that adds this VLAN on the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data: ADD on the target object */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					      true, CLASSIFY_RULE_OPCODE_VLAN,
					      &rule_entry->vlan.header);

		/* Set a VLAN itself */
		rule_entry->vlan.vlan = cpu_to_le16(vlan);
	}

	/* Set the ramrod data header.
	 * TODO(review): could be hoisted to the caller to avoid rewriting
	 * it for every rule in the chunk.
	 */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}
891
/**
 * bnx2x_set_one_vlan_mac_e2 - fill a single VLAN-MAC pair rule (E2+)
 *
 * @bp:         device handle
 * @o:          vlan_mac object owning the rule
 * @elem:       queued command being programmed
 * @rule_idx:   index of this rule in the ramrod data buffer
 * @cam_offset: unused on E2 - kept for the set_one_rule() signature
 *
 * A MOVE command emits a second ADD rule targeting the destination
 * object.
 */
static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
				      struct bnx2x_vlan_mac_obj *o,
				      struct bnx2x_exeq_elem *elem,
				      int rule_idx, int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	int cmd = elem->cmd_data.vlan_mac.cmd;
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
	u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Set a rule header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
				      &rule_entry->pair.header);

	/* Set the VLAN and MAC themselves */
	rule_entry->pair.vlan = cpu_to_le16(vlan);
	bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
			      &rule_entry->pair.mac_mid,
			      &rule_entry->pair.mac_lsb, mac);

	/* MOVE: add a second rule that adds this pair on the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data: ADD on the target object */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					      true, CLASSIFY_RULE_OPCODE_PAIR,
					      &rule_entry->pair.header);

		/* Set the VLAN and MAC themselves */
		rule_entry->pair.vlan = cpu_to_le16(vlan);
		bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
				      &rule_entry->pair.mac_mid,
				      &rule_entry->pair.mac_lsb, mac);
	}

	/* Set the ramrod data header.
	 * TODO(review): could be hoisted to the caller to avoid rewriting
	 * it for every rule in the chunk.
	 */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}
946
947
948
949
950
951
952
953
954
955
956static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
957 struct bnx2x_vlan_mac_obj *o,
958 struct bnx2x_exeq_elem *elem,
959 int rule_idx, int cam_offset)
960{
961 struct bnx2x_raw_obj *raw = &o->raw;
962 struct mac_configuration_cmd *config =
963 (struct mac_configuration_cmd *)(raw->rdata);
964
965
966
967
968 bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
969 true : false;
970
971
972 memset(config, 0, sizeof(*config));
973
974 bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
975 cam_offset, add,
976 elem->cmd_data.vlan_mac.u.vlan_mac.mac,
977 elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
978 ETH_VLAN_FILTER_CLASSIFY, config);
979}
980
/* Return the list entry following @pos (local helper macro). */
#define list_next_entry(pos, member) \
	list_entry((pos)->member.next, typeof(*(pos)), member)
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
/**
 * bnx2x_vlan_mac_restore - re-issue an ADD for the next registry entry
 *
 * @bp:   device handle
 * @p:    ramrod parameters; RAMROD_RESTORE is set in p->ramrod_flags
 * @ppos: iteration cookie - pass NULL to start from the registry head;
 *        set back to NULL once the last element has been handled
 *
 * Used to re-program previously configured entries (e.g. after reset).
 * Returns the bnx2x_config_vlan_mac() result, or 0 when the registry is
 * empty.
 */
static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_ramrod_params *p,
				  struct bnx2x_vlan_mac_registry_elem **ppos)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;

	/* Nothing registered - nothing to restore */
	if (list_empty(&o->head)) {
		*ppos = NULL;
		return 0;
	}

	/* Advance the cursor: first element on a fresh iteration,
	 * otherwise the element after the previously handled one.
	 */
	if (*ppos == NULL)
		*ppos = list_first_entry(&o->head,
					 struct bnx2x_vlan_mac_registry_elem,
					 link);
	else
		*ppos = list_next_entry(*ppos, link);

	pos = *ppos;

	/* Signal end-of-iteration to the caller */
	if (list_is_last(&pos->link, &o->head))
		*ppos = NULL;

	/* Prepare an ADD command with the registered classification data */
	memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));

	/* Set the command */
	p->user_req.cmd = BNX2X_VLAN_MAC_ADD;

	/* Preserve the original entry's flags */
	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;

	/* RESTORE skips validation/optimization and CAM credit accounting */
	__set_bit(RAMROD_RESTORE, &p->ramrod_flags);

	return bnx2x_config_vlan_mac(bp, p);
}
1044
1045
1046
1047
1048
1049
1050static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
1051 struct bnx2x_exe_queue_obj *o,
1052 struct bnx2x_exeq_elem *elem)
1053{
1054 struct bnx2x_exeq_elem *pos;
1055 struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
1056
1057
1058 list_for_each_entry(pos, &o->exe_queue, link)
1059 if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
1060 sizeof(*data)) &&
1061 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1062 return pos;
1063
1064 return NULL;
1065}
1066
1067static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
1068 struct bnx2x_exe_queue_obj *o,
1069 struct bnx2x_exeq_elem *elem)
1070{
1071 struct bnx2x_exeq_elem *pos;
1072 struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
1073
1074
1075 list_for_each_entry(pos, &o->exe_queue, link)
1076 if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
1077 sizeof(*data)) &&
1078 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1079 return pos;
1080
1081 return NULL;
1082}
1083
1084static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
1085 struct bnx2x_exe_queue_obj *o,
1086 struct bnx2x_exeq_elem *elem)
1087{
1088 struct bnx2x_exeq_elem *pos;
1089 struct bnx2x_vlan_mac_ramrod_data *data =
1090 &elem->cmd_data.vlan_mac.u.vlan_mac;
1091
1092
1093 list_for_each_entry(pos, &o->exe_queue, link)
1094 if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
1095 sizeof(*data)) &&
1096 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1097 return pos;
1098
1099 return NULL;
1100}
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
1116 union bnx2x_qable_obj *qo,
1117 struct bnx2x_exeq_elem *elem)
1118{
1119 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1120 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1121 int rc;
1122
1123
1124 rc = o->check_add(o, &elem->cmd_data.vlan_mac.u);
1125 if (rc) {
1126 DP(BNX2X_MSG_SP, "ADD command is not allowed considering "
1127 "current registry state\n");
1128 return rc;
1129 }
1130
1131
1132
1133
1134
1135 if (exeq->get(exeq, elem)) {
1136 DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
1137 return -EEXIST;
1138 }
1139
1140
1141
1142
1143
1144
1145
1146 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1147 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1148 o->get_credit(o)))
1149 return -EINVAL;
1150
1151 return 0;
1152}
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
1167 union bnx2x_qable_obj *qo,
1168 struct bnx2x_exeq_elem *elem)
1169{
1170 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1171 struct bnx2x_vlan_mac_registry_elem *pos;
1172 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1173 struct bnx2x_exeq_elem query_elem;
1174
1175
1176
1177
1178 pos = o->check_del(o, &elem->cmd_data.vlan_mac.u);
1179 if (!pos) {
1180 DP(BNX2X_MSG_SP, "DEL command is not allowed considering "
1181 "current registry state\n");
1182 return -EEXIST;
1183 }
1184
1185
1186
1187
1188
1189 memcpy(&query_elem, elem, sizeof(query_elem));
1190
1191
1192 query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
1193 if (exeq->get(exeq, &query_elem)) {
1194 BNX2X_ERR("There is a pending MOVE command already\n");
1195 return -EINVAL;
1196 }
1197
1198
1199 if (exeq->get(exeq, elem)) {
1200 DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
1201 return -EEXIST;
1202 }
1203
1204
1205 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1206 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1207 o->put_credit(o))) {
1208 BNX2X_ERR("Failed to return a credit\n");
1209 return -EINVAL;
1210 }
1211
1212 return 0;
1213}
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
/**
 * bnx2x_validate_vlan_mac_move - check whether a MOVE may be queued
 *
 * @bp:   device handle
 * @qo:   source queueable object (vlan_mac flavor)
 * @elem: MOVE command; elem->cmd_data.vlan_mac.target_obj is the
 *        destination
 *
 * The entry must be movable per the source's check_move(), no DEL/MOVE
 * may be queued on the source and no ADD on the destination. On success
 * a destination CAM credit is consumed and a source credit is returned
 * (each unless the respective DONT_CONSUME flag is set). Returns 0 if
 * the command may proceed.
 */
static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
					       union bnx2x_qable_obj *qo,
					       struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
	struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
	struct bnx2x_exeq_elem query_elem;
	struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
	struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;

	/* Check that the move is legal given both registries' state */
	if (!src_o->check_move(src_o, dest_o, &elem->cmd_data.vlan_mac.u)) {
		DP(BNX2X_MSG_SP, "MOVE command is not allowed considering "
				 "current registry state\n");
		return -EINVAL;
	}

	/* A queued DEL on the source would race with this MOVE */
	memcpy(&query_elem, elem, sizeof(query_elem));

	/* Check DEL on source */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
	if (src_exeq->get(src_exeq, &query_elem)) {
		BNX2X_ERR("There is a pending DEL command on the source "
			  "queue already\n");
		return -EINVAL;
	}

	/* Check MOVE on source - don't queue the same MOVE twice */
	if (src_exeq->get(src_exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
		return -EEXIST;
	}

	/* Check ADD on destination */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
	if (dest_exeq->get(dest_exeq, &query_elem)) {
		BNX2X_ERR("There is a pending ADD command on the "
			  "destination queue already\n");
		return -EINVAL;
	}

	/* Consume the credit of the destination CAM if requested */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      dest_o->get_credit(dest_o)))
		return -EINVAL;

	/* Return the credit of the source CAM; on failure roll back the
	 * destination credit taken above to stay balanced.
	 */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      src_o->put_credit(src_o))) {
		/* return the credit taken from dest... */
		dest_o->put_credit(dest_o);
		return -EINVAL;
	}

	return 0;
}
1292
/* Top-level exe_q_validate callback: dispatch to the per-command
 * validator. Unknown commands are rejected.
 */
static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
				   union bnx2x_qable_obj *qo,
				   struct bnx2x_exeq_elem *elem)
{
	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
		return bnx2x_validate_vlan_mac_add(bp, qo, elem);
	case BNX2X_VLAN_MAC_DEL:
		return bnx2x_validate_vlan_mac_del(bp, qo, elem);
	case BNX2X_VLAN_MAC_MOVE:
		return bnx2x_validate_vlan_mac_move(bp, qo, elem);
	default:
		return -EINVAL;
	}
}
1308
1309
1310
1311
1312
1313
1314
1315
1316static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
1317 struct bnx2x_vlan_mac_obj *o)
1318{
1319 int cnt = 5000, rc;
1320 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1321 struct bnx2x_raw_obj *raw = &o->raw;
1322
1323 while (cnt--) {
1324
1325 rc = raw->wait_comp(bp, raw);
1326 if (rc)
1327 return rc;
1328
1329
1330 if (!bnx2x_exe_queue_empty(exeq))
1331 usleep_range(1000, 1000);
1332 else
1333 return 0;
1334 }
1335
1336 return -EBUSY;
1337}
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
/**
 * bnx2x_complete_vlan_mac - handle a VLAN/MAC ramrod completion
 *
 * @bp:           device handle
 * @o:            vlan_mac object the completion belongs to
 * @cqe:          completion element from the event ring
 * @ramrod_flags: flags controlling whether to continue execution
 *
 * Returns 0 when everything is done, 1 when more commands remain queued,
 * -EINVAL when the CQE reports an error, or a bnx2x_exe_queue_step()
 * error.
 */
static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
				   struct bnx2x_vlan_mac_obj *o,
				   union event_ring_elem *cqe,
				   unsigned long *ramrod_flags)
{
	struct bnx2x_raw_obj *r = &o->raw;
	int rc;

	/* Reset pending list - the completed chunk is done */
	bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);

	/* Clear pending so a new execution may be started */
	r->clear_pending(r);

	/* If ramrod failed this is most likely a SW bug */
	if (cqe->message.error)
		return -EINVAL;

	/* Run the next bulk of pending commands if requested */
	if (test_bit(RAMROD_CONT, ramrod_flags)) {
		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/* If there is more work to do return PENDING */
	if (!bnx2x_exe_queue_empty(&o->exe_queue))
		return 1;

	return 0;
}
1379
1380
1381
1382
1383
1384
1385
1386
/**
 * bnx2x_optimize_vlan_mac - cancel an ADD/DEL against a queued opposite
 *
 * @bp:   device handle
 * @qo:   queueable object (vlan_mac flavor)
 * @elem: command being added to the queue
 *
 * If the execution queue already holds the opposite command for the same
 * data, both commands cancel out: the queued one is removed and freed,
 * and credit accounting is re-balanced. Returns 1 when the pair was
 * optimized away, 0 when there was nothing to optimize (MOVE is never
 * optimized), negative on a credit accounting failure.
 */
static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
				   union bnx2x_qable_obj *qo,
				   struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem query, *pos;
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;

	memcpy(&query, elem, sizeof(query));

	/* Build a query for the opposite command on the same data */
	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
		break;
	case BNX2X_VLAN_MAC_DEL:
		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
		break;
	default:
		/* Don't handle anything other than ADD or DEL */
		return 0;
	}

	/* If we found the appropriate element - delete it */
	pos = exeq->get(exeq, &query);
	if (pos) {

		/* Rebalance CAM credit unless the queued command was
		 * flagged not to consume/return one.
		 */
		if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
			      &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
			/* Cancelled a queued DEL with an ADD: the queued DEL
			 * had returned a credit, take it back; cancelled a
			 * queued ADD with a DEL: return the queued ADD's
			 * credit.
			 */
			if ((query.cmd_data.vlan_mac.cmd ==
			     BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
				BNX2X_ERR("Failed to return the credit for the "
					  "optimized ADD command\n");
				return -EINVAL;
			} else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
				BNX2X_ERR("Failed to recover the credit from "
					  "the optimized DEL command\n");
				return -EINVAL;
			}
		}

		DP(BNX2X_MSG_SP, "Optimizing %s command\n",
		   (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
		   "ADD" : "DEL");

		list_del(&pos->link);
		bnx2x_exe_queue_free_elem(bp, pos);
		return 1;
	}

	return 0;
}
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
/**
 * bnx2x_vlan_mac_get_registry_elem - obtain the registry element for a
 * command
 *
 * @bp:      device handle
 * @o:       object whose registry (and CAM pool) is used
 * @elem:    command being executed
 * @restore: true when re-programming known-good state
 * @re:      out: the registry element (newly allocated for non-restore
 *           ADD/MOVE, otherwise looked up via check_del())
 *
 * Returns 0 on success (note: *re may still be NULL on the lookup path),
 * -ENOMEM on allocation failure, -EINVAL when no CAM offset is
 * available.
 */
static inline int bnx2x_vlan_mac_get_registry_elem(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o,
	struct bnx2x_exeq_elem *elem,
	bool restore,
	struct bnx2x_vlan_mac_registry_elem **re)
{
	int cmd = elem->cmd_data.vlan_mac.cmd;
	struct bnx2x_vlan_mac_registry_elem *reg_elem;

	/* Allocate a new registry element when adding */
	if (!restore &&
	    ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
		reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
		if (!reg_elem)
			return -ENOMEM;

		/* Get a new CAM offset */
		if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
			/* This shall never happen, because the validate step
			 * already took a credit; running out of CAM lines
			 * here means credit accounting is broken.
			 */
			WARN_ON(1);
			kfree(reg_elem);
			return -EINVAL;
		}

		DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);

		/* Set a VLAN-MAC data */
		memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
			  sizeof(reg_elem->u));

		/* Copy the flags (needed for DEL and RESTORE flows) */
		reg_elem->vlan_mac_flags =
			elem->cmd_data.vlan_mac.vlan_mac_flags;
	} else /* DEL, RESTORE */
		reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);

	*re = reg_elem;
	return 0;
}
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
1506 union bnx2x_qable_obj *qo,
1507 struct list_head *exe_chunk,
1508 unsigned long *ramrod_flags)
1509{
1510 struct bnx2x_exeq_elem *elem;
1511 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1512 struct bnx2x_raw_obj *r = &o->raw;
1513 int rc, idx = 0;
1514 bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
1515 bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1516 struct bnx2x_vlan_mac_registry_elem *reg_elem;
1517 int cmd;
1518
1519
1520
1521
1522
1523 if (!drv_only) {
1524 WARN_ON(r->check_pending(r));
1525
1526
1527 r->set_pending(r);
1528
1529
1530 list_for_each_entry(elem, exe_chunk, link) {
1531 cmd = elem->cmd_data.vlan_mac.cmd;
1532
1533
1534
1535
1536 if (cmd == BNX2X_VLAN_MAC_MOVE)
1537 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1538 else
1539 cam_obj = o;
1540
1541 rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
1542 elem, restore,
1543 ®_elem);
1544 if (rc)
1545 goto error_exit;
1546
1547 WARN_ON(!reg_elem);
1548
1549
1550 if (!restore &&
1551 ((cmd == BNX2X_VLAN_MAC_ADD) ||
1552 (cmd == BNX2X_VLAN_MAC_MOVE)))
1553 list_add(®_elem->link, &cam_obj->head);
1554
1555
1556 o->set_one_rule(bp, o, elem, idx,
1557 reg_elem->cam_offset);
1558
1559
1560 if (cmd == BNX2X_VLAN_MAC_MOVE)
1561 idx += 2;
1562 else
1563 idx++;
1564 }
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574 rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
1575 U64_HI(r->rdata_mapping),
1576 U64_LO(r->rdata_mapping),
1577 ETH_CONNECTION_TYPE);
1578 if (rc)
1579 goto error_exit;
1580 }
1581
1582
1583 list_for_each_entry(elem, exe_chunk, link) {
1584 cmd = elem->cmd_data.vlan_mac.cmd;
1585 if ((cmd == BNX2X_VLAN_MAC_DEL) ||
1586 (cmd == BNX2X_VLAN_MAC_MOVE)) {
1587 reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);
1588
1589 WARN_ON(!reg_elem);
1590
1591 o->put_cam_offset(o, reg_elem->cam_offset);
1592 list_del(®_elem->link);
1593 kfree(reg_elem);
1594 }
1595 }
1596
1597 if (!drv_only)
1598 return 1;
1599 else
1600 return 0;
1601
1602error_exit:
1603 r->clear_pending(r);
1604
1605
1606 list_for_each_entry(elem, exe_chunk, link) {
1607 cmd = elem->cmd_data.vlan_mac.cmd;
1608
1609 if (cmd == BNX2X_VLAN_MAC_MOVE)
1610 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1611 else
1612 cam_obj = o;
1613
1614
1615 if (!restore &&
1616 ((cmd == BNX2X_VLAN_MAC_ADD) ||
1617 (cmd == BNX2X_VLAN_MAC_MOVE))) {
1618 reg_elem = o->check_del(cam_obj,
1619 &elem->cmd_data.vlan_mac.u);
1620 if (reg_elem) {
1621 list_del(®_elem->link);
1622 kfree(reg_elem);
1623 }
1624 }
1625 }
1626
1627 return rc;
1628}
1629
1630static inline int bnx2x_vlan_mac_push_new_cmd(
1631 struct bnx2x *bp,
1632 struct bnx2x_vlan_mac_ramrod_params *p)
1633{
1634 struct bnx2x_exeq_elem *elem;
1635 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1636 bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);
1637
1638
1639 elem = bnx2x_exe_queue_alloc_elem(bp);
1640 if (!elem)
1641 return -ENOMEM;
1642
1643
1644 switch (p->user_req.cmd) {
1645 case BNX2X_VLAN_MAC_MOVE:
1646 elem->cmd_len = 2;
1647 break;
1648 default:
1649 elem->cmd_len = 1;
1650 }
1651
1652
1653 memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
1654
1655
1656 return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
1657}
1658
1659
1660
1661
1662
1663
1664
1665
/**
 * bnx2x_config_vlan_mac - push a vlan_mac command and optionally execute it
 *
 * @bp: device handle
 * @p:  command parameters: object, user request and RAMROD_XX flags
 *
 * Unless RAMROD_CONT is set, the user request is first queued as a new
 * execution-queue element.  Execution is then driven according to the
 * flags; with RAMROD_COMP_WAIT the queue is drained synchronously.
 *
 * Returns a negative errno on failure, 0 when the queue was drained
 * (RAMROD_COMP_WAIT path), and a positive value while commands remain
 * pending.
 */
int bnx2x_config_vlan_mac(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_ramrod_params *p)
{
	int rc = 0;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
	unsigned long *ramrod_flags = &p->ramrod_flags;
	bool cont = test_bit(RAMROD_CONT, ramrod_flags);
	struct bnx2x_raw_obj *raw = &o->raw;

	/* RAMROD_CONT means "only continue executing what is already
	 * queued" - do not add a new element in that case.
	 */
	if (!cont) {
		rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
		if (rc)
			return rc;
	}

	/* If nothing will be executed further in this iteration we want to
	 * report PENDING (positive) when there are queued commands.
	 */
	if (!bnx2x_exe_queue_empty(&o->exe_queue))
		rc = 1;

	if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
		DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: "
		   "clearing a pending bit.\n");
		raw->clear_pending(raw);
	}

	/* Execute commands if required */
	if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
	    test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC: besides executing,
	 * wait until the whole execution queue has been drained, one
	 * completion + step at a time.
	 */
	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
		/* Bound the loop by the current queue length + 1 so a stuck
		 * queue cannot spin forever.
		 */
		int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;

		while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
		       max_iterations--) {

			/* Wait for the current command's completion */
			rc = raw->wait_comp(bp, raw);
			if (rc)
				return rc;

			/* Make the next step of the execution queue */
			rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
						  ramrod_flags);
			if (rc < 0)
				return rc;
		}

		return 0;
	}

	return rc;
}
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
1754 struct bnx2x_vlan_mac_obj *o,
1755 unsigned long *vlan_mac_flags,
1756 unsigned long *ramrod_flags)
1757{
1758 struct bnx2x_vlan_mac_registry_elem *pos = NULL;
1759 int rc = 0;
1760 struct bnx2x_vlan_mac_ramrod_params p;
1761 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1762 struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
1763
1764
1765
1766 spin_lock_bh(&exeq->lock);
1767
1768 list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
1769 if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
1770 *vlan_mac_flags)
1771 list_del(&exeq_pos->link);
1772 }
1773
1774 spin_unlock_bh(&exeq->lock);
1775
1776
1777 memset(&p, 0, sizeof(p));
1778 p.vlan_mac_obj = o;
1779 p.ramrod_flags = *ramrod_flags;
1780 p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
1781
1782
1783
1784
1785
1786 __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
1787 __clear_bit(RAMROD_EXEC, &p.ramrod_flags);
1788 __clear_bit(RAMROD_CONT, &p.ramrod_flags);
1789
1790 list_for_each_entry(pos, &o->head, link) {
1791 if (pos->vlan_mac_flags == *vlan_mac_flags) {
1792 p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
1793 memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
1794 rc = bnx2x_config_vlan_mac(bp, &p);
1795 if (rc < 0) {
1796 BNX2X_ERR("Failed to add a new DEL command\n");
1797 return rc;
1798 }
1799 }
1800 }
1801
1802 p.ramrod_flags = *ramrod_flags;
1803 __set_bit(RAMROD_CONT, &p.ramrod_flags);
1804
1805 return bnx2x_config_vlan_mac(bp, &p);
1806}
1807
1808static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
1809 u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
1810 unsigned long *pstate, bnx2x_obj_type type)
1811{
1812 raw->func_id = func_id;
1813 raw->cid = cid;
1814 raw->cl_id = cl_id;
1815 raw->rdata = rdata;
1816 raw->rdata_mapping = rdata_mapping;
1817 raw->state = state;
1818 raw->pstate = pstate;
1819 raw->obj_type = type;
1820 raw->check_pending = bnx2x_raw_check_pending;
1821 raw->clear_pending = bnx2x_raw_clear_pending;
1822 raw->set_pending = bnx2x_raw_set_pending;
1823 raw->wait_comp = bnx2x_raw_wait;
1824}
1825
1826static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
1827 u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
1828 int state, unsigned long *pstate, bnx2x_obj_type type,
1829 struct bnx2x_credit_pool_obj *macs_pool,
1830 struct bnx2x_credit_pool_obj *vlans_pool)
1831{
1832 INIT_LIST_HEAD(&o->head);
1833
1834 o->macs_pool = macs_pool;
1835 o->vlans_pool = vlans_pool;
1836
1837 o->delete_all = bnx2x_vlan_mac_del_all;
1838 o->restore = bnx2x_vlan_mac_restore;
1839 o->complete = bnx2x_complete_vlan_mac;
1840 o->wait = bnx2x_wait_vlan_mac;
1841
1842 bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
1843 state, pstate, type);
1844}
1845
1846
1847void bnx2x_init_mac_obj(struct bnx2x *bp,
1848 struct bnx2x_vlan_mac_obj *mac_obj,
1849 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1850 dma_addr_t rdata_mapping, int state,
1851 unsigned long *pstate, bnx2x_obj_type type,
1852 struct bnx2x_credit_pool_obj *macs_pool)
1853{
1854 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
1855
1856 bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
1857 rdata_mapping, state, pstate, type,
1858 macs_pool, NULL);
1859
1860
1861 mac_obj->get_credit = bnx2x_get_credit_mac;
1862 mac_obj->put_credit = bnx2x_put_credit_mac;
1863 mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
1864 mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
1865
1866 if (CHIP_IS_E1x(bp)) {
1867 mac_obj->set_one_rule = bnx2x_set_one_mac_e1x;
1868 mac_obj->check_del = bnx2x_check_mac_del;
1869 mac_obj->check_add = bnx2x_check_mac_add;
1870 mac_obj->check_move = bnx2x_check_move_always_err;
1871 mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
1872
1873
1874 bnx2x_exe_queue_init(bp,
1875 &mac_obj->exe_queue, 1, qable_obj,
1876 bnx2x_validate_vlan_mac,
1877 bnx2x_optimize_vlan_mac,
1878 bnx2x_execute_vlan_mac,
1879 bnx2x_exeq_get_mac);
1880 } else {
1881 mac_obj->set_one_rule = bnx2x_set_one_mac_e2;
1882 mac_obj->check_del = bnx2x_check_mac_del;
1883 mac_obj->check_add = bnx2x_check_mac_add;
1884 mac_obj->check_move = bnx2x_check_move;
1885 mac_obj->ramrod_cmd =
1886 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1887
1888
1889 bnx2x_exe_queue_init(bp,
1890 &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
1891 qable_obj, bnx2x_validate_vlan_mac,
1892 bnx2x_optimize_vlan_mac,
1893 bnx2x_execute_vlan_mac,
1894 bnx2x_exeq_get_mac);
1895 }
1896}
1897
1898void bnx2x_init_vlan_obj(struct bnx2x *bp,
1899 struct bnx2x_vlan_mac_obj *vlan_obj,
1900 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1901 dma_addr_t rdata_mapping, int state,
1902 unsigned long *pstate, bnx2x_obj_type type,
1903 struct bnx2x_credit_pool_obj *vlans_pool)
1904{
1905 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
1906
1907 bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
1908 rdata_mapping, state, pstate, type, NULL,
1909 vlans_pool);
1910
1911 vlan_obj->get_credit = bnx2x_get_credit_vlan;
1912 vlan_obj->put_credit = bnx2x_put_credit_vlan;
1913 vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
1914 vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
1915
1916 if (CHIP_IS_E1x(bp)) {
1917 BNX2X_ERR("Do not support chips others than E2 and newer\n");
1918 BUG();
1919 } else {
1920 vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2;
1921 vlan_obj->check_del = bnx2x_check_vlan_del;
1922 vlan_obj->check_add = bnx2x_check_vlan_add;
1923 vlan_obj->check_move = bnx2x_check_move;
1924 vlan_obj->ramrod_cmd =
1925 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1926
1927
1928 bnx2x_exe_queue_init(bp,
1929 &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
1930 qable_obj, bnx2x_validate_vlan_mac,
1931 bnx2x_optimize_vlan_mac,
1932 bnx2x_execute_vlan_mac,
1933 bnx2x_exeq_get_vlan);
1934 }
1935}
1936
1937void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
1938 struct bnx2x_vlan_mac_obj *vlan_mac_obj,
1939 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1940 dma_addr_t rdata_mapping, int state,
1941 unsigned long *pstate, bnx2x_obj_type type,
1942 struct bnx2x_credit_pool_obj *macs_pool,
1943 struct bnx2x_credit_pool_obj *vlans_pool)
1944{
1945 union bnx2x_qable_obj *qable_obj =
1946 (union bnx2x_qable_obj *)vlan_mac_obj;
1947
1948 bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
1949 rdata_mapping, state, pstate, type,
1950 macs_pool, vlans_pool);
1951
1952
1953 vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
1954 vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
1955
1956
1957
1958
1959
1960 vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
1961 vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
1962
1963 if (CHIP_IS_E1(bp)) {
1964 BNX2X_ERR("Do not support chips others than E2\n");
1965 BUG();
1966 } else if (CHIP_IS_E1H(bp)) {
1967 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
1968 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
1969 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
1970 vlan_mac_obj->check_move = bnx2x_check_move_always_err;
1971 vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
1972
1973
1974 bnx2x_exe_queue_init(bp,
1975 &vlan_mac_obj->exe_queue, 1, qable_obj,
1976 bnx2x_validate_vlan_mac,
1977 bnx2x_optimize_vlan_mac,
1978 bnx2x_execute_vlan_mac,
1979 bnx2x_exeq_get_vlan_mac);
1980 } else {
1981 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
1982 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
1983 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
1984 vlan_mac_obj->check_move = bnx2x_check_move;
1985 vlan_mac_obj->ramrod_cmd =
1986 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1987
1988
1989 bnx2x_exe_queue_init(bp,
1990 &vlan_mac_obj->exe_queue,
1991 CLASSIFY_RULES_COUNT,
1992 qable_obj, bnx2x_validate_vlan_mac,
1993 bnx2x_optimize_vlan_mac,
1994 bnx2x_execute_vlan_mac,
1995 bnx2x_exeq_get_vlan_mac);
1996 }
1997
1998}
1999
2000
2001static inline void __storm_memset_mac_filters(struct bnx2x *bp,
2002 struct tstorm_eth_mac_filter_config *mac_filters,
2003 u16 pf_id)
2004{
2005 size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2006
2007 u32 addr = BAR_TSTRORM_INTMEM +
2008 TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2009
2010 __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
2011}
2012
/* Program the rx-mode for E1x chips: update this client's bits in the
 * tstorm MAC-filter configuration image and write it to the chip's
 * internal memory.  Completes synchronously (no ramrod).
 */
static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
				 struct bnx2x_rx_mode_ramrod_params *p)
{
	/* One bit per client in each filter field */
	u32 mask = (1 << p->cl_id);

	struct tstorm_eth_mac_filter_config *mac_filters =
		(struct tstorm_eth_mac_filter_config *)p->rdata;

	/* Default: drop all unicast/multicast, accept-all off */
	u8 drop_all_ucast = 1, drop_all_mcast = 1;
	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
	u8 unmatched_unicast = 0;

	/* Only the RX accept flags are consulted here */
	if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
		/* accept matched ucast */
		drop_all_ucast = 0;

	if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
		/* accept matched mcast */
		drop_all_mcast = 0;

	if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
		/* accept all ucast */
		drop_all_ucast = 0;
		accp_all_ucast = 1;
	}
	if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
		/* accept all mcast */
		drop_all_mcast = 0;
		accp_all_mcast = 1;
	}
	if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
		/* accept (all) bcast */
		accp_all_bcast = 1;
	if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
		/* accept unmatched unicasts */
		unmatched_unicast = 1;

	/* Set or clear this client's mask bit in every filter field */
	mac_filters->ucast_drop_all = drop_all_ucast ?
		mac_filters->ucast_drop_all | mask :
		mac_filters->ucast_drop_all & ~mask;

	mac_filters->mcast_drop_all = drop_all_mcast ?
		mac_filters->mcast_drop_all | mask :
		mac_filters->mcast_drop_all & ~mask;

	mac_filters->ucast_accept_all = accp_all_ucast ?
		mac_filters->ucast_accept_all | mask :
		mac_filters->ucast_accept_all & ~mask;

	mac_filters->mcast_accept_all = accp_all_mcast ?
		mac_filters->mcast_accept_all | mask :
		mac_filters->mcast_accept_all & ~mask;

	mac_filters->bcast_accept_all = accp_all_bcast ?
		mac_filters->bcast_accept_all | mask :
		mac_filters->bcast_accept_all & ~mask;

	mac_filters->unmatched_unicast = unmatched_unicast ?
		mac_filters->unmatched_unicast | mask :
		mac_filters->unmatched_unicast & ~mask;

	DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
	   "accp_mcast 0x%x\naccp_bcast 0x%x\n",
	   mac_filters->ucast_drop_all,
	   mac_filters->mcast_drop_all,
	   mac_filters->ucast_accept_all,
	   mac_filters->mcast_accept_all,
	   mac_filters->bcast_accept_all);

	/* Write the updated image to the chip's internal memory */
	__storm_memset_mac_filters(bp, mac_filters, p->func_id);

	/* The operation completed synchronously - clear the pending bit so
	 * waiters see the state as done.
	 */
	clear_bit(p->state, p->pstate);
	smp_mb__after_clear_bit();

	return 0;
}
2095
2096
2097static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
2098 struct eth_classify_header *hdr,
2099 u8 rule_cnt)
2100{
2101 hdr->echo = cid;
2102 hdr->rule_cnt = rule_cnt;
2103}
2104
2105static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
2106 unsigned long accept_flags,
2107 struct eth_filter_rules_cmd *cmd,
2108 bool clear_accept_all)
2109{
2110 u16 state;
2111
2112
2113 state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2114 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2115
2116 if (accept_flags) {
2117 if (test_bit(BNX2X_ACCEPT_UNICAST, &accept_flags))
2118 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2119
2120 if (test_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags))
2121 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2122
2123 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept_flags)) {
2124 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2125 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2126 }
2127
2128 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags)) {
2129 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2130 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2131 }
2132 if (test_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags))
2133 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2134
2135 if (test_bit(BNX2X_ACCEPT_UNMATCHED, &accept_flags)) {
2136 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2137 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2138 }
2139 if (test_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags))
2140 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2141 }
2142
2143
2144 if (clear_accept_all) {
2145 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2146 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2147 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2148 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2149 }
2150
2151 cmd->state = cpu_to_le16(state);
2152
2153}
2154
/* Build and post an ETH_FILTER_RULES ramrod for E2+ chips: a TX and/or
 * RX rule for the ethernet client and, when BNX2X_RX_MODE_FCOE_ETH is
 * set, an extra pair for the FCoE client with the accept-all bits
 * stripped.  Returns a positive value when the ramrod was sent
 * (completion pending) or a negative errno.
 */
static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
				struct bnx2x_rx_mode_ramrod_params *p)
{
	struct eth_filter_rules_ramrod_data *data = p->rdata;
	int rc;
	u8 rule_idx = 0;

	/* Reset the ramrod data buffer */
	memset(data, 0, sizeof(*data));

	/* Setup ramrod data */

	/* Tx (internal switching) rule for the ethernet client */
	if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
		data->rules[rule_idx].client_id = p->cl_id;
		data->rules[rule_idx].func_id = p->func_id;

		data->rules[rule_idx].cmd_general_data =
			ETH_FILTER_RULES_CMD_TX_CMD;

		bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
			&(data->rules[rule_idx++]), false);
	}

	/* Rx rule for the ethernet client */
	if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
		data->rules[rule_idx].client_id = p->cl_id;
		data->rules[rule_idx].func_id = p->func_id;

		data->rules[rule_idx].cmd_general_data =
			ETH_FILTER_RULES_CMD_RX_CMD;

		bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
			&(data->rules[rule_idx++]), false);
	}

	/* If FCoE queue configuration was requested, add the corresponding
	 * rules for the FCoE client too.  Note clear_accept_all=true for
	 * these rules.
	 */
	if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
		/* Tx (internal switching) */
		if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
			data->rules[rule_idx].func_id = p->func_id;

			data->rules[rule_idx].cmd_general_data =
				ETH_FILTER_RULES_CMD_TX_CMD;

			bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
						       &(data->rules[rule_idx++]),
						       true);
		}

		/* Rx */
		if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
			data->rules[rule_idx].func_id = p->func_id;

			data->rules[rule_idx].cmd_general_data =
				ETH_FILTER_RULES_CMD_RX_CMD;

			bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
						       &(data->rules[rule_idx++]),
						       true);
		}
	}

	/* Set the ramrod header - most importantly the total number of
	 * rules assembled above.
	 */
	bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);

	DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, "
	   "tx_accept_flags 0x%lx\n",
	   data->header.rule_cnt, p->rx_accept_flags,
	   p->tx_accept_flags);

	/* Send the ramrod */
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
			   U64_HI(p->rdata_mapping),
			   U64_LO(p->rdata_mapping),
			   ETH_CONNECTION_TYPE);
	if (rc)
		return rc;

	/* Ramrod completion is pending */
	return 1;
}
2257
2258static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
2259 struct bnx2x_rx_mode_ramrod_params *p)
2260{
2261 return bnx2x_state_wait(bp, p->state, p->pstate);
2262}
2263
/* No-op wait used when rx-mode configuration completes synchronously
 * (E1x path) - there is nothing to wait for.
 */
static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
				    struct bnx2x_rx_mode_ramrod_params *p)
{
	return 0;
}
2270
2271int bnx2x_config_rx_mode(struct bnx2x *bp,
2272 struct bnx2x_rx_mode_ramrod_params *p)
2273{
2274 int rc;
2275
2276
2277 rc = p->rx_mode_obj->config_rx_mode(bp, p);
2278 if (rc < 0)
2279 return rc;
2280
2281
2282 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2283 rc = p->rx_mode_obj->wait_comp(bp, p);
2284 if (rc)
2285 return rc;
2286 }
2287
2288 return rc;
2289}
2290
2291void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
2292 struct bnx2x_rx_mode_obj *o)
2293{
2294 if (CHIP_IS_E1x(bp)) {
2295 o->wait_comp = bnx2x_empty_rx_mode_wait;
2296 o->config_rx_mode = bnx2x_set_rx_mode_e1x;
2297 } else {
2298 o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
2299 o->config_rx_mode = bnx2x_set_rx_mode_e2;
2300 }
2301}
2302
2303
2304static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
2305{
2306 return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
2307}
2308
/* A single multicast MAC queued inside a pending mcast command.  These
 * elements live in the tail of the owning bnx2x_pending_mcast_cmd's
 * allocation (see bnx2x_mcast_enqueue_cmd) and are never freed
 * individually.
 */
struct bnx2x_mcast_mac_elem {
	struct list_head link;
	u8 mac[ETH_ALEN];
	u8 pad[2]; /* padding after the 6-byte MAC - presumably for
		    * alignment; TODO confirm
		    */
};
2314
/* A multicast command queued for later execution on
 * bnx2x_mcast_obj::pending_cmds_head.
 */
struct bnx2x_pending_mcast_cmd {
	struct list_head link;	/* entry in mcast_obj->pending_cmds_head */
	int type;		/* BNX2X_MCAST_CMD_ADD/DEL/RESTORE */
	union {
		/* ADD: MACs still to be configured (elements allocated in
		 * the tail of this command's own buffer)
		 */
		struct list_head macs_head;
		u32 macs_num;	/* DEL: number of MACs left to delete */
		int next_bin;	/* RESTORE: next bin to handle */
	} data;

	bool done; /* set by the bnx2x_mcast_hdl_pending_* handlers once the
		    * whole command has been processed; a command may span
		    * several ramrod invocations before that happens
		    */
};
2331
2332static int bnx2x_mcast_wait(struct bnx2x *bp,
2333 struct bnx2x_mcast_obj *o)
2334{
2335 if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
2336 o->raw.wait_comp(bp, &o->raw))
2337 return -EBUSY;
2338
2339 return 0;
2340}
2341
2342static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
2343 struct bnx2x_mcast_obj *o,
2344 struct bnx2x_mcast_ramrod_params *p,
2345 int cmd)
2346{
2347 int total_sz;
2348 struct bnx2x_pending_mcast_cmd *new_cmd;
2349 struct bnx2x_mcast_mac_elem *cur_mac = NULL;
2350 struct bnx2x_mcast_list_elem *pos;
2351 int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
2352 p->mcast_list_len : 0);
2353
2354
2355 if (!p->mcast_list_len)
2356 return 0;
2357
2358 total_sz = sizeof(*new_cmd) +
2359 macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
2360
2361
2362 new_cmd = kzalloc(total_sz, GFP_ATOMIC);
2363
2364 if (!new_cmd)
2365 return -ENOMEM;
2366
2367 DP(BNX2X_MSG_SP, "About to enqueue a new %d command. "
2368 "macs_list_len=%d\n", cmd, macs_list_len);
2369
2370 INIT_LIST_HEAD(&new_cmd->data.macs_head);
2371
2372 new_cmd->type = cmd;
2373 new_cmd->done = false;
2374
2375 switch (cmd) {
2376 case BNX2X_MCAST_CMD_ADD:
2377 cur_mac = (struct bnx2x_mcast_mac_elem *)
2378 ((u8 *)new_cmd + sizeof(*new_cmd));
2379
2380
2381
2382
2383 list_for_each_entry(pos, &p->mcast_list, link) {
2384 memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
2385 list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
2386 cur_mac++;
2387 }
2388
2389 break;
2390
2391 case BNX2X_MCAST_CMD_DEL:
2392 new_cmd->data.macs_num = p->mcast_list_len;
2393 break;
2394
2395 case BNX2X_MCAST_CMD_RESTORE:
2396 new_cmd->data.next_bin = 0;
2397 break;
2398
2399 default:
2400 BNX2X_ERR("Unknown command: %d\n", cmd);
2401 return -EINVAL;
2402 }
2403
2404
2405 list_add_tail(&new_cmd->link, &o->pending_cmds_head);
2406
2407 o->set_sched(o);
2408
2409 return 1;
2410}
2411
2412
2413
2414
2415
2416
2417
2418
2419
2420static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
2421{
2422 int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2423
2424 for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
2425 if (o->registry.aprox_match.vec[i])
2426 for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2427 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2428 if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2429 vec, cur_bit)) {
2430 return cur_bit;
2431 }
2432 }
2433 inner_start = 0;
2434 }
2435
2436
2437 return -1;
2438}
2439
2440
2441
2442
2443
2444
2445
2446
2447static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
2448{
2449 int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
2450
2451 if (cur_bit >= 0)
2452 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2453
2454 return cur_bit;
2455}
2456
2457static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
2458{
2459 struct bnx2x_raw_obj *raw = &o->raw;
2460 u8 rx_tx_flag = 0;
2461
2462 if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
2463 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2464 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2465
2466 if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
2467 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2468 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2469
2470 return rx_tx_flag;
2471}
2472
2473static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
2474 struct bnx2x_mcast_obj *o, int idx,
2475 union bnx2x_mcast_config_data *cfg_data,
2476 int cmd)
2477{
2478 struct bnx2x_raw_obj *r = &o->raw;
2479 struct eth_multicast_rules_ramrod_data *data =
2480 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2481 u8 func_id = r->func_id;
2482 u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
2483 int bin;
2484
2485 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
2486 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2487
2488 data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2489
2490
2491 switch (cmd) {
2492 case BNX2X_MCAST_CMD_ADD:
2493 bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
2494 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2495 break;
2496
2497 case BNX2X_MCAST_CMD_DEL:
2498
2499
2500
2501
2502
2503
2504 bin = bnx2x_mcast_clear_first_bin(o);
2505 break;
2506
2507 case BNX2X_MCAST_CMD_RESTORE:
2508 bin = cfg_data->bin;
2509 break;
2510
2511 default:
2512 BNX2X_ERR("Unknown command: %d\n", cmd);
2513 return;
2514 }
2515
2516 DP(BNX2X_MSG_SP, "%s bin %d\n",
2517 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2518 "Setting" : "Clearing"), bin);
2519
2520 data->rules[idx].bin_id = (u8)bin;
2521 data->rules[idx].func_id = func_id;
2522 data->rules[idx].engine_id = o->engine_id;
2523}
2524
2525
2526
2527
2528
2529
2530
2531
2532
2533
2534
2535static inline int bnx2x_mcast_handle_restore_cmd_e2(
2536 struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin,
2537 int *rdata_idx)
2538{
2539 int cur_bin, cnt = *rdata_idx;
2540 union bnx2x_mcast_config_data cfg_data = {0};
2541
2542
2543 for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2544 cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
2545
2546 cfg_data.bin = (u8)cur_bin;
2547 o->set_one_rule(bp, o, cnt, &cfg_data,
2548 BNX2X_MCAST_CMD_RESTORE);
2549
2550 cnt++;
2551
2552 DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
2553
2554
2555
2556
2557 if (cnt >= o->max_cmd_len)
2558 break;
2559 }
2560
2561 *rdata_idx = cnt;
2562
2563 return cur_bin;
2564}
2565
/* Emit ADD rules for the MACs of a pending command, starting at line
 * *line_idx.  Handled MACs are unlinked from the command's list; when
 * the ramrod buffer fills up (o->max_cmd_len) the remainder is left for
 * the next invocation.  The command is marked done once its MAC list is
 * empty.
 */
static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
	int *line_idx)
{
	struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
	int cnt = *line_idx;
	union bnx2x_mcast_config_data cfg_data = {0};

	list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
				 link) {
		/* Configure one rule for this MAC */
		cfg_data.mac = &pmac_pos->mac[0];
		o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);

		cnt++;

		DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
		   " mcast MAC\n",
		   BNX2X_MAC_PRN_LIST(pmac_pos->mac));

		/* The MAC is handled - unlink it.  No kfree here: the
		 * element lives inside the command's own allocation (see
		 * bnx2x_mcast_enqueue_cmd).
		 */
		list_del(&pmac_pos->link);

		/* Stop once the ramrod buffer is full; the rest of the MACs
		 * will be handled on the next invocation.
		 */
		if (cnt >= o->max_cmd_len)
			break;
	}

	*line_idx = cnt;

	/* The command is fully handled once no MACs remain */
	if (list_empty(&cmd_pos->data.macs_head))
		cmd_pos->done = true;
}
2601
/* Emit DEL rules for a pending delete command, starting at line
 * *line_idx, decrementing the command's remaining-MACs counter.  Stops
 * when the ramrod buffer fills up (o->max_cmd_len); the command is
 * marked done once its counter reaches zero.
 */
static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
	int *line_idx)
{
	int cnt = *line_idx;

	while (cmd_pos->data.macs_num) {
		/* DEL rules carry no MAC data (NULL cfg) */
		o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);

		cnt++;

		cmd_pos->data.macs_num--;

		DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n",
		   cmd_pos->data.macs_num, cnt);

		/* Stop once the ramrod buffer is full; the remainder is
		 * handled on the next invocation.
		 */
		if (cnt >= o->max_cmd_len)
			break;
	}

	*line_idx = cnt;

	/* The command is fully handled once the counter hits zero */
	if (!cmd_pos->data.macs_num)
		cmd_pos->done = true;
}
2631
2632static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
2633 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2634 int *line_idx)
2635{
2636 cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
2637 line_idx);
2638
2639 if (cmd_pos->data.next_bin < 0)
2640
2641 cmd_pos->done = true;
2642 else
2643
2644 cmd_pos->data.next_bin++;
2645}
2646
/* Process queued pending mcast commands into the current ramrod buffer.
 * Fully handled commands are unlinked and freed; a partially handled one
 * (buffer full) stays at the head of the list for the next invocation.
 * Returns the number of lines filled, or -EINVAL on an unknown command
 * type.
 */
static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
	struct bnx2x_mcast_ramrod_params *p)
{
	struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
	int cnt = 0;
	struct bnx2x_mcast_obj *o = p->mcast_obj;

	list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
				 link) {
		switch (cmd_pos->type) {
		case BNX2X_MCAST_CMD_ADD:
			bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
			break;

		case BNX2X_MCAST_CMD_DEL:
			bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
			break;

		case BNX2X_MCAST_CMD_RESTORE:
			bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
							   &cnt);
			break;

		default:
			BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
			return -EINVAL;
		}

		/* A handler sets 'done' only when it has consumed the whole
		 * command; release it then.
		 */
		if (cmd_pos->done) {
			list_del(&cmd_pos->link);
			kfree(cmd_pos);
		}

		/* Stop once the ramrod buffer is full */
		if (cnt >= o->max_cmd_len)
			break;
	}

	return cnt;
}
2690
2691static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
2692 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2693 int *line_idx)
2694{
2695 struct bnx2x_mcast_list_elem *mlist_pos;
2696 union bnx2x_mcast_config_data cfg_data = {0};
2697 int cnt = *line_idx;
2698
2699 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2700 cfg_data.mac = mlist_pos->mac;
2701 o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
2702
2703 cnt++;
2704
2705 DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
2706 " mcast MAC\n",
2707 BNX2X_MAC_PRN_LIST(mlist_pos->mac));
2708 }
2709
2710 *line_idx = cnt;
2711}
2712
2713static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
2714 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2715 int *line_idx)
2716{
2717 int cnt = *line_idx, i;
2718
2719 for (i = 0; i < p->mcast_list_len; i++) {
2720 o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
2721
2722 cnt++;
2723
2724 DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
2725 p->mcast_list_len - i - 1);
2726 }
2727
2728 *line_idx = cnt;
2729}
2730
2731
2732
2733
2734
2735
2736
2737
2738
2739
2740
2741
2742
2743static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
2744 struct bnx2x_mcast_ramrod_params *p, int cmd,
2745 int start_cnt)
2746{
2747 struct bnx2x_mcast_obj *o = p->mcast_obj;
2748 int cnt = start_cnt;
2749
2750 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
2751
2752 switch (cmd) {
2753 case BNX2X_MCAST_CMD_ADD:
2754 bnx2x_mcast_hdl_add(bp, o, p, &cnt);
2755 break;
2756
2757 case BNX2X_MCAST_CMD_DEL:
2758 bnx2x_mcast_hdl_del(bp, o, p, &cnt);
2759 break;
2760
2761 case BNX2X_MCAST_CMD_RESTORE:
2762 o->hdl_restore(bp, o, 0, &cnt);
2763 break;
2764
2765 default:
2766 BNX2X_ERR("Unknown command: %d\n", cmd);
2767 return -EINVAL;
2768 }
2769
2770
2771 p->mcast_list_len = 0;
2772
2773 return cnt;
2774}
2775
/* Validate an mcast command for E2+ and pre-adjust the registry size and
 * the pending-work counters.  Undone by bnx2x_mcast_revert_e2() if the
 * command later fails.
 */
static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
				   struct bnx2x_mcast_ramrod_params *p,
				   int cmd)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	int reg_sz = o->get_registry_size(o);

	switch (cmd) {
	/* DEL command removes all currently configured MACs */
	case BNX2X_MCAST_CMD_DEL:
		o->set_registry_size(o, 0);
		/* fall through - DEL works on the old registry size, like
		 * RESTORE below
		 */

	/* RESTORE reprograms the whole registry */
	case BNX2X_MCAST_CMD_RESTORE:
		/* Estimate of the work to do - set_one_rule_e2() may end up
		 * touching fewer bins than this.
		 */
		p->mcast_list_len = reg_sz;
		break;

	case BNX2X_MCAST_CMD_ADD:
	case BNX2X_MCAST_CMD_CONT:
		/* Assume every new MAC lands in its own bin; the real
		 * registry size is corrected after the pending commands are
		 * handled (see bnx2x_mcast_refresh_registry_e2).
		 */
		o->set_registry_size(o, reg_sz + p->mcast_list_len);
		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return -EINVAL;

	}

	/* Increase the total number of MACs pending to be configured */
	o->total_pending_num += p->mcast_list_len;

	return 0;
}
2821
2822static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
2823 struct bnx2x_mcast_ramrod_params *p,
2824 int old_num_bins)
2825{
2826 struct bnx2x_mcast_obj *o = p->mcast_obj;
2827
2828 o->set_registry_size(o, old_num_bins);
2829 o->total_pending_num -= p->mcast_list_len;
2830}
2831
2832
2833
2834
2835
2836
2837
2838
/* Fill the header of the E2 multicast-rules ramrod data: the echo field
 * encodes the connection id plus the MCAST_PENDING filter state (recovered
 * on completion), and rule_cnt is the number of rules in the buffer.
 */
static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
					struct bnx2x_mcast_ramrod_params *p,
					u8 len)
{
	struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
	struct eth_multicast_rules_ramrod_data *data =
		(struct eth_multicast_rules_ramrod_data *)(r->rdata);

	data->header.echo = ((r->cid & BNX2X_SWCID_MASK) |
			  (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
	data->header.rule_cnt = len;
}
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860
2861
2862
2863static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
2864 struct bnx2x_mcast_obj *o)
2865{
2866 int i, cnt = 0;
2867 u64 elem;
2868
2869 for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
2870 elem = o->registry.aprox_match.vec[i];
2871 for (; elem; cnt++)
2872 elem &= elem - 1;
2873 }
2874
2875 o->set_registry_size(o, cnt);
2876
2877 return 0;
2878}
2879
/* Build and send the E2 multicast-rules ramrod: drain pending commands
 * into the ramrod data first, then (if room allowed it) the current
 * command, and post the SPQ element unless RAMROD_DRV_CLR_ONLY is set.
 *
 * Returns 1 if a ramrod was sent (completion pending), 0 for a
 * driver-only cleanup, or a negative error from bnx2x_sp_post().
 */
static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
				struct bnx2x_mcast_ramrod_params *p,
				int cmd)
{
	struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	struct eth_multicast_rules_ramrod_data *data =
		(struct eth_multicast_rules_ramrod_data *)(raw->rdata);
	int cnt = 0, rc;

	/* Reset the ramrod data buffer */
	memset(data, 0, sizeof(*data));

	cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);

	/* If there are no more pending commands - clear SCHEDULED state */
	if (list_empty(&o->pending_cmds_head))
		o->clear_sched(o);

	/* The below may be true iff there was enough room in the ramrod
	 * data for all pending commands and for the current command;
	 * otherwise the current command would have been enqueued as
	 * pending and p->mcast_list_len zeroed (see bnx2x_config_mcast()).
	 */
	if (p->mcast_list_len > 0)
		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);

	/* We've written cnt rules - decrease the outstanding total
	 * accordingly.
	 */
	o->total_pending_num -= cnt;

	/* Sanity: we must not over- or under-run the accounting */
	WARN_ON(o->total_pending_num < 0);
	WARN_ON(cnt > o->max_cmd_len);

	bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);

	/* Refresh the registry size only once nothing is outstanding, so
	 * that it keeps reflecting the value set by the validate step while
	 * commands are still in flight (the next DEL/RESTORE sizing depends
	 * on it).
	 */
	if (!o->total_pending_num)
		bnx2x_mcast_refresh_registry_e2(bp, o);

	/* If CLEAR_ONLY was requested - don't send a ramrod; just drop the
	 * pending state.
	 */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
		raw->clear_pending(raw);
		return 0;
	} else {
		/* Send the multicast-rules ramrod */
		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
				   raw->cid, U64_HI(raw->rdata_mapping),
				   U64_LO(raw->rdata_mapping),
				   ETH_CONNECTION_TYPE);
		if (rc)
			return rc;

		/* Ramrod completion is pending */
		return 1;
	}
}
2965
2966static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
2967 struct bnx2x_mcast_ramrod_params *p,
2968 int cmd)
2969{
2970
2971 if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
2972 p->mcast_list_len = 1;
2973
2974 return 0;
2975}
2976
/* Revert hook for 57711: intentionally empty, since
 * bnx2x_mcast_validate_e1h() changes no registry state that would need
 * rolling back.
 */
static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
				   struct bnx2x_mcast_ramrod_params *p,
				   int old_num_bins)
{
	/* Do nothing */
}
2983
/* Set bit @bit in the 57711 multicast hash filter @filter, which is an
 * array of 32-bit words (bit / 32 selects the word, bit % 32 the bit).
 */
#define BNX2X_57711_SET_MC_FILTER(filter, bit) \
do { \
	(filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
} while (0)
2988
/* For each MAC in the request, compute its hash bin, set the matching bit
 * in the hardware MC filter image @mc_filter, and record the bin in the
 * object's approximate-match registry.
 */
static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
					   struct bnx2x_mcast_obj *o,
					   struct bnx2x_mcast_ramrod_params *p,
					   u32 *mc_filter)
{
	struct bnx2x_mcast_list_elem *mlist_pos;
	int bit;

	list_for_each_entry(mlist_pos, &p->mcast_list, link) {
		bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
		BNX2X_57711_SET_MC_FILTER(mc_filter, bit);

		DP(BNX2X_MSG_SP, "About to configure "
		   BNX2X_MAC_FMT" mcast MAC, bin %d\n",
		   BNX2X_MAC_PRN_LIST(mlist_pos->mac), bit);

		/* bookkeeping: remember the bin in the registry */
		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
				  bit);
	}
}
3010
3011static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
3012 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3013 u32 *mc_filter)
3014{
3015 int bit;
3016
3017 for (bit = bnx2x_mcast_get_next_bin(o, 0);
3018 bit >= 0;
3019 bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
3020 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3021 DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
3022 }
3023}
3024
3025
3026
3027
3028
/* Apply a multicast command on 57711: build the MC hash filter image in
 * memory and write it into the chip's MC_HASH registers directly (no
 * ramrod on this chip), then clear the object's pending state.
 *
 * Always returns 0 except for an unknown command (-EINVAL).
 */
static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
				 struct bnx2x_mcast_ramrod_params *p,
				 int cmd)
{
	int i;
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	struct bnx2x_raw_obj *r = &o->raw;

	/* If CLEAR_ONLY has been requested - clear the registry without
	 * touching the HW.
	 */
	if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
		u32 mc_filter[MC_HASH_SIZE] = {0};

		/* Build the new filter image and keep the registry in sync */
		switch (cmd) {
		case BNX2X_MCAST_CMD_ADD:
			bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
			break;

		case BNX2X_MCAST_CMD_DEL:
			DP(BNX2X_MSG_SP, "Invalidating multicast "
					 "MACs configuration\n");

			/* clear the registry; mc_filter stays all-zero */
			memset(o->registry.aprox_match.vec, 0,
			       sizeof(o->registry.aprox_match.vec));
			break;

		case BNX2X_MCAST_CMD_RESTORE:
			bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
			break;

		default:
			BNX2X_ERR("Unknown command: %d\n", cmd);
			return -EINVAL;
		}

		/* Set the mcast filter in the internal memory */
		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
	} else
		/* clear the registry */
		memset(o->registry.aprox_match.vec, 0,
		       sizeof(o->registry.aprox_match.vec));

	/* We are done */
	r->clear_pending(r);

	return 0;
}
3082
/* Validate a multicast command for 57710 (E1) and update the exact-match
 * registry accounting; E1 issues at most one full CAM command at a time,
 * so a pending command always books max_cmd_len slots.
 */
static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
				   struct bnx2x_mcast_ramrod_params *p,
				   int cmd)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	int reg_sz = o->get_registry_size(o);

	switch (cmd) {
	/* DEL command deletes all currently configured MACs */
	case BNX2X_MCAST_CMD_DEL:
		o->set_registry_size(o, 0);
		/* fall through: DEL, like RESTORE, spans reg_sz entries */

	case BNX2X_MCAST_CMD_RESTORE:
		p->mcast_list_len = reg_sz;
		DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
				 cmd, p->mcast_list_len);
		break;

	case BNX2X_MCAST_CMD_ADD:
	case BNX2X_MCAST_CMD_CONT:
		/* The CAM holds at most max_cmd_len multicast entries on
		 * 57710 - reject longer requests up front.
		 */
		if (p->mcast_list_len > o->max_cmd_len) {
			BNX2X_ERR("Can't configure more than %d multicast MACs"
				  "on 57710\n", o->max_cmd_len);
			return -EINVAL;
		}
		/* Every configured MAC should be cleared if DEL command is
		 * performed. If CONT command is performed, the command is
		 * issued with an empty list, so keep the old size then.
		 */
		DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
		if (p->mcast_list_len > 0)
			o->set_registry_size(o, p->mcast_list_len);

		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return -EINVAL;

	}

	/* On E1 a command always consumes a full CAM-sized chunk of the
	 * pending budget, regardless of the actual list length.
	 */
	if (p->mcast_list_len)
		o->total_pending_num += o->max_cmd_len;

	return 0;
}
3138
/* Undo bnx2x_mcast_revert_e1()'s counterpart, bnx2x_mcast_validate_e1():
 * restore the registry size and, if the command had actually booked a
 * CAM-sized chunk of the pending budget, release it again.
 */
static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
				  struct bnx2x_mcast_ramrod_params *p,
				  int old_num_macs)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;

	o->set_registry_size(o, old_num_macs);

	/* If the command wasn't empty, validate_e1 added max_cmd_len to the
	 * pending total - take it back.
	 */
	if (p->mcast_list_len)
		o->total_pending_num -= o->max_cmd_len;
}
3154
/* Fill one entry of the E1 CAM configuration command at index @idx.
 * Only ADD and RESTORE write an entry; DEL relies on the table having
 * been pre-filled with INVALIDATE entries (see bnx2x_mcast_setup_e1()).
 */
static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
					struct bnx2x_mcast_obj *o, int idx,
					union bnx2x_mcast_config_data *cfg_data,
					int cmd)
{
	struct bnx2x_raw_obj *r = &o->raw;
	struct mac_configuration_cmd *data =
		(struct mac_configuration_cmd *)(r->rdata);

	/* copy mac */
	if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
		bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
				      &data->config_table[idx].middle_mac_addr,
				      &data->config_table[idx].lsb_mac_addr,
				      cfg_data->mac);

		data->config_table[idx].vlan_id = 0;
		data->config_table[idx].pf_id = r->func_id;
		data->config_table[idx].clients_bit_vector =
			cpu_to_le32(1 << r->cl_id);

		SET_FLAG(data->config_table[idx].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_SET);
	}
}
3181
3182
3183
3184
3185
3186
3187
3188
/* Fill the header of the E1 MAC-configuration ramrod data: the CAM offset
 * for this function's multicast region (emulation uses a smaller region),
 * a wildcard client id, the echo (cid + MCAST_PENDING state) and the
 * number of valid entries.
 */
static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
					struct bnx2x_mcast_ramrod_params *p,
					u8 len)
{
	struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
	struct mac_configuration_cmd *data =
		(struct mac_configuration_cmd *)(r->rdata);

	u8 offset = (CHIP_REV_IS_SLOW(bp) ?
		     BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
		     BNX2X_MAX_MULTICAST*(1 + r->func_id));

	data->hdr.offset = offset;
	data->hdr.client_id = 0xff;
	data->hdr.echo = ((r->cid & BNX2X_SWCID_MASK) |
			  (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
	data->hdr.length = len;
}
3207
3208
3209
3210
3211
3212
3213
3214
3215
3216
3217
3218
3219
3220
/* Restore the full exact-match registry into the ramrod data for E1.
 * @start_idx is unused here - the whole registry always fits in one E1
 * command, so the walk starts from index 0.
 *
 * *rdata_idx receives the number of entries written.
 * NOTE(review): returns -1 unconditionally - presumably a sentinel meaning
 * "nothing left to restore after this call"; the caller ignores it.
 */
static inline int bnx2x_mcast_handle_restore_cmd_e1(
	struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx,
	int *rdata_idx)
{
	struct bnx2x_mcast_mac_elem *elem;
	int i = 0;
	union bnx2x_mcast_config_data cfg_data = {0};

	/* go through the registry and configure the MACs from it. */
	list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
		cfg_data.mac = &elem->mac[0];
		o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);

		i++;

		DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
				 " mcast MAC\n",
				 BNX2X_MAC_PRN_LIST(cfg_data.mac));
	}

	*rdata_idx = i;

	return -1;
}
3245
3246
/* Handle at most one pending multicast command on E1 (a single E1 ramrod
 * can carry only one command's worth of CAM entries).
 *
 * Returns the number of rules written (0 if there were no pending
 * commands), or -EINVAL for a corrupted command type.  The handled command
 * is removed from the pending list and freed.
 */
static inline int bnx2x_mcast_handle_pending_cmds_e1(
	struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
{
	struct bnx2x_pending_mcast_cmd *cmd_pos;
	struct bnx2x_mcast_mac_elem *pmac_pos;
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	union bnx2x_mcast_config_data cfg_data = {0};
	int cnt = 0;

	/* If nothing is pending - return */
	if (list_empty(&o->pending_cmds_head))
		return 0;

	/* Handle the first command only */
	cmd_pos = list_first_entry(&o->pending_cmds_head,
				   struct bnx2x_pending_mcast_cmd, link);

	switch (cmd_pos->type) {
	case BNX2X_MCAST_CMD_ADD:
		list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
			cfg_data.mac = &pmac_pos->mac[0];
			o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);

			cnt++;

			DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
					 " mcast MAC\n",
					 BNX2X_MAC_PRN_LIST(pmac_pos->mac));
		}
		break;

	case BNX2X_MCAST_CMD_DEL:
		/* Entries were pre-invalidated by the caller; only the count
		 * matters for the ramrod header.
		 */
		cnt = cmd_pos->data.macs_num;
		DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
		break;

	case BNX2X_MCAST_CMD_RESTORE:
		o->hdl_restore(bp, o, 0, &cnt);
		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
		return -EINVAL;
	}

	list_del(&cmd_pos->link);
	kfree(cmd_pos);

	return cnt;
}
3298
3299
3300
3301
3302
3303
3304
3305
3306
3307static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
3308 __le16 *fw_lo, u8 *mac)
3309{
3310 mac[1] = ((u8 *)fw_hi)[0];
3311 mac[0] = ((u8 *)fw_hi)[1];
3312 mac[3] = ((u8 *)fw_mid)[0];
3313 mac[2] = ((u8 *)fw_mid)[1];
3314 mac[5] = ((u8 *)fw_lo)[0];
3315 mac[4] = ((u8 *)fw_lo)[1];
3316}
3317
3318
3319
3320
3321
3322
3323
3324
3325
3326
3327
3328
/* Sync the exact-match registry with the last E1 CAM command found in the
 * ramrod buffer: a SET command repopulates the registry from the buffer,
 * anything else (i.e. an invalidate) wipes it.
 *
 * Returns 0 on success, -ENOMEM if the registry allocation fails.
 */
static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
						  struct bnx2x_mcast_obj *o)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct bnx2x_mcast_mac_elem *elem;
	struct mac_configuration_cmd *data =
			(struct mac_configuration_cmd *)(raw->rdata);

	/* If first entry contains a SET bit - the command was ADD,
	 * otherwise - DEL_ALL
	 */
	if (GET_FLAG(data->config_table[0].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
		int i, len = data->hdr.length;

		/* Break if it was a RESTORE command - registry is intact */
		if (!list_empty(&o->registry.exact_match.macs))
			return 0;

		/* All len elements are carved from a single allocation;
		 * the DEL branch below frees them via the first element.
		 */
		elem = kzalloc(sizeof(*elem)*len, GFP_ATOMIC);
		if (!elem) {
			BNX2X_ERR("Failed to allocate registry memory\n");
			return -ENOMEM;
		}

		for (i = 0; i < len; i++, elem++) {
			bnx2x_get_fw_mac_addr(
				&data->config_table[i].msb_mac_addr,
				&data->config_table[i].middle_mac_addr,
				&data->config_table[i].lsb_mac_addr,
				elem->mac);
			DP(BNX2X_MSG_SP, "Adding registry entry for ["
					 BNX2X_MAC_FMT"]\n",
				   BNX2X_MAC_PRN_LIST(elem->mac));
			list_add_tail(&elem->link,
				      &o->registry.exact_match.macs);
		}
	} else {
		/* NOTE(review): freeing the first list entry releases the
		 * whole array, assuming the registry was populated by the
		 * single-kzalloc ADD path above - verify no other producer
		 * links individually-allocated elements into this list.
		 */
		elem = list_first_entry(&o->registry.exact_match.macs,
					struct bnx2x_mcast_mac_elem, link);
		DP(BNX2X_MSG_SP, "Deleting a registry\n");
		kfree(elem);
		INIT_LIST_HEAD(&o->registry.exact_match.macs);
	}

	return 0;
}
3376
/* Build and send the E1 set-MAC ramrod for a multicast command: the CAM
 * table is first filled with INVALIDATE entries, then overwritten by
 * either one pending command or the current one, and posted unless
 * RAMROD_DRV_CLR_ONLY is set.
 *
 * Returns 1 if a ramrod was sent (completion pending), 0 for a
 * driver-only cleanup, or a negative error.
 */
static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
				struct bnx2x_mcast_ramrod_params *p,
				int cmd)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	struct bnx2x_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *data =
		(struct mac_configuration_cmd *)(raw->rdata);
	int cnt = 0, i, rc;

	/* Reset the ramrod data buffer */
	memset(data, 0, sizeof(*data));

	/* First set all entries as invalid */
	for (i = 0; i < o->max_cmd_len ; i++)
		SET_FLAG(data->config_table[i].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);

	/* Handle pending commands first */
	cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);

	/* If there are no more pending commands - clear SCHEDULED state */
	if (list_empty(&o->pending_cmds_head))
		o->clear_sched(o);

	/* The below may be true iff there were no pending commands */
	if (!cnt)
		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);

	/* For 57710 every command is always a full CAM-sized chunk of the
	 * pending budget (see bnx2x_mcast_validate_e1()).
	 */
	o->total_pending_num -= o->max_cmd_len;

	/* send a ramrod */

	WARN_ON(cnt > o->max_cmd_len);

	/* Set ramrod header (in particular, a number of entries to update) */
	bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);

	/* update the registry before posting: once the ramrod is sent the
	 * buffer may be overwritten by a completion.
	 */
	rc = bnx2x_mcast_refresh_registry_e1(bp, o);
	if (rc)
		return rc;

	/* If CLEAR_ONLY was requested - don't send a ramrod and clear
	 * RAMROD_PENDING status immediately.
	 */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
		raw->clear_pending(raw);
		return 0;
	} else {
		/* Send the set-MAC ramrod */
		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
				   U64_HI(raw->rdata_mapping),
				   U64_LO(raw->rdata_mapping),
				   ETH_CONNECTION_TYPE);
		if (rc)
			return rc;

		/* Ramrod completion is pending */
		return 1;
	}

}
3458
/* Registry size getter for exact-match chips (E1). */
static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
{
	return o->registry.exact_match.num_macs_set;
}
3463
/* Registry size getter for approximate-match (bin based) chips (E1H/E2). */
static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
{
	return o->registry.aprox_match.num_bins_set;
}
3468
/* Registry size setter for exact-match chips (E1). */
static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
						int n)
{
	o->registry.exact_match.num_macs_set = n;
}
3474
/* Registry size setter for approximate-match (bin based) chips (E1H/E2). */
static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
						int n)
{
	o->registry.aprox_match.num_bins_set = n;
}
3480
/* Top-level entry point for multicast configuration.
 *
 * Validates the command, enqueues it as pending if a ramrod is already in
 * flight or the request exceeds one chunk, and otherwise sends it
 * immediately; reverts the validate-step accounting on failure.
 *
 * Returns 0 on success/no-op, a positive value if a ramrod was sent and
 * completion is pending (unless RAMROD_COMP_WAIT waited for it), or a
 * negative error code.
 */
int bnx2x_config_mcast(struct bnx2x *bp,
		       struct bnx2x_mcast_ramrod_params *p,
		       int cmd)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	struct bnx2x_raw_obj *r = &o->raw;
	int rc = 0, old_reg_size;

	/* This is needed to recover the registry size in case of a failure
	 * before the ramrod accounting took place.
	 */
	old_reg_size = o->get_registry_size(o);

	/* Do some calculations and checks */
	rc = o->validate(bp, p, cmd);
	if (rc)
		return rc;

	/* Return if there is no work to do */
	if ((!p->mcast_list_len) && (!o->check_sched(o)))
		return 0;

	DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d "
			 "o->max_cmd_len=%d\n", o->total_pending_num,
			 p->mcast_list_len, o->max_cmd_len);

	/* Enqueue the current command to the pending list if we can't
	 * complete it in the current iteration (a ramrod is in flight, or
	 * the pending work exceeds one ramrod's capacity).
	 */
	if (r->check_pending(r) ||
	    ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
		rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
		if (rc < 0)
			goto error_exit1;

		/* As long as the current command is in a command list we
		 * don't need to handle it separately.
		 */
		p->mcast_list_len = 0;
	}

	if (!r->check_pending(r)) {

		/* Set 'pending' state before posting - the completion
		 * handler clears it.
		 */
		r->set_pending(r);

		/* Configure the new classification in the chip */
		rc = o->config_mcast(bp, p, cmd);
		if (rc < 0)
			goto error_exit2;

		/* Wait for a ramrod completion if was requested */
		if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
			rc = o->wait_comp(bp, o);
	}

	return rc;

error_exit2:
	r->clear_pending(r);

error_exit1:
	o->revert(bp, p, old_reg_size);

	return rc;
}
3547
/* Atomically clear the SCHEDULED bit with full barriers around it. */
static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
{
	smp_mb__before_clear_bit();
	clear_bit(o->sched_state, o->raw.pstate);
	smp_mb__after_clear_bit();
}
3554
/* Atomically set the SCHEDULED bit.
 * NOTE(review): uses the *_clear_bit barrier flavors around set_bit -
 * matches the clear path above, but confirm this is the intended barrier
 * pairing on all supported architectures.
 */
static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
{
	smp_mb__before_clear_bit();
	set_bit(o->sched_state, o->raw.pstate);
	smp_mb__after_clear_bit();
}
3561
/* Return true iff the object's SCHEDULED bit is set. */
static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
{
	return !!test_bit(o->sched_state, o->raw.pstate);
}
3566
/* True if either a ramrod is in flight or more commands are scheduled. */
static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
{
	return o->raw.check_pending(&o->raw) || o->check_sched(o);
}
3571
/* Initialize a multicast object, wiring the chip-specific callbacks for
 * E1 (exact-match CAM), E1H (approximate-match hash registers, written
 * directly - no command queue) or E2+ (multicast-rules ramrod).
 */
void bnx2x_init_mcast_obj(struct bnx2x *bp,
			  struct bnx2x_mcast_obj *mcast_obj,
			  u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
			  u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
			  int state, unsigned long *pstate, bnx2x_obj_type type)
{
	memset(mcast_obj, 0, sizeof(*mcast_obj));

	bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
			   rdata, rdata_mapping, state, pstate, type);

	mcast_obj->engine_id = engine_id;

	INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);

	mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
	mcast_obj->check_sched = bnx2x_mcast_check_sched;
	mcast_obj->set_sched = bnx2x_mcast_set_sched;
	mcast_obj->clear_sched = bnx2x_mcast_clear_sched;

	if (CHIP_IS_E1(bp)) {
		mcast_obj->config_mcast      = bnx2x_mcast_setup_e1;
		mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
		mcast_obj->hdl_restore       =
			bnx2x_mcast_handle_restore_cmd_e1;
		mcast_obj->check_pending     = bnx2x_mcast_check_pending;

		/* A single E1 command covers the whole CAM region */
		if (CHIP_REV_IS_SLOW(bp))
			mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
		else
			mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;

		mcast_obj->wait_comp         = bnx2x_mcast_wait;
		mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e1;
		mcast_obj->validate          = bnx2x_mcast_validate_e1;
		mcast_obj->revert            = bnx2x_mcast_revert_e1;
		mcast_obj->get_registry_size =
			bnx2x_mcast_get_registry_size_exact;
		mcast_obj->set_registry_size =
			bnx2x_mcast_set_registry_size_exact;

		/* 57710 is the only chip that uses the exact match MAC
		 * registry.
		 */
		INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);

	} else if (CHIP_IS_E1H(bp)) {
		mcast_obj->config_mcast  = bnx2x_mcast_setup_e1h;
		mcast_obj->enqueue_cmd   = NULL;
		mcast_obj->hdl_restore   = NULL;
		mcast_obj->check_pending = bnx2x_mcast_check_pending;

		/* 57711 doesn't send a ramrod, so it has unlimited credit
		 * for one command.
		 */
		mcast_obj->max_cmd_len       = -1;
		mcast_obj->wait_comp         = bnx2x_mcast_wait;
		mcast_obj->set_one_rule      = NULL;
		mcast_obj->validate          = bnx2x_mcast_validate_e1h;
		mcast_obj->revert            = bnx2x_mcast_revert_e1h;
		mcast_obj->get_registry_size =
			bnx2x_mcast_get_registry_size_aprox;
		mcast_obj->set_registry_size =
			bnx2x_mcast_set_registry_size_aprox;
	} else {
		mcast_obj->config_mcast      = bnx2x_mcast_setup_e2;
		mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
		mcast_obj->hdl_restore       =
			bnx2x_mcast_handle_restore_cmd_e2;
		mcast_obj->check_pending     = bnx2x_mcast_check_pending;

		/* Maximum number of rules per E2 multicast ramrod */
		mcast_obj->max_cmd_len       = 16;
		mcast_obj->wait_comp         = bnx2x_mcast_wait;
		mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e2;
		mcast_obj->validate          = bnx2x_mcast_validate_e2;
		mcast_obj->revert            = bnx2x_mcast_revert_e2;
		mcast_obj->get_registry_size =
			bnx2x_mcast_get_registry_size_aprox;
		mcast_obj->set_registry_size =
			bnx2x_mcast_set_registry_size_aprox;
	}
}
3655
3656
3657
3658
3659
3660
3661
3662
3663
3664
3665
3666
3667
3668static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
3669{
3670 int c, old;
3671
3672 c = atomic_read(v);
3673 for (;;) {
3674 if (unlikely(c + a >= u))
3675 return false;
3676
3677 old = atomic_cmpxchg((v), c, c + a);
3678 if (likely(old == c))
3679 break;
3680 c = old;
3681 }
3682
3683 return true;
3684}
3685
3686
3687
3688
3689
3690
3691
3692
3693
3694
3695
3696static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
3697{
3698 int c, old;
3699
3700 c = atomic_read(v);
3701 for (;;) {
3702 if (unlikely(c - a < u))
3703 return false;
3704
3705 old = atomic_cmpxchg((v), c, c - a);
3706 if (likely(old == c))
3707 break;
3708 c = old;
3709 }
3710
3711 return true;
3712}
3713
/* Take @cnt credits from the pool; fails (returns false) if that would
 * drive the credit count below zero.  Barriers order the credit update
 * against surrounding accesses.
 */
static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
{
	bool rc;

	smp_mb();
	rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
	smp_mb();

	return rc;
}
3724
/* Return @cnt credits to the pool; fails (returns false) if that would
 * exceed the pool size (credit must stay <= pool_sz).
 */
static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
{
	bool rc;

	smp_mb();

	/* Don't let the credit count grow past pool_sz */
	rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);

	smp_mb();

	return rc;
}
3738
/* Return the current credit count (a point-in-time snapshot). */
static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
{
	int cur_credit;

	smp_mb();
	cur_credit = atomic_read(&o->credit);

	return cur_credit;
}
3748
/* get/put stub used when the pool imposes no credit limit. */
static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
					  int cnt)
{
	return true;
}
3754
3755
3756static bool bnx2x_credit_pool_get_entry(
3757 struct bnx2x_credit_pool_obj *o,
3758 int *offset)
3759{
3760 int idx, vec, i;
3761
3762 *offset = -1;
3763
3764
3765 for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
3766
3767
3768 if (!o->pool_mirror[vec])
3769 continue;
3770
3771
3772 for (idx = vec * BNX2X_POOL_VEC_SIZE, i = 0;
3773 i < BIT_VEC64_ELEM_SZ; idx++, i++)
3774
3775 if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
3776
3777 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
3778 *offset = o->base_pool_offset + idx;
3779 return true;
3780 }
3781 }
3782
3783 return false;
3784}
3785
3786static bool bnx2x_credit_pool_put_entry(
3787 struct bnx2x_credit_pool_obj *o,
3788 int offset)
3789{
3790 if (offset < o->base_pool_offset)
3791 return false;
3792
3793 offset -= o->base_pool_offset;
3794
3795 if (offset >= o->pool_sz)
3796 return false;
3797
3798
3799 BIT_VEC64_SET_BIT(o->pool_mirror, offset);
3800
3801 return true;
3802}
3803
/* put_entry stub used when the pool does not manage per-entry offsets. */
static bool bnx2x_credit_pool_put_entry_always_true(
	struct bnx2x_credit_pool_obj *o,
	int offset)
{
	return true;
}
3810
/* get_entry stub used when the pool does not manage per-entry offsets;
 * always reports -1 as the offset.
 */
static bool bnx2x_credit_pool_get_entry_always_true(
	struct bnx2x_credit_pool_obj *o,
	int *offset)
{
	*offset = -1;
	return true;
}
3818
3819
3820
3821
3822
3823
3824
3825
3826
3827
3828
/* Initialize a credit pool.
 *
 * @base:   absolute offset of the pool's first entry; a negative value
 *          means "no per-entry offset management" (entry ops are stubs).
 * @credit: pool size; a negative value means "unlimited" (credit ops are
 *          stubs that always succeed).
 */
static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
					  int base, int credit)
{
	/* Zero the object first */
	memset(p, 0, sizeof(*p));

	/* Set the table to all 1s: every entry starts out free */
	memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));

	/* Init a pool as full */
	atomic_set(&p->credit, credit);

	/* The total poll size */
	p->pool_sz = credit;

	p->base_pool_offset = base;

	/* Commit the change before the callbacks become visible */
	smp_mb();

	p->check = bnx2x_credit_pool_check;

	/* if pool credit is negative - disable the checks */
	if (credit >= 0) {
		p->put      = bnx2x_credit_pool_put;
		p->get      = bnx2x_credit_pool_get;
		p->put_entry = bnx2x_credit_pool_put_entry;
		p->get_entry = bnx2x_credit_pool_get_entry;
	} else {
		p->put      = bnx2x_credit_pool_always_true;
		p->get      = bnx2x_credit_pool_always_true;
		p->put_entry = bnx2x_credit_pool_put_entry_always_true;
		p->get_entry = bnx2x_credit_pool_get_entry_always_true;
	}

	/* If base is negative - disable entries handling */
	if (base < 0) {
		p->put_entry = bnx2x_credit_pool_put_entry_always_true;
		p->get_entry = bnx2x_credit_pool_get_entry_always_true;
	}
}
3870
/* Initialize the MAC CAM credit pool according to chip generation:
 * E1 splits the CAM per function minus the multicast region, E1H divides
 * it evenly across functions, and E2+ uses per-function credit but no
 * fixed per-function CAM offsets (base = -1).
 */
void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
				struct bnx2x_credit_pool_obj *p, u8 func_id,
				u8 func_num)
{
/* TODO: this will be defined in consts as well... */
#define BNX2X_CAM_SIZE_EMUL 5

	int cam_sz;

	if (CHIP_IS_E1(bp)) {
		/* In E1, Multicast is saved in cam... */
		if (!CHIP_REV_IS_SLOW(bp))
			cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
		else
			cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;

		bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);

	} else if (CHIP_IS_E1H(bp)) {
		/* CAM credit is equally divided between all active functions
		 * on the PORT!.
		 */
		if ((func_num > 0)) {
			if (!CHIP_REV_IS_SLOW(bp))
				cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
			else
				cam_sz = BNX2X_CAM_SIZE_EMUL;
			bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
		} else {
			/* this should never happen! Block MAC operations. */
			bnx2x_init_credit_pool(p, 0, 0);
		}

	} else {

		/* CAM credit is equally divided between all active functions
		 * on the PATH.
		 */
		if ((func_num > 0)) {
			if (!CHIP_REV_IS_SLOW(bp))
				cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
			else
				cam_sz = BNX2X_CAM_SIZE_EMUL;

			/* No need for CAM entries handling for 57712 and
			 * newer - hence base = -1.
			 */
			bnx2x_init_credit_pool(p, -1, cam_sz);
		} else {
			/* this should never happen! Block MAC operations. */
			bnx2x_init_credit_pool(p, 0, 0);
		}

	}
}
3928
/* Initialize the VLAN credit pool: unlimited on E1x (no credit
 * accounting), and evenly divided per function on E2+.
 */
void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
				 struct bnx2x_credit_pool_obj *p,
				 u8 func_id,
				 u8 func_num)
{
	if (CHIP_IS_E1x(bp)) {
		/* There is no VLAN credit in HW on 57710 and 57711 only
		 * MAC / MAC-VLAN can be set
		 */
		bnx2x_init_credit_pool(p, 0, -1);
	} else {
		/* CAM credit is equally divided between all active functions
		 * on the PATH.
		 */
		if (func_num > 0) {
			int credit = MAX_VLAN_CREDIT_E2 / func_num;
			bnx2x_init_credit_pool(p, func_id * credit, credit);
		} else
			/* this should never happen! Block VLAN operations. */
			bnx2x_init_credit_pool(p, 0, 0);
	}
}
3953
3954
3955
3956
3957
3958
3959
3960
3961
3962
/* Debug helper: dump the RSS indirection table, 4 entries per line with a
 * leading hex index.
 */
static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
					       struct bnx2x_config_rss_params *p)
{
	int i;

	DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
	DP(BNX2X_MSG_SP, "0x0000: ");
	for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
		DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);

		/* Print 4 bytes in a line */
		if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
		    (((i + 1) & 0x3) == 0)) {
			DP_CONT(BNX2X_MSG_SP, "\n");
			DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
		}
	}

	DP_CONT(BNX2X_MSG_SP, "\n");
}
3983
3984
3985
3986
3987
3988
3989
3990
3991
/* Build the RSS-update ramrod data from @p (mode, hash capabilities,
 * result mask, engine id, indirection table and optionally the hash key)
 * and post the RSS_UPDATE ramrod.
 *
 * Returns 1 (completion pending) on success, negative error otherwise.
 */
static int bnx2x_setup_rss(struct bnx2x *bp,
			   struct bnx2x_config_rss_params *p)
{
	struct bnx2x_rss_config_obj *o = p->rss_obj;
	struct bnx2x_raw_obj *r = &o->raw;
	struct eth_rss_update_ramrod_data *data =
		(struct eth_rss_update_ramrod_data *)(r->rdata);
	u8 rss_mode = 0;
	int rc;

	memset(data, 0, sizeof(*data));

	DP(BNX2X_MSG_SP, "Configuring RSS\n");

	/* Set an echo field */
	data->echo = (r->cid & BNX2X_SWCID_MASK) |
		     (r->state << BNX2X_SWCID_SHIFT);

	/* RSS mode */
	if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
		rss_mode = ETH_RSS_MODE_DISABLED;
	else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
		rss_mode = ETH_RSS_MODE_REGULAR;
	else if (test_bit(BNX2X_RSS_MODE_VLAN_PRI, &p->rss_flags))
		rss_mode = ETH_RSS_MODE_VLAN_PRI;
	else if (test_bit(BNX2X_RSS_MODE_E1HOV_PRI, &p->rss_flags))
		rss_mode = ETH_RSS_MODE_E1HOV_PRI;
	else if (test_bit(BNX2X_RSS_MODE_IP_DSCP, &p->rss_flags))
		rss_mode = ETH_RSS_MODE_IP_DSCP;

	data->rss_mode = rss_mode;

	DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);

	/* RSS capabilities: which header fields participate in the hash */
	if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;

	/* Hashing mask */
	data->rss_result_mask = p->rss_result_mask;

	/* RSS engine ID */
	data->rss_engine_id = o->engine_id;

	DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);

	/* Indirection table */
	memcpy(data->indirection_table, p->ind_table,
		  T_ETH_INDIRECTION_TABLE_SIZE);

	/* Remember the last configuration for later retrieval by
	 * bnx2x_get_rss_ind_table().
	 */
	memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);

	/* Print the indirection table */
	if (netif_msg_ifup(bp))
		bnx2x_debug_print_ind_table(bp, p);

	/* RSS keys */
	if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
		memcpy(&data->rss_key[0], &p->rss_key[0],
		       sizeof(data->rss_key));
		data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
	}

	/* Send the RSS update ramrod */
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
			   U64_HI(r->rdata_mapping),
			   U64_LO(r->rdata_mapping),
			   ETH_CONNECTION_TYPE);

	if (rc < 0)
		return rc;

	/* Ramrod completion is pending */
	return 1;
}
4088
/* Copy the last-configured RSS indirection table into @ind_table (must be
 * at least sizeof(rss_obj->ind_table) bytes).
 */
void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
			     u8 *ind_table)
{
	memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
}
4094
/* Configure RSS: mark the object pending, send the configuration via the
 * object's config_rss callback and optionally wait for completion.
 *
 * Returns 0 for a driver-only request, the callback's positive
 * "completion pending" value otherwise (or 0 after a successful wait),
 * or a negative error.
 */
int bnx2x_config_rss(struct bnx2x *bp,
		     struct bnx2x_config_rss_params *p)
{
	int rc;
	struct bnx2x_rss_config_obj *o = p->rss_obj;
	struct bnx2x_raw_obj *r = &o->raw;

	/* Do nothing if only driver cleanup was requested */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
		return 0;

	r->set_pending(r);

	rc = o->config_rss(bp, p);
	if (rc < 0) {
		r->clear_pending(r);
		return rc;
	}

	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
		rc = r->wait_comp(bp, r);

	return rc;
}
4119
4120
/* Initialize an RSS configuration object: raw-object state plus the
 * engine id and the config_rss callback.
 */
void bnx2x_init_rss_config_obj(struct bnx2x *bp,
			       struct bnx2x_rss_config_obj *rss_obj,
			       u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
			       void *rdata, dma_addr_t rdata_mapping,
			       int state, unsigned long *pstate,
			       bnx2x_obj_type type)
{
	bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
			   rdata_mapping, state, pstate, type);

	rss_obj->engine_id  = engine_id;
	rss_obj->config_rss = bnx2x_setup_rss;
}
4134
4135
4136
4137
4138
4139
4140
4141
4142
4143
4144
4145
4146
4147
4148
4149int bnx2x_queue_state_change(struct bnx2x *bp,
4150 struct bnx2x_queue_state_params *params)
4151{
4152 struct bnx2x_queue_sp_obj *o = params->q_obj;
4153 int rc, pending_bit;
4154 unsigned long *pending = &o->pending;
4155
4156
4157 if (o->check_transition(bp, o, params))
4158 return -EINVAL;
4159
4160
4161 pending_bit = o->set_pending(o, params);
4162
4163
4164 if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags))
4165 o->complete_cmd(bp, o, pending_bit);
4166 else {
4167
4168 rc = o->send_cmd(bp, params);
4169 if (rc) {
4170 o->next_state = BNX2X_Q_STATE_MAX;
4171 clear_bit(pending_bit, pending);
4172 smp_mb__after_clear_bit();
4173 return rc;
4174 }
4175
4176 if (test_bit(RAMROD_COMP_WAIT, ¶ms->ramrod_flags)) {
4177 rc = o->wait_comp(bp, o, pending_bit);
4178 if (rc)
4179 return rc;
4180
4181 return 0;
4182 }
4183 }
4184
4185 return !!test_bit(pending_bit, pending);
4186}
4187
4188
4189static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
4190 struct bnx2x_queue_state_params *params)
4191{
4192 enum bnx2x_queue_cmd cmd = params->cmd, bit;
4193
4194
4195
4196
4197 if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
4198 (cmd == BNX2X_Q_CMD_DEACTIVATE))
4199 bit = BNX2X_Q_CMD_UPDATE;
4200 else
4201 bit = cmd;
4202
4203 set_bit(bit, &obj->pending);
4204 return bit;
4205}
4206
/* Wait until the pending bit for @cmd is cleared by the completion path. */
static int bnx2x_queue_wait_comp(struct bnx2x *bp,
				 struct bnx2x_queue_sp_obj *o,
				 enum bnx2x_queue_cmd cmd)
{
	return bnx2x_state_wait(bp, cmd, &o->pending);
}
4213
4214
4215
4216
4217
4218
4219
4220
4221
4222
/* Complete a queue command: verify @cmd was actually pending, move the
 * object to next_state/next_tx_only and clear the pending bit (with a
 * write barrier so the state update is visible before the bit clears).
 *
 * Returns 0 on success, -EINVAL on an unexpected completion.
 */
static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
				struct bnx2x_queue_sp_obj *o,
				enum bnx2x_queue_cmd cmd)
{
	unsigned long cur_pending = o->pending;

	if (!test_and_clear_bit(cmd, &cur_pending)) {
		BNX2X_ERR("Bad MC reply %d for queue %d in state %d "
			  "pending 0x%lx, next_state %d\n", cmd,
			  o->cids[BNX2X_PRIMARY_CID_INDEX],
			  o->state, cur_pending, o->next_state);
		return -EINVAL;
	}

	if (o->next_tx_only >= o->max_cos)
		/* >= because tx only must always be smaller than cos since the
		 * primary connection supports COS 0
		 */
		BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
			   o->next_tx_only, o->max_cos);

	DP(BNX2X_MSG_SP, "Completing command %d for queue %d, "
			 "setting state to %d\n", cmd,
			 o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);

	if (o->next_tx_only)  /* print num tx-only if any exist */
		DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d",
			   o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);

	o->state = o->next_state;
	o->num_tx_only = o->next_tx_only;
	o->next_state = BNX2X_Q_STATE_MAX;

	/* It's important that o->state and o->next_state are
	 * updated before o->pending.
	 */
	wmb();

	clear_bit(cmd, &o->pending);
	smp_mb__after_clear_bit();

	return 0;
}
4266
4267static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
4268 struct bnx2x_queue_state_params *cmd_params,
4269 struct client_init_ramrod_data *data)
4270{
4271 struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
4272
4273
4274
4275
4276 data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, ¶ms->flags) *
4277 CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4278}
4279
4280static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
4281 struct bnx2x_queue_sp_obj *o,
4282 struct bnx2x_general_setup_params *params,
4283 struct client_init_general_data *gen_data,
4284 unsigned long *flags)
4285{
4286 gen_data->client_id = o->cl_id;
4287
4288 if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
4289 gen_data->statistics_counter_id =
4290 params->stat_id;
4291 gen_data->statistics_en_flg = 1;
4292 gen_data->statistics_zero_flg =
4293 test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
4294 } else
4295 gen_data->statistics_counter_id =
4296 DISABLE_STATISTIC_COUNTER_ID_VALUE;
4297
4298 gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
4299 gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
4300 gen_data->sp_client_id = params->spcl_id;
4301 gen_data->mtu = cpu_to_le16(params->mtu);
4302 gen_data->func_id = o->func_id;
4303
4304
4305 gen_data->cos = params->cos;
4306
4307 gen_data->traffic_type =
4308 test_bit(BNX2X_Q_FLG_FCOE, flags) ?
4309 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4310
4311 DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d",
4312 gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4313}
4314
4315static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
4316 struct bnx2x_txq_setup_params *params,
4317 struct client_init_tx_data *tx_data,
4318 unsigned long *flags)
4319{
4320 tx_data->enforce_security_flg =
4321 test_bit(BNX2X_Q_FLG_TX_SEC, flags);
4322 tx_data->default_vlan =
4323 cpu_to_le16(params->default_vlan);
4324 tx_data->default_vlan_flg =
4325 test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
4326 tx_data->tx_switching_flg =
4327 test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
4328 tx_data->anti_spoofing_flg =
4329 test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
4330 tx_data->tx_status_block_id = params->fw_sb_id;
4331 tx_data->tx_sb_index_number = params->sb_cq_index;
4332 tx_data->tss_leading_client_id = params->tss_leading_cl_id;
4333
4334 tx_data->tx_bd_page_base.lo =
4335 cpu_to_le32(U64_LO(params->dscr_map));
4336 tx_data->tx_bd_page_base.hi =
4337 cpu_to_le32(U64_HI(params->dscr_map));
4338
4339
4340 tx_data->state = 0;
4341}
4342
4343static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
4344 struct rxq_pause_params *params,
4345 struct client_init_rx_data *rx_data)
4346{
4347
4348 rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
4349 rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
4350 rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
4351 rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
4352 rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
4353 rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
4354 rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
4355}
4356
/* Fill the Rx section of the client init ramrod data from the
 * caller-supplied rxq setup parameters and flags.
 */
static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
				struct bnx2x_rxq_setup_params *params,
				struct client_init_rx_data *rx_data,
				unsigned long *flags)
{
	/* Rx data */

	/* IPv4 TPA only here; IPv6 TPA is added by the E2-specific fill */
	rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
				CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
	rx_data->vmqueue_mode_en_flg = 0;

	rx_data->cache_line_alignment_log_size =
		params->cache_line_log;
	rx_data->enable_dynamic_hc =
		test_bit(BNX2X_Q_FLG_DHC, flags);
	rx_data->max_sges_for_packet = params->max_sges_pkt;
	rx_data->client_qzone_id = params->cl_qzone_id;
	rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);

	/* Always start in DROP_ALL mode; rx filtering is configured later */
	rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
				     CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);

	/* We don't set drop flags */
	rx_data->drop_ip_cs_err_flg = 0;
	rx_data->drop_tcp_cs_err_flg = 0;
	rx_data->drop_ttl0_flg = 0;
	rx_data->drop_udp_cs_err_flg = 0;
	rx_data->inner_vlan_removal_enable_flg =
		test_bit(BNX2X_Q_FLG_VLAN, flags);
	rx_data->outer_vlan_removal_enable_flg =
		test_bit(BNX2X_Q_FLG_OV, flags);
	rx_data->status_block_id = params->fw_sb_id;
	rx_data->rx_sb_index_number = params->sb_cq_index;
	rx_data->max_tpa_queues = params->max_tpa_queues;
	rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
	rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
	/* DMA ring base addresses, split into hi/lo 32-bit halves */
	rx_data->bd_page_base.lo =
		cpu_to_le32(U64_LO(params->dscr_map));
	rx_data->bd_page_base.hi =
		cpu_to_le32(U64_HI(params->dscr_map));
	rx_data->sge_page_base.lo =
		cpu_to_le32(U64_LO(params->sge_map));
	rx_data->sge_page_base.hi =
		cpu_to_le32(U64_HI(params->sge_map));
	rx_data->cqe_page_base.lo =
		cpu_to_le32(U64_LO(params->rcq_map));
	rx_data->cqe_page_base.hi =
		cpu_to_le32(U64_HI(params->rcq_map));
	rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);

	if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
		rx_data->approx_mcast_engine_id = o->func_id;
		rx_data->is_approx_mcast = 1;
	}

	rx_data->rss_engine_id = params->rss_engine_id;

	/* silent vlan removal */
	rx_data->silent_vlan_removal_flg =
		test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
	rx_data->silent_vlan_value =
		cpu_to_le16(params->silent_removal_value);
	rx_data->silent_vlan_mask =
		cpu_to_le16(params->silent_removal_mask);

}
4423
4424
4425static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
4426 struct bnx2x_queue_state_params *cmd_params,
4427 struct client_init_ramrod_data *data)
4428{
4429 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4430 &cmd_params->params.setup.gen_params,
4431 &data->general,
4432 &cmd_params->params.setup.flags);
4433
4434 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4435 &cmd_params->params.setup.txq_params,
4436 &data->tx,
4437 &cmd_params->params.setup.flags);
4438
4439 bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
4440 &cmd_params->params.setup.rxq_params,
4441 &data->rx,
4442 &cmd_params->params.setup.flags);
4443
4444 bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
4445 &cmd_params->params.setup.pause_params,
4446 &data->rx);
4447}
4448
4449
4450static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
4451 struct bnx2x_queue_state_params *cmd_params,
4452 struct tx_queue_init_ramrod_data *data)
4453{
4454 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4455 &cmd_params->params.tx_only.gen_params,
4456 &data->general,
4457 &cmd_params->params.tx_only.flags);
4458
4459 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4460 &cmd_params->params.tx_only.txq_params,
4461 &data->tx,
4462 &cmd_params->params.tx_only.flags);
4463
4464 DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x",cmd_params->q_obj->cids[0],
4465 data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi);
4466}
4467
4468
4469
4470
4471
4472
4473
4474
4475
4476
4477
4478
4479static inline int bnx2x_q_init(struct bnx2x *bp,
4480 struct bnx2x_queue_state_params *params)
4481{
4482 struct bnx2x_queue_sp_obj *o = params->q_obj;
4483 struct bnx2x_queue_init_params *init = ¶ms->params.init;
4484 u16 hc_usec;
4485 u8 cos;
4486
4487
4488 if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
4489 test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
4490 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
4491
4492 bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
4493 init->tx.sb_cq_index,
4494 !test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
4495 hc_usec);
4496 }
4497
4498
4499 if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
4500 test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
4501 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
4502
4503 bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
4504 init->rx.sb_cq_index,
4505 !test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
4506 hc_usec);
4507 }
4508
4509
4510 for (cos = 0; cos < o->max_cos; cos++) {
4511 DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d",
4512 o->cids[cos], cos);
4513 DP(BNX2X_MSG_SP, "context pointer %p", init->cxts[cos]);
4514 bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
4515 }
4516
4517
4518 o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
4519
4520 mmiowb();
4521 smp_mb();
4522
4523 return 0;
4524}
4525
4526static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4527 struct bnx2x_queue_state_params *params)
4528{
4529 struct bnx2x_queue_sp_obj *o = params->q_obj;
4530 struct client_init_ramrod_data *rdata =
4531 (struct client_init_ramrod_data *)o->rdata;
4532 dma_addr_t data_mapping = o->rdata_mapping;
4533 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4534
4535
4536 memset(rdata, 0, sizeof(*rdata));
4537
4538
4539 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4540
4541
4542
4543
4544
4545
4546
4547
4548
4549 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4550 U64_HI(data_mapping),
4551 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4552}
4553
4554static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4555 struct bnx2x_queue_state_params *params)
4556{
4557 struct bnx2x_queue_sp_obj *o = params->q_obj;
4558 struct client_init_ramrod_data *rdata =
4559 (struct client_init_ramrod_data *)o->rdata;
4560 dma_addr_t data_mapping = o->rdata_mapping;
4561 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4562
4563
4564 memset(rdata, 0, sizeof(*rdata));
4565
4566
4567 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4568 bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4569
4570
4571
4572
4573
4574
4575
4576
4577
4578 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4579 U64_HI(data_mapping),
4580 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4581}
4582
4583static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
4584 struct bnx2x_queue_state_params *params)
4585{
4586 struct bnx2x_queue_sp_obj *o = params->q_obj;
4587 struct tx_queue_init_ramrod_data *rdata =
4588 (struct tx_queue_init_ramrod_data *)o->rdata;
4589 dma_addr_t data_mapping = o->rdata_mapping;
4590 int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4591 struct bnx2x_queue_setup_tx_only_params *tx_only_params =
4592 ¶ms->params.tx_only;
4593 u8 cid_index = tx_only_params->cid_index;
4594
4595
4596 if (cid_index >= o->max_cos) {
4597 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4598 o->cl_id, cid_index);
4599 return -EINVAL;
4600 }
4601
4602 DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d",
4603 tx_only_params->gen_params.cos,
4604 tx_only_params->gen_params.spcl_id);
4605
4606
4607 memset(rdata, 0, sizeof(*rdata));
4608
4609
4610 bnx2x_q_fill_setup_tx_only(bp, params, rdata);
4611
4612 DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d,"
4613 "sp-client id %d, cos %d",
4614 o->cids[cid_index],
4615 rdata->general.client_id,
4616 rdata->general.sp_client_id, rdata->general.cos);
4617
4618
4619
4620
4621
4622
4623
4624
4625
4626 return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
4627 U64_HI(data_mapping),
4628 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4629}
4630
4631static void bnx2x_q_fill_update_data(struct bnx2x *bp,
4632 struct bnx2x_queue_sp_obj *obj,
4633 struct bnx2x_queue_update_params *params,
4634 struct client_update_ramrod_data *data)
4635{
4636
4637 data->client_id = obj->cl_id;
4638
4639
4640 data->func_id = obj->func_id;
4641
4642
4643 data->default_vlan = cpu_to_le16(params->def_vlan);
4644
4645
4646 data->inner_vlan_removal_enable_flg =
4647 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, ¶ms->update_flags);
4648 data->inner_vlan_removal_change_flg =
4649 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
4650 ¶ms->update_flags);
4651
4652
4653 data->outer_vlan_removal_enable_flg =
4654 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, ¶ms->update_flags);
4655 data->outer_vlan_removal_change_flg =
4656 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
4657 ¶ms->update_flags);
4658
4659
4660
4661
4662 data->anti_spoofing_enable_flg =
4663 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, ¶ms->update_flags);
4664 data->anti_spoofing_change_flg =
4665 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, ¶ms->update_flags);
4666
4667
4668 data->activate_flg =
4669 test_bit(BNX2X_Q_UPDATE_ACTIVATE, ¶ms->update_flags);
4670 data->activate_change_flg =
4671 test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, ¶ms->update_flags);
4672
4673
4674 data->default_vlan_enable_flg =
4675 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, ¶ms->update_flags);
4676 data->default_vlan_change_flg =
4677 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
4678 ¶ms->update_flags);
4679
4680
4681 data->silent_vlan_change_flg =
4682 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4683 ¶ms->update_flags);
4684 data->silent_vlan_removal_flg =
4685 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, ¶ms->update_flags);
4686 data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
4687 data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
4688}
4689
4690static inline int bnx2x_q_send_update(struct bnx2x *bp,
4691 struct bnx2x_queue_state_params *params)
4692{
4693 struct bnx2x_queue_sp_obj *o = params->q_obj;
4694 struct client_update_ramrod_data *rdata =
4695 (struct client_update_ramrod_data *)o->rdata;
4696 dma_addr_t data_mapping = o->rdata_mapping;
4697 struct bnx2x_queue_update_params *update_params =
4698 ¶ms->params.update;
4699 u8 cid_index = update_params->cid_index;
4700
4701 if (cid_index >= o->max_cos) {
4702 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4703 o->cl_id, cid_index);
4704 return -EINVAL;
4705 }
4706
4707
4708
4709 memset(rdata, 0, sizeof(*rdata));
4710
4711
4712 bnx2x_q_fill_update_data(bp, o, update_params, rdata);
4713
4714
4715
4716
4717
4718
4719
4720
4721
4722 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4723 o->cids[cid_index], U64_HI(data_mapping),
4724 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4725}
4726
4727
4728
4729
4730
4731
4732
4733
4734
4735static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
4736 struct bnx2x_queue_state_params *params)
4737{
4738 struct bnx2x_queue_update_params *update = ¶ms->params.update;
4739
4740 memset(update, 0, sizeof(*update));
4741
4742 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4743
4744 return bnx2x_q_send_update(bp, params);
4745}
4746
4747
4748
4749
4750
4751
4752
4753
4754
4755static inline int bnx2x_q_send_activate(struct bnx2x *bp,
4756 struct bnx2x_queue_state_params *params)
4757{
4758 struct bnx2x_queue_update_params *update = ¶ms->params.update;
4759
4760 memset(update, 0, sizeof(*update));
4761
4762 __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
4763 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4764
4765 return bnx2x_q_send_update(bp, params);
4766}
4767
static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	/* TODO: Not implemented yet. */
	return -1;
}
4774
4775static inline int bnx2x_q_send_halt(struct bnx2x *bp,
4776 struct bnx2x_queue_state_params *params)
4777{
4778 struct bnx2x_queue_sp_obj *o = params->q_obj;
4779
4780 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
4781 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
4782 ETH_CONNECTION_TYPE);
4783}
4784
4785static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
4786 struct bnx2x_queue_state_params *params)
4787{
4788 struct bnx2x_queue_sp_obj *o = params->q_obj;
4789 u8 cid_idx = params->params.cfc_del.cid_index;
4790
4791 if (cid_idx >= o->max_cos) {
4792 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4793 o->cl_id, cid_idx);
4794 return -EINVAL;
4795 }
4796
4797 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
4798 o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
4799}
4800
4801static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
4802 struct bnx2x_queue_state_params *params)
4803{
4804 struct bnx2x_queue_sp_obj *o = params->q_obj;
4805 u8 cid_index = params->params.terminate.cid_index;
4806
4807 if (cid_index >= o->max_cos) {
4808 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4809 o->cl_id, cid_index);
4810 return -EINVAL;
4811 }
4812
4813 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
4814 o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
4815}
4816
4817static inline int bnx2x_q_send_empty(struct bnx2x *bp,
4818 struct bnx2x_queue_state_params *params)
4819{
4820 struct bnx2x_queue_sp_obj *o = params->q_obj;
4821
4822 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
4823 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
4824 ETH_CONNECTION_TYPE);
4825}
4826
/* Dispatch the commands common to all chip revisions to their senders.
 * SETUP is chip-specific and handled by the per-chip wrappers below.
 */
static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_Q_CMD_INIT:
		return bnx2x_q_init(bp, params);
	case BNX2X_Q_CMD_SETUP_TX_ONLY:
		return bnx2x_q_send_setup_tx_only(bp, params);
	case BNX2X_Q_CMD_DEACTIVATE:
		return bnx2x_q_send_deactivate(bp, params);
	case BNX2X_Q_CMD_ACTIVATE:
		return bnx2x_q_send_activate(bp, params);
	case BNX2X_Q_CMD_UPDATE:
		return bnx2x_q_send_update(bp, params);
	case BNX2X_Q_CMD_UPDATE_TPA:
		return bnx2x_q_send_update_tpa(bp, params);
	case BNX2X_Q_CMD_HALT:
		return bnx2x_q_send_halt(bp, params);
	case BNX2X_Q_CMD_CFC_DEL:
		return bnx2x_q_send_cfc_del(bp, params);
	case BNX2X_Q_CMD_TERMINATE:
		return bnx2x_q_send_terminate(bp, params);
	case BNX2X_Q_CMD_EMPTY:
		return bnx2x_q_send_empty(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}
4856
/* E1x command sender: SETUP uses the E1x-specific ramrod data; all
 * other commands fall through to the chip-independent dispatcher.
 */
static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
				    struct bnx2x_queue_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_Q_CMD_SETUP:
		return bnx2x_q_send_setup_e1x(bp, params);
	case BNX2X_Q_CMD_INIT:
	case BNX2X_Q_CMD_SETUP_TX_ONLY:
	case BNX2X_Q_CMD_DEACTIVATE:
	case BNX2X_Q_CMD_ACTIVATE:
	case BNX2X_Q_CMD_UPDATE:
	case BNX2X_Q_CMD_UPDATE_TPA:
	case BNX2X_Q_CMD_HALT:
	case BNX2X_Q_CMD_CFC_DEL:
	case BNX2X_Q_CMD_TERMINATE:
	case BNX2X_Q_CMD_EMPTY:
		return bnx2x_queue_send_cmd_cmn(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}
4879
/* E2 command sender: SETUP uses the E2-specific ramrod data; all other
 * commands fall through to the chip-independent dispatcher.
 */
static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
				   struct bnx2x_queue_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_Q_CMD_SETUP:
		return bnx2x_q_send_setup_e2(bp, params);
	case BNX2X_Q_CMD_INIT:
	case BNX2X_Q_CMD_SETUP_TX_ONLY:
	case BNX2X_Q_CMD_DEACTIVATE:
	case BNX2X_Q_CMD_ACTIVATE:
	case BNX2X_Q_CMD_UPDATE:
	case BNX2X_Q_CMD_UPDATE_TPA:
	case BNX2X_Q_CMD_HALT:
	case BNX2X_Q_CMD_CFC_DEL:
	case BNX2X_Q_CMD_TERMINATE:
	case BNX2X_Q_CMD_EMPTY:
		return bnx2x_queue_send_cmd_cmn(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}
4902
4903
4904
4905
4906
4907
4908
4909
4910
4911
4912
4913
4914
4915
4916
4917
4918
4919static int bnx2x_queue_chk_transition(struct bnx2x *bp,
4920 struct bnx2x_queue_sp_obj *o,
4921 struct bnx2x_queue_state_params *params)
4922{
4923 enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
4924 enum bnx2x_queue_cmd cmd = params->cmd;
4925 struct bnx2x_queue_update_params *update_params =
4926 ¶ms->params.update;
4927 u8 next_tx_only = o->num_tx_only;
4928
4929
4930
4931
4932
4933 if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) {
4934 o->pending = 0;
4935 o->next_state = BNX2X_Q_STATE_MAX;
4936 }
4937
4938
4939
4940
4941
4942 if (o->pending)
4943 return -EBUSY;
4944
4945 switch (state) {
4946 case BNX2X_Q_STATE_RESET:
4947 if (cmd == BNX2X_Q_CMD_INIT)
4948 next_state = BNX2X_Q_STATE_INITIALIZED;
4949
4950 break;
4951 case BNX2X_Q_STATE_INITIALIZED:
4952 if (cmd == BNX2X_Q_CMD_SETUP) {
4953 if (test_bit(BNX2X_Q_FLG_ACTIVE,
4954 ¶ms->params.setup.flags))
4955 next_state = BNX2X_Q_STATE_ACTIVE;
4956 else
4957 next_state = BNX2X_Q_STATE_INACTIVE;
4958 }
4959
4960 break;
4961 case BNX2X_Q_STATE_ACTIVE:
4962 if (cmd == BNX2X_Q_CMD_DEACTIVATE)
4963 next_state = BNX2X_Q_STATE_INACTIVE;
4964
4965 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
4966 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
4967 next_state = BNX2X_Q_STATE_ACTIVE;
4968
4969 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
4970 next_state = BNX2X_Q_STATE_MULTI_COS;
4971 next_tx_only = 1;
4972 }
4973
4974 else if (cmd == BNX2X_Q_CMD_HALT)
4975 next_state = BNX2X_Q_STATE_STOPPED;
4976
4977 else if (cmd == BNX2X_Q_CMD_UPDATE) {
4978
4979
4980
4981 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
4982 &update_params->update_flags) &&
4983 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
4984 &update_params->update_flags))
4985 next_state = BNX2X_Q_STATE_INACTIVE;
4986 else
4987 next_state = BNX2X_Q_STATE_ACTIVE;
4988 }
4989
4990 break;
4991 case BNX2X_Q_STATE_MULTI_COS:
4992 if (cmd == BNX2X_Q_CMD_TERMINATE)
4993 next_state = BNX2X_Q_STATE_MCOS_TERMINATED;
4994
4995 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
4996 next_state = BNX2X_Q_STATE_MULTI_COS;
4997 next_tx_only = o->num_tx_only + 1;
4998 }
4999
5000 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5001 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5002 next_state = BNX2X_Q_STATE_MULTI_COS;
5003
5004 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5005
5006
5007
5008 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5009 &update_params->update_flags) &&
5010 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5011 &update_params->update_flags))
5012 next_state = BNX2X_Q_STATE_INACTIVE;
5013 else
5014 next_state = BNX2X_Q_STATE_MULTI_COS;
5015 }
5016
5017 break;
5018 case BNX2X_Q_STATE_MCOS_TERMINATED:
5019 if (cmd == BNX2X_Q_CMD_CFC_DEL) {
5020 next_tx_only = o->num_tx_only - 1;
5021 if (next_tx_only == 0)
5022 next_state = BNX2X_Q_STATE_ACTIVE;
5023 else
5024 next_state = BNX2X_Q_STATE_MULTI_COS;
5025 }
5026
5027 break;
5028 case BNX2X_Q_STATE_INACTIVE:
5029 if (cmd == BNX2X_Q_CMD_ACTIVATE)
5030 next_state = BNX2X_Q_STATE_ACTIVE;
5031
5032 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5033 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5034 next_state = BNX2X_Q_STATE_INACTIVE;
5035
5036 else if (cmd == BNX2X_Q_CMD_HALT)
5037 next_state = BNX2X_Q_STATE_STOPPED;
5038
5039 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5040
5041
5042
5043 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5044 &update_params->update_flags) &&
5045 test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5046 &update_params->update_flags)){
5047 if (o->num_tx_only == 0)
5048 next_state = BNX2X_Q_STATE_ACTIVE;
5049 else
5050 next_state = BNX2X_Q_STATE_MULTI_COS;
5051 } else
5052 next_state = BNX2X_Q_STATE_INACTIVE;
5053 }
5054
5055 break;
5056 case BNX2X_Q_STATE_STOPPED:
5057 if (cmd == BNX2X_Q_CMD_TERMINATE)
5058 next_state = BNX2X_Q_STATE_TERMINATED;
5059
5060 break;
5061 case BNX2X_Q_STATE_TERMINATED:
5062 if (cmd == BNX2X_Q_CMD_CFC_DEL)
5063 next_state = BNX2X_Q_STATE_RESET;
5064
5065 break;
5066 default:
5067 BNX2X_ERR("Illegal state: %d\n", state);
5068 }
5069
5070
5071 if (next_state != BNX2X_Q_STATE_MAX) {
5072 DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
5073 state, cmd, next_state);
5074 o->next_state = next_state;
5075 o->next_tx_only = next_tx_only;
5076 return 0;
5077 }
5078
5079 DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
5080
5081 return -EINVAL;
5082}
5083
5084void bnx2x_init_queue_obj(struct bnx2x *bp,
5085 struct bnx2x_queue_sp_obj *obj,
5086 u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
5087 void *rdata,
5088 dma_addr_t rdata_mapping, unsigned long type)
5089{
5090 memset(obj, 0, sizeof(*obj));
5091
5092
5093 BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);
5094
5095 memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5096 obj->max_cos = cid_cnt;
5097 obj->cl_id = cl_id;
5098 obj->func_id = func_id;
5099 obj->rdata = rdata;
5100 obj->rdata_mapping = rdata_mapping;
5101 obj->type = type;
5102 obj->next_state = BNX2X_Q_STATE_MAX;
5103
5104 if (CHIP_IS_E1x(bp))
5105 obj->send_cmd = bnx2x_queue_send_cmd_e1x;
5106 else
5107 obj->send_cmd = bnx2x_queue_send_cmd_e2;
5108
5109 obj->check_transition = bnx2x_queue_chk_transition;
5110
5111 obj->complete_cmd = bnx2x_queue_comp_cmd;
5112 obj->wait_comp = bnx2x_queue_wait_comp;
5113 obj->set_pending = bnx2x_queue_set_pending;
5114}
5115
/* Record the cid of the connection serving the given cos index. */
void bnx2x_queue_set_cos_cid(struct bnx2x *bp,
			     struct bnx2x_queue_sp_obj *obj,
			     u32 cid, u8 index)
{
	obj->cids[index] = cid;
}
5122
5123
/* Return the current function state, or BNX2X_F_STATE_MAX if a state
 * change is still in progress (i.e. there are pending commands).
 */
enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
					   struct bnx2x_func_sp_obj *o)
{
	/* in the middle of transaction - return INVALID state */
	if (o->pending)
		return BNX2X_F_STATE_MAX;

	/* unsure the order of reading of o->pending and o->state
	 * o->pending should be read first
	 */
	rmb();

	return o->state;
}
5139
/* Block until the 'cmd' pending bit of the function object is cleared,
 * i.e. until the previously posted ramrod for this command completes.
 */
static int bnx2x_func_wait_comp(struct bnx2x *bp,
				struct bnx2x_func_sp_obj *o,
				enum bnx2x_func_cmd cmd)
{
	return bnx2x_state_wait(bp, cmd, &o->pending);
}
5146
5147
5148
5149
5150
5151
5152
5153
5154
5155
5156
/**
 * bnx2x_func_state_change_comp - complete the state machine transition
 *
 * @bp:		device handle
 * @o:		function object
 * @cmd:	command echoed back by the FW
 *
 * Called on state change transition. Completes the state machine with
 * the new state and clears the pending bit (which may wake a waiter).
 *
 * Returns zero on success, -EINVAL on an unexpected completion.
 */
static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
					       struct bnx2x_func_sp_obj *o,
					       enum bnx2x_func_cmd cmd)
{
	unsigned long cur_pending = o->pending;

	/* The FW reply must match a command we actually have pending */
	if (!test_and_clear_bit(cmd, &cur_pending)) {
		BNX2X_ERR("Bad MC reply %d for func %d in state %d "
			  "pending 0x%lx, next_state %d\n", cmd, BP_FUNC(bp),
			  o->state, cur_pending, o->next_state);
		return -EINVAL;
	}

	DP(BNX2X_MSG_SP, "Completing command %d for func %d, setting state to "
			 "%d\n", cmd, BP_FUNC(bp), o->next_state);

	o->state = o->next_state;
	o->next_state = BNX2X_F_STATE_MAX;

	/* It's important that o->state and o->next_state are updated
	 * before the pending bit is cleared below: a waiter may read
	 * them as soon as the bit drops.
	 */
	wmb();

	clear_bit(cmd, &o->pending);
	smp_mb__after_clear_bit();

	return 0;
}
5186
5187
5188
5189
5190
5191
5192
5193
5194
5195
5196static int bnx2x_func_comp_cmd(struct bnx2x *bp,
5197 struct bnx2x_func_sp_obj *o,
5198 enum bnx2x_func_cmd cmd)
5199{
5200
5201
5202
5203 int rc = bnx2x_func_state_change_comp(bp, o, cmd);
5204 return rc;
5205}
5206
5207
5208
5209
5210
5211
5212
5213
5214
5215
5216
5217
5218
5219
5220
5221
5222static int bnx2x_func_chk_transition(struct bnx2x *bp,
5223 struct bnx2x_func_sp_obj *o,
5224 struct bnx2x_func_state_params *params)
5225{
5226 enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
5227 enum bnx2x_func_cmd cmd = params->cmd;
5228
5229
5230
5231
5232
5233 if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) {
5234 o->pending = 0;
5235 o->next_state = BNX2X_F_STATE_MAX;
5236 }
5237
5238
5239
5240
5241
5242 if (o->pending)
5243 return -EBUSY;
5244
5245 switch (state) {
5246 case BNX2X_F_STATE_RESET:
5247 if (cmd == BNX2X_F_CMD_HW_INIT)
5248 next_state = BNX2X_F_STATE_INITIALIZED;
5249
5250 break;
5251 case BNX2X_F_STATE_INITIALIZED:
5252 if (cmd == BNX2X_F_CMD_START)
5253 next_state = BNX2X_F_STATE_STARTED;
5254
5255 else if (cmd == BNX2X_F_CMD_HW_RESET)
5256 next_state = BNX2X_F_STATE_RESET;
5257
5258 break;
5259 case BNX2X_F_STATE_STARTED:
5260 if (cmd == BNX2X_F_CMD_STOP)
5261 next_state = BNX2X_F_STATE_INITIALIZED;
5262 else if (cmd == BNX2X_F_CMD_TX_STOP)
5263 next_state = BNX2X_F_STATE_TX_STOPPED;
5264
5265 break;
5266 case BNX2X_F_STATE_TX_STOPPED:
5267 if (cmd == BNX2X_F_CMD_TX_START)
5268 next_state = BNX2X_F_STATE_STARTED;
5269
5270 break;
5271 default:
5272 BNX2X_ERR("Unknown state: %d\n", state);
5273 }
5274
5275
5276 if (next_state != BNX2X_F_STATE_MAX) {
5277 DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
5278 state, cmd, next_state);
5279 o->next_state = next_state;
5280 return 0;
5281 }
5282
5283 DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
5284 state, cmd);
5285
5286 return -EINVAL;
5287}
5288
5289
5290
5291
5292
5293
5294
5295
5296
5297
5298
/* Run the function-level HW init via the driver-supplied callbacks.
 * Returns the callback's return value (0 on success).
 */
static inline int bnx2x_func_init_func(struct bnx2x *bp,
				       const struct bnx2x_func_sp_drv_ops *drv)
{
	return drv->init_hw_func(bp);
}
5304
5305
5306
5307
5308
5309
5310
5311
5312
5313
5314
5315
5316static inline int bnx2x_func_init_port(struct bnx2x *bp,
5317 const struct bnx2x_func_sp_drv_ops *drv)
5318{
5319 int rc = drv->init_hw_port(bp);
5320 if (rc)
5321 return rc;
5322
5323 return bnx2x_func_init_func(bp, drv);
5324}
5325
5326
5327
5328
5329
5330
5331
5332
5333
5334
5335
5336static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
5337 const struct bnx2x_func_sp_drv_ops *drv)
5338{
5339 int rc = drv->init_hw_cmn_chip(bp);
5340 if (rc)
5341 return rc;
5342
5343 return bnx2x_func_init_port(bp, drv);
5344}
5345
5346
5347
5348
5349
5350
5351
5352
5353
5354
5355
5356static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
5357 const struct bnx2x_func_sp_drv_ops *drv)
5358{
5359 int rc = drv->init_hw_cmn(bp);
5360 if (rc)
5361 return rc;
5362
5363 return bnx2x_func_init_port(bp, drv);
5364}
5365
/* Perform the HW_INIT command: prepare the gunzip buffer, load the FW,
 * then run the HW init sequence matching the load phase the MCP granted
 * us.  Uses a goto-cleanup ladder to release the FW and gunzip buffer on
 * every path, and completes the command in the driver on success (no
 * ramrod is posted for HW_INIT).
 */
static int bnx2x_func_hw_init(struct bnx2x *bp,
			      struct bnx2x_func_state_params *params)
{
	u32 load_code = params->params.hw_init.load_phase;
	struct bnx2x_func_sp_obj *o = params->f_obj;
	const struct bnx2x_func_sp_drv_ops *drv = o->drv;
	int rc = 0;

	DP(BNX2X_MSG_SP, "function %d load_code %x\n",
	   BP_ABS_FUNC(bp), load_code);

	/* Prepare buffers for unzipping the FW */
	rc = drv->gunzip_init(bp);
	if (rc)
		return rc;

	/* Prepare FW */
	rc = drv->init_fw(bp);
	if (rc) {
		BNX2X_ERR("Error loading firmware\n");
		goto fw_init_err;
	}

	/* Handle the beginning of COMMON_XXX pases separatelly... */
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		rc = bnx2x_func_init_cmn_chip(bp, drv);
		if (rc)
			goto init_hw_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_func_init_cmn(bp, drv);
		if (rc)
			goto init_hw_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_PORT:
		rc = bnx2x_func_init_port(bp, drv);
		if (rc)
			goto init_hw_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		rc = bnx2x_func_init_func(bp, drv);
		if (rc)
			goto init_hw_err;

		break;
	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		rc = -EINVAL;
	}

init_hw_err:
	drv->release_fw(bp);

fw_init_err:
	drv->gunzip_end(bp);

	/* In case of success, complete the comand immediatelly: no ramrods
	 * have been sent.
	 */
	if (!rc)
		o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);

	return rc;
}
5434
5435
5436
5437
5438
5439
5440
5441
5442
5443
/* Reset the function-level HW via the driver-supplied callback. */
static inline void bnx2x_func_reset_func(struct bnx2x *bp,
					const struct bnx2x_func_sp_drv_ops *drv)
{
	drv->reset_hw_func(bp);
}
5449
5450
5451
5452
5453
5454
5455
5456
5457
5458
5459
5460
5461
5462
5463
5464
/* Reset the port-level HW, then the function-level HW: a PORT reset
 * implies a FUNCTION reset as well (mirror of bnx2x_func_init_port()).
 */
static inline void bnx2x_func_reset_port(struct bnx2x *bp,
					const struct bnx2x_func_sp_drv_ops *drv)
{
	drv->reset_hw_port(bp);
	bnx2x_func_reset_func(bp, drv);
}
5471
5472
5473
5474
5475
5476
5477
5478
5479
5480
5481
/* Reset port/function HW first, then the common HW: a COMMON reset
 * implies a PORT (and hence FUNCTION) reset as well.
 */
static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
					const struct bnx2x_func_sp_drv_ops *drv)
{
	bnx2x_func_reset_port(bp, drv);
	drv->reset_hw_cmn(bp);
}
5488
5489
5490static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
5491 struct bnx2x_func_state_params *params)
5492{
5493 u32 reset_phase = params->params.hw_reset.reset_phase;
5494 struct bnx2x_func_sp_obj *o = params->f_obj;
5495 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5496
5497 DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp),
5498 reset_phase);
5499
5500 switch (reset_phase) {
5501 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5502 bnx2x_func_reset_cmn(bp, drv);
5503 break;
5504 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5505 bnx2x_func_reset_port(bp, drv);
5506 break;
5507 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5508 bnx2x_func_reset_func(bp, drv);
5509 break;
5510 default:
5511 BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
5512 reset_phase);
5513 break;
5514 }
5515
5516
5517 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
5518
5519 return 0;
5520}
5521
5522static inline int bnx2x_func_send_start(struct bnx2x *bp,
5523 struct bnx2x_func_state_params *params)
5524{
5525 struct bnx2x_func_sp_obj *o = params->f_obj;
5526 struct function_start_data *rdata =
5527 (struct function_start_data *)o->rdata;
5528 dma_addr_t data_mapping = o->rdata_mapping;
5529 struct bnx2x_func_start_params *start_params = ¶ms->params.start;
5530
5531 memset(rdata, 0, sizeof(*rdata));
5532
5533
5534 rdata->function_mode = cpu_to_le16(start_params->mf_mode);
5535 rdata->sd_vlan_tag = start_params->sd_vlan_tag;
5536 rdata->path_id = BP_PATH(bp);
5537 rdata->network_cos_mode = start_params->network_cos_mode;
5538
5539
5540
5541
5542
5543
5544
5545
5546
5547 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5548 U64_HI(data_mapping),
5549 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5550}
5551
/**
 * bnx2x_func_send_stop - post a FUNCTION_STOP ramrod
 *
 * @bp:		device handle
 * @params:	unused here; kept for the common send-callback signature
 *
 * This ramrod carries no data, hence the zero CID and data addresses.
 */
static inline int bnx2x_func_send_stop(struct bnx2x *bp,
				       struct bnx2x_func_state_params *params)
{
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
			     NONE_CONNECTION_TYPE);
}
5558
/**
 * bnx2x_func_send_tx_stop - post a STOP_TRAFFIC ramrod
 *
 * @bp:		device handle
 * @params:	unused here; kept for the common send-callback signature
 *
 * This ramrod carries no data, hence the zero CID and data addresses.
 */
static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
					  struct bnx2x_func_state_params *params)
{
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
			     NONE_CONNECTION_TYPE);
}
5565static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
5566 struct bnx2x_func_state_params *params)
5567{
5568 struct bnx2x_func_sp_obj *o = params->f_obj;
5569 struct flow_control_configuration *rdata =
5570 (struct flow_control_configuration *)o->rdata;
5571 dma_addr_t data_mapping = o->rdata_mapping;
5572 struct bnx2x_func_tx_start_params *tx_start_params =
5573 ¶ms->params.tx_start;
5574 int i;
5575
5576 memset(rdata, 0, sizeof(*rdata));
5577
5578 rdata->dcb_enabled = tx_start_params->dcb_enabled;
5579 rdata->dcb_version = tx_start_params->dcb_version;
5580 rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;
5581
5582 for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
5583 rdata->traffic_type_to_priority_cos[i] =
5584 tx_start_params->traffic_type_to_priority_cos[i];
5585
5586 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
5587 U64_HI(data_mapping),
5588 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5589}
5590
5591static int bnx2x_func_send_cmd(struct bnx2x *bp,
5592 struct bnx2x_func_state_params *params)
5593{
5594 switch (params->cmd) {
5595 case BNX2X_F_CMD_HW_INIT:
5596 return bnx2x_func_hw_init(bp, params);
5597 case BNX2X_F_CMD_START:
5598 return bnx2x_func_send_start(bp, params);
5599 case BNX2X_F_CMD_STOP:
5600 return bnx2x_func_send_stop(bp, params);
5601 case BNX2X_F_CMD_HW_RESET:
5602 return bnx2x_func_hw_reset(bp, params);
5603 case BNX2X_F_CMD_TX_STOP:
5604 return bnx2x_func_send_tx_stop(bp, params);
5605 case BNX2X_F_CMD_TX_START:
5606 return bnx2x_func_send_tx_start(bp, params);
5607 default:
5608 BNX2X_ERR("Unknown command: %d\n", params->cmd);
5609 return -EINVAL;
5610 }
5611}
5612
5613void bnx2x_init_func_obj(struct bnx2x *bp,
5614 struct bnx2x_func_sp_obj *obj,
5615 void *rdata, dma_addr_t rdata_mapping,
5616 struct bnx2x_func_sp_drv_ops *drv_iface)
5617{
5618 memset(obj, 0, sizeof(*obj));
5619
5620 mutex_init(&obj->one_pending_mutex);
5621
5622 obj->rdata = rdata;
5623 obj->rdata_mapping = rdata_mapping;
5624
5625 obj->send_cmd = bnx2x_func_send_cmd;
5626 obj->check_transition = bnx2x_func_chk_transition;
5627 obj->complete_cmd = bnx2x_func_comp_cmd;
5628 obj->wait_comp = bnx2x_func_wait_comp;
5629
5630 obj->drv = drv_iface;
5631}
5632
5633
5634
5635
5636
5637
5638
5639
5640
5641
5642
5643
5644
5645
5646int bnx2x_func_state_change(struct bnx2x *bp,
5647 struct bnx2x_func_state_params *params)
5648{
5649 struct bnx2x_func_sp_obj *o = params->f_obj;
5650 int rc;
5651 enum bnx2x_func_cmd cmd = params->cmd;
5652 unsigned long *pending = &o->pending;
5653
5654 mutex_lock(&o->one_pending_mutex);
5655
5656
5657 if (o->check_transition(bp, o, params)) {
5658 mutex_unlock(&o->one_pending_mutex);
5659 return -EINVAL;
5660 }
5661
5662
5663 set_bit(cmd, pending);
5664
5665
5666 if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) {
5667 bnx2x_func_state_change_comp(bp, o, cmd);
5668 mutex_unlock(&o->one_pending_mutex);
5669 } else {
5670
5671 rc = o->send_cmd(bp, params);
5672
5673 mutex_unlock(&o->one_pending_mutex);
5674
5675 if (rc) {
5676 o->next_state = BNX2X_F_STATE_MAX;
5677 clear_bit(cmd, pending);
5678 smp_mb__after_clear_bit();
5679 return rc;
5680 }
5681
5682 if (test_bit(RAMROD_COMP_WAIT, ¶ms->ramrod_flags)) {
5683 rc = o->wait_comp(bp, o, cmd);
5684 if (rc)
5685 return rc;
5686
5687 return 0;
5688 }
5689 }
5690
5691 return !!test_bit(cmd, pending);
5692}
5693