1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22#include <linux/module.h>
23#include <linux/crc32.h>
24#include <linux/netdevice.h>
25#include <linux/etherdevice.h>
26#include <linux/crc32c.h>
27#include "bnx2x.h"
28#include "bnx2x_cmn.h"
29#include "bnx2x_sp.h"
30
31#define BNX2X_MAX_EMUL_MULTI 16
32
33#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
49 struct bnx2x_exe_queue_obj *o,
50 int exe_len,
51 union bnx2x_qable_obj *owner,
52 exe_q_validate validate,
53 exe_q_remove remove,
54 exe_q_optimize optimize,
55 exe_q_execute exec,
56 exe_q_get get)
57{
58 memset(o, 0, sizeof(*o));
59
60 INIT_LIST_HEAD(&o->exe_queue);
61 INIT_LIST_HEAD(&o->pending_comp);
62
63 spin_lock_init(&o->lock);
64
65 o->exe_chunk_len = exe_len;
66 o->owner = owner;
67
68
69 o->validate = validate;
70 o->remove = remove;
71 o->optimize = optimize;
72 o->execute = exec;
73 o->get = get;
74
75 DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n",
76 exe_len);
77}
78
/* Release a single execution-queue element allocated by
 * bnx2x_exe_queue_alloc_elem(). The element must already be unlinked.
 */
static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
					     struct bnx2x_exeq_elem *elem)
{
	DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
	kfree(elem);
}
85
86static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
87{
88 struct bnx2x_exeq_elem *elem;
89 int cnt = 0;
90
91 spin_lock_bh(&o->lock);
92
93 list_for_each_entry(elem, &o->exe_queue, link)
94 cnt++;
95
96 spin_unlock_bh(&o->lock);
97
98 return cnt;
99}
100
101
102
103
104
105
106
107
108
109
110
/* Add a new command element to the execution queue.
 *
 * Unless @restore is set, the element is first offered to the client's
 * optimize() callback (which may cancel it against a complementary
 * pending command) and then checked by validate(); on any non-zero
 * return the element is freed and that value is returned to the caller.
 * Restored elements (re-added from the registry) are queued
 * unconditionally.
 *
 * Returns 0 when the element was queued, otherwise the optimize()/
 * validate() result.
 */
static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
				      struct bnx2x_exe_queue_obj *o,
				      struct bnx2x_exeq_elem *elem,
				      bool restore)
{
	int rc;

	spin_lock_bh(&o->lock);

	if (!restore) {
		/* Try to cancel this element against a pending opposite one */
		rc = o->optimize(bp, o->owner, elem);
		if (rc)
			goto free_and_exit;

		/* Check if this request is ok */
		rc = o->validate(bp, o->owner, elem);
		if (rc) {
			DP(BNX2X_MSG_SP, "Preamble failed: %d\n", rc);
			goto free_and_exit;
		}
	}

	/* If so, add it to the execution queue */
	list_add_tail(&elem->link, &o->exe_queue);

	spin_unlock_bh(&o->lock);

	return 0;

free_and_exit:
	bnx2x_exe_queue_free_elem(bp, elem);

	spin_unlock_bh(&o->lock);

	return rc;

}
149
150static inline void __bnx2x_exe_queue_reset_pending(
151 struct bnx2x *bp,
152 struct bnx2x_exe_queue_obj *o)
153{
154 struct bnx2x_exeq_elem *elem;
155
156 while (!list_empty(&o->pending_comp)) {
157 elem = list_first_entry(&o->pending_comp,
158 struct bnx2x_exeq_elem, link);
159
160 list_del(&elem->link);
161 bnx2x_exe_queue_free_elem(bp, elem);
162 }
163}
164
165static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
166 struct bnx2x_exe_queue_obj *o)
167{
168
169 spin_lock_bh(&o->lock);
170
171 __bnx2x_exe_queue_reset_pending(bp, o);
172
173 spin_unlock_bh(&o->lock);
174
175}
176
177
178
179
180
181
182
183
184
185
/* Execute one "chunk" of queued commands.
 *
 * Moves up to exe_chunk_len worth of commands from the execution queue to
 * the pending-completion list and hands them to the client's execute()
 * callback.
 *
 * Returns:
 *   > 0 - commands are still pending completion (nothing new was started,
 *         or execute() reported asynchronous completion),
 *   0   - the chunk completed synchronously (or there was nothing to do),
 *   < 0 - execute() failed; the chunk was re-queued for a later retry.
 */
static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
				       struct bnx2x_exe_queue_obj *o,
				       unsigned long *ramrod_flags)
{
	struct bnx2x_exeq_elem *elem, spacer;
	int cur_len = 0, rc;

	memset(&spacer, 0, sizeof(spacer));

	spin_lock_bh(&o->lock);

	/* Next step should not be performed until the current is finished,
	 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
	 * properly clear object internals without sending any command to the
	 * FW which also implies there won't be any completion to clear the
	 * pending list.
	 */
	if (!list_empty(&o->pending_comp)) {
		if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
			DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
			__bnx2x_exe_queue_reset_pending(bp, o);
		} else {
			spin_unlock_bh(&o->lock);
			return 1;
		}
	}

	/* Run through the pending commands list and create a next
	 * execution chunk.
	 */
	while (!list_empty(&o->exe_queue)) {
		elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
					link);
		WARN_ON(!elem->cmd_len);

		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
			cur_len += elem->cmd_len;
			/* Prevent from both lists being empty when moving an
			 * element (see bnx2x_exe_queue_empty()): the on-stack
			 * spacer keeps pending_comp non-empty for the duration
			 * of the move; mb() orders its insertion before the
			 * list_move_tail() against lockless readers.
			 */
			list_add_tail(&spacer.link, &o->pending_comp);
			mb();
			list_move_tail(&elem->link, &o->pending_comp);
			list_del(&spacer.link);
		} else
			break;
	}

	/* Nothing fit into the current chunk - nothing to execute */
	if (!cur_len) {
		spin_unlock_bh(&o->lock);
		return 0;
	}

	rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
	if (rc < 0)
		/* In case of an error return the commands back to the queue
		 * and reset the pending_comp.
		 */
		list_splice_init(&o->pending_comp, &o->exe_queue);
	else if (!rc)
		/* If zero is returned, means there are no outstanding pending
		 * completions and we may dismiss the pending list.
		 */
		__bnx2x_exe_queue_reset_pending(bp, o);

	spin_unlock_bh(&o->lock);
	return rc;
}
261
/* Lockless check that both the execution queue and the pending-completion
 * list are empty. The mb() between the two list_empty() reads pairs with
 * the spacer/mb() dance in bnx2x_exe_queue_step(), which guarantees the
 * two lists are never observed simultaneously empty while an element is
 * being moved between them.
 */
static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
{
	bool empty = list_empty(&o->exe_queue);

	/* Don't reorder the reads around the barrier */
	mb();

	return empty && list_empty(&o->pending_comp);
}
271
272static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
273 struct bnx2x *bp)
274{
275 DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
276 return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
277}
278
279
280static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
281{
282 return !!test_bit(o->state, o->pstate);
283}
284
/* Clear the object's pending-state bit with full memory ordering on both
 * sides, so prior state updates are visible before waiters see the bit
 * drop (see bnx2x_state_wait()).
 */
static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
{
	smp_mb__before_clear_bit();
	clear_bit(o->state, o->pstate);
	smp_mb__after_clear_bit();
}
291
/* Set the object's pending-state bit with full memory ordering.
 * NOTE(review): the *_clear_bit barrier helpers are used around set_bit()
 * here; they provide the same full barrier as the clear_bit variants on
 * all architectures (later kernels renamed both to
 * smp_mb__{before,after}_atomic()) — confirm against the target tree.
 */
static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
{
	smp_mb__before_clear_bit();
	set_bit(o->state, o->pstate);
	smp_mb__after_clear_bit();
}
298
299
300
301
302
303
304
305
306
/* Poll until bit @state in @pstate is cleared (i.e. the pending command
 * has completed), sleeping 1-2 ms between polls.
 *
 * Waits up to ~5000 iterations (x20 on emulation, where everything is
 * slower). Returns 0 on completion, -EIO if the chip paniced meanwhile,
 * -EBUSY on timeout.
 */
static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
				   unsigned long *pstate)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	/* Emulation runs far slower than real silicon */
	if (CHIP_REV_IS_EMUL(bp))
		cnt *= 20;

	DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);

	might_sleep();
	while (cnt--) {
		if (!test_bit(state, pstate)) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(BNX2X_MSG_SP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		usleep_range(1000, 2000);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout waiting for state %d\n", state);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
342
/* Wait for the raw object's pending-state bit to clear. */
static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
{
	return bnx2x_state_wait(bp, raw->state, raw->pstate);
}
347
348
349
350static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
351{
352 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
353
354 WARN_ON(!mp);
355
356 return mp->get_entry(mp, offset);
357}
358
359static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
360{
361 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
362
363 WARN_ON(!mp);
364
365 return mp->get(mp, 1);
366}
367
368static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
369{
370 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
371
372 WARN_ON(!vp);
373
374 return vp->get_entry(vp, offset);
375}
376
377static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
378{
379 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
380
381 WARN_ON(!vp);
382
383 return vp->get(vp, 1);
384}
385
386static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
387{
388 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
389 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
390
391 if (!mp->get(mp, 1))
392 return false;
393
394 if (!vp->get(vp, 1)) {
395 mp->put(mp, 1);
396 return false;
397 }
398
399 return true;
400}
401
402static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
403{
404 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
405
406 return mp->put_entry(mp, offset);
407}
408
409static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
410{
411 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
412
413 return mp->put(mp, 1);
414}
415
416static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
417{
418 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
419
420 return vp->put_entry(vp, offset);
421}
422
423static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
424{
425 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
426
427 return vp->put(vp, 1);
428}
429
430static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
431{
432 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
433 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
434
435 if (!mp->put(mp, 1))
436 return false;
437
438 if (!vp->put(vp, 1)) {
439 mp->get(mp, 1);
440 return false;
441 }
442
443 return true;
444}
445
446static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
447 int n, u8 *buf)
448{
449 struct bnx2x_vlan_mac_registry_elem *pos;
450 u8 *next = buf;
451 int counter = 0;
452
453
454 list_for_each_entry(pos, &o->head, link) {
455 if (counter < n) {
456
457 memset(next, 0, MAC_LEADING_ZERO_CNT);
458
459
460 memcpy(next + MAC_LEADING_ZERO_CNT, pos->u.mac.mac,
461 ETH_ALEN);
462
463
464
465
466 counter++;
467 next = buf + counter * ALIGN(ETH_ALEN, sizeof(u32));
468
469 DP(BNX2X_MSG_SP, "copied element number %d to address %p element was %pM\n",
470 counter, next, pos->u.mac.mac);
471 }
472 }
473 return counter * ETH_ALEN;
474}
475
476
477static int bnx2x_check_mac_add(struct bnx2x *bp,
478 struct bnx2x_vlan_mac_obj *o,
479 union bnx2x_classification_ramrod_data *data)
480{
481 struct bnx2x_vlan_mac_registry_elem *pos;
482
483 DP(BNX2X_MSG_SP, "Checking MAC %pM for ADD command\n", data->mac.mac);
484
485 if (!is_valid_ether_addr(data->mac.mac))
486 return -EINVAL;
487
488
489 list_for_each_entry(pos, &o->head, link)
490 if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
491 return -EEXIST;
492
493 return 0;
494}
495
496static int bnx2x_check_vlan_add(struct bnx2x *bp,
497 struct bnx2x_vlan_mac_obj *o,
498 union bnx2x_classification_ramrod_data *data)
499{
500 struct bnx2x_vlan_mac_registry_elem *pos;
501
502 DP(BNX2X_MSG_SP, "Checking VLAN %d for ADD command\n", data->vlan.vlan);
503
504 list_for_each_entry(pos, &o->head, link)
505 if (data->vlan.vlan == pos->u.vlan.vlan)
506 return -EEXIST;
507
508 return 0;
509}
510
511static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
512 struct bnx2x_vlan_mac_obj *o,
513 union bnx2x_classification_ramrod_data *data)
514{
515 struct bnx2x_vlan_mac_registry_elem *pos;
516
517 DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
518 data->vlan_mac.mac, data->vlan_mac.vlan);
519
520 list_for_each_entry(pos, &o->head, link)
521 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
522 (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
523 ETH_ALEN)))
524 return -EEXIST;
525
526 return 0;
527}
528
529
530
531static struct bnx2x_vlan_mac_registry_elem *
532 bnx2x_check_mac_del(struct bnx2x *bp,
533 struct bnx2x_vlan_mac_obj *o,
534 union bnx2x_classification_ramrod_data *data)
535{
536 struct bnx2x_vlan_mac_registry_elem *pos;
537
538 DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
539
540 list_for_each_entry(pos, &o->head, link)
541 if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
542 return pos;
543
544 return NULL;
545}
546
547static struct bnx2x_vlan_mac_registry_elem *
548 bnx2x_check_vlan_del(struct bnx2x *bp,
549 struct bnx2x_vlan_mac_obj *o,
550 union bnx2x_classification_ramrod_data *data)
551{
552 struct bnx2x_vlan_mac_registry_elem *pos;
553
554 DP(BNX2X_MSG_SP, "Checking VLAN %d for DEL command\n", data->vlan.vlan);
555
556 list_for_each_entry(pos, &o->head, link)
557 if (data->vlan.vlan == pos->u.vlan.vlan)
558 return pos;
559
560 return NULL;
561}
562
563static struct bnx2x_vlan_mac_registry_elem *
564 bnx2x_check_vlan_mac_del(struct bnx2x *bp,
565 struct bnx2x_vlan_mac_obj *o,
566 union bnx2x_classification_ramrod_data *data)
567{
568 struct bnx2x_vlan_mac_registry_elem *pos;
569
570 DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
571 data->vlan_mac.mac, data->vlan_mac.vlan);
572
573 list_for_each_entry(pos, &o->head, link)
574 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
575 (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
576 ETH_ALEN)))
577 return pos;
578
579 return NULL;
580}
581
582
583static bool bnx2x_check_move(struct bnx2x *bp,
584 struct bnx2x_vlan_mac_obj *src_o,
585 struct bnx2x_vlan_mac_obj *dst_o,
586 union bnx2x_classification_ramrod_data *data)
587{
588 struct bnx2x_vlan_mac_registry_elem *pos;
589 int rc;
590
591
592
593
594 pos = src_o->check_del(bp, src_o, data);
595
596
597 rc = dst_o->check_add(bp, dst_o, data);
598
599
600
601
602 if (rc || !pos)
603 return false;
604
605 return true;
606}
607
/* check_move callback for objects that never support MOVE commands:
 * unconditionally reports the move as not allowed.
 */
static bool bnx2x_check_move_always_err(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *src_o,
	struct bnx2x_vlan_mac_obj *dst_o,
	union bnx2x_classification_ramrod_data *data)
{
	return false;
}
616
617
618static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
619{
620 struct bnx2x_raw_obj *raw = &o->raw;
621 u8 rx_tx_flag = 0;
622
623 if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
624 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
625 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
626
627 if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
628 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
629 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
630
631 return rx_tx_flag;
632}
633
634
/* Program (or disable) an LLH CAM entry for @dev_addr in the NIG block.
 *
 * Only relevant in MF-SI/MF-AFEX modes; silently ignored otherwise and
 * for indices beyond the per-PF CAM range. On ADD the MAC is packed into
 * two 32-bit words (big-endian byte order per the wb_data layout below)
 * and written via DMAE; the enable register is always written with @add.
 */
void bnx2x_set_mac_in_nig(struct bnx2x *bp,
			  bool add, unsigned char *dev_addr, int index)
{
	u32 wb_data[2];
	u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
			 NIG_REG_LLH0_FUNC_MEM;

	if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp))
		return;

	if (index > BNX2X_LLH_CAM_MAX_PF_LINE)
		return;

	DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
	   (add ? "ADD" : "DELETE"), index);

	if (add) {
		/* LLH_FUNC_MEM is a u64 WB register: 8 bytes per entry */
		reg_offset += 8*index;

		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
			      (dev_addr[4] << 8) |  dev_addr[5]);
		wb_data[1] = ((dev_addr[0] << 8) |  dev_addr[1]);

		REG_WR_DMAE(bp, reg_offset, wb_data, 2);
	}

	REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
				  NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
}
665
666
667
668
669
670
671
672
673
674
675
676static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
677 struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
678 struct eth_classify_cmd_header *hdr)
679{
680 struct bnx2x_raw_obj *raw = &o->raw;
681
682 hdr->client_id = raw->cl_id;
683 hdr->func_id = raw->func_id;
684
685
686 hdr->cmd_general_data |=
687 bnx2x_vlan_mac_get_rx_tx_flag(o);
688
689 if (add)
690 hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
691
692 hdr->cmd_general_data |=
693 (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
694}
695
696
697
698
699
700
701
702
703
704
705
706
/* Fill the E2 classify-rules ramrod data header.
 *
 * The echo field packs the SW connection id and the pending-state type so
 * the completion handler can find the right object; rule_cnt is truncated
 * to a u8 per the firmware interface.
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
				struct eth_classify_header *hdr, int rule_cnt)
{
	hdr->echo = cpu_to_le32((cid & BNX2X_SWCID_MASK) |
				(type << BNX2X_SWCID_SHIFT));
	hdr->rule_cnt = (u8)rule_cnt;
}
714
715
716
717static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
718 struct bnx2x_vlan_mac_obj *o,
719 struct bnx2x_exeq_elem *elem, int rule_idx,
720 int cam_offset)
721{
722 struct bnx2x_raw_obj *raw = &o->raw;
723 struct eth_classify_rules_ramrod_data *data =
724 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
725 int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
726 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
727 bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
728 unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
729 u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749 if (cmd != BNX2X_VLAN_MAC_MOVE) {
750 if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
751 bnx2x_set_mac_in_nig(bp, add, mac,
752 BNX2X_LLH_CAM_ISCSI_ETH_LINE);
753 else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
754 bnx2x_set_mac_in_nig(bp, add, mac,
755 BNX2X_LLH_CAM_ETH_LINE);
756 }
757
758
759 if (rule_idx == 0)
760 memset(data, 0, sizeof(*data));
761
762
763 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
764 &rule_entry->mac.header);
765
766 DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
767 (add ? "add" : "delete"), mac, raw->cl_id);
768
769
770 bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
771 &rule_entry->mac.mac_mid,
772 &rule_entry->mac.mac_lsb, mac);
773
774
775 if (cmd == BNX2X_VLAN_MAC_MOVE) {
776 rule_entry++;
777 rule_cnt++;
778
779
780 bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
781 elem->cmd_data.vlan_mac.target_obj,
782 true, CLASSIFY_RULE_OPCODE_MAC,
783 &rule_entry->mac.header);
784
785
786 bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
787 &rule_entry->mac.mac_mid,
788 &rule_entry->mac.mac_lsb, mac);
789 }
790
791
792
793
794 bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
795 rule_cnt);
796}
797
798
799
800
801
802
803
804
805
806
807
808
/* Fill the E1x MAC-configuration ramrod header for a single entry.
 *
 * length is always 1 (E1x commands carry one entry at a time) and the
 * echo field packs the SW connection id and pending-state type for the
 * completion handler. NOTE(review): client_id is set to 0xff here —
 * presumably an "unused" marker for this command form; confirm against
 * the firmware HSI.
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
	struct mac_configuration_hdr *hdr)
{
	struct bnx2x_raw_obj *r = &o->raw;

	hdr->length = 1;
	hdr->offset = (u8)cam_offset;
	hdr->client_id = cpu_to_le16(0xff);
	hdr->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
				(type << BNX2X_SWCID_SHIFT));
}
821
/* Fill a single E1x MAC-configuration table entry.
 *
 * Always records the client bit-vector, PF id and VLAN id; for an ADD the
 * action is SET with the given VLAN-filtering opcode and the MAC bytes,
 * for a DEL the action is INVALIDATE (MAC/opcode left untouched).
 */
static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
	u16 vlan_id, struct mac_configuration_entry *cfg_entry)
{
	struct bnx2x_raw_obj *r = &o->raw;
	u32 cl_bit_vec = (1 << r->cl_id);

	cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
	cfg_entry->pf_id = r->func_id;
	cfg_entry->vlan_id = cpu_to_le16(vlan_id);

	if (add) {
		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_SET);
		SET_FLAG(cfg_entry->flags,
			 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);

		/* Set a MAC in a ramrod data */
		bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
				      &cfg_entry->middle_mac_addr,
				      &cfg_entry->lsb_mac_addr, mac);
	} else
		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);
}
847
848static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
849 struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
850 u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
851{
852 struct mac_configuration_entry *cfg_entry = &config->config_table[0];
853 struct bnx2x_raw_obj *raw = &o->raw;
854
855 bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
856 &config->hdr);
857 bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
858 cfg_entry);
859
860 DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
861 (add ? "setting" : "clearing"),
862 mac, raw->cl_id, cam_offset);
863}
864
865
866
867
868
869
870
871
872
873
874static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
875 struct bnx2x_vlan_mac_obj *o,
876 struct bnx2x_exeq_elem *elem, int rule_idx,
877 int cam_offset)
878{
879 struct bnx2x_raw_obj *raw = &o->raw;
880 struct mac_configuration_cmd *config =
881 (struct mac_configuration_cmd *)(raw->rdata);
882
883
884
885
886 bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
887 true : false;
888
889
890 memset(config, 0, sizeof(*config));
891
892 bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state,
893 cam_offset, add,
894 elem->cmd_data.vlan_mac.u.mac.mac, 0,
895 ETH_VLAN_FILTER_ANY_VLAN, config);
896}
897
/* Build the classify-rules ramrod entry for a single VLAN command (E2+).
 *
 * Fills rule @rule_idx of the object's ramrod data buffer; a MOVE command
 * consumes two consecutive rules (DEL on the source, ADD on the target
 * object).
 */
static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o,
				  struct bnx2x_exeq_elem *elem, int rule_idx,
				  int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Set a rule header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
				      &rule_entry->vlan.header);

	DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
	   vlan);

	/* Set a VLAN itself */
	rule_entry->vlan.vlan = cpu_to_le16(vlan);

	/* MOVE: Add a rule that will add this VLAN to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					true, CLASSIFY_RULE_OPCODE_VLAN,
					&rule_entry->vlan.header);

		/* Set a VLAN itself */
		rule_entry->vlan.vlan = cpu_to_le16(vlan);
	}

	/* Set the ramrod data header */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}
947
/* Build the classify-rules ramrod entry for a single VLAN-MAC pair
 * command (E2+).
 *
 * Fills rule @rule_idx of the object's ramrod data buffer; a MOVE command
 * consumes two consecutive rules (DEL on the source, ADD on the target
 * object).
 */
static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
				      struct bnx2x_vlan_mac_obj *o,
				      struct bnx2x_exeq_elem *elem,
				      int rule_idx, int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
	u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Set a rule header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
				      &rule_entry->pair.header);

	/* Set VLAN and MAC themselves */
	rule_entry->pair.vlan = cpu_to_le16(vlan);
	bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
			      &rule_entry->pair.mac_mid,
			      &rule_entry->pair.mac_lsb, mac);

	/* MOVE: Add a rule that will add this pair to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					true, CLASSIFY_RULE_OPCODE_PAIR,
					&rule_entry->pair.header);

		/* Set a VLAN itself */
		rule_entry->pair.vlan = cpu_to_le16(vlan);
		bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
				      &rule_entry->pair.mac_mid,
				      &rule_entry->pair.mac_lsb, mac);
	}

	/* Set the ramrod data header */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
1013 struct bnx2x_vlan_mac_obj *o,
1014 struct bnx2x_exeq_elem *elem,
1015 int rule_idx, int cam_offset)
1016{
1017 struct bnx2x_raw_obj *raw = &o->raw;
1018 struct mac_configuration_cmd *config =
1019 (struct mac_configuration_cmd *)(raw->rdata);
1020
1021
1022
1023
1024 bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1025 true : false;
1026
1027
1028 memset(config, 0, sizeof(*config));
1029
1030 bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
1031 cam_offset, add,
1032 elem->cmd_data.vlan_mac.u.vlan_mac.mac,
1033 elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
1034 ETH_VLAN_FILTER_CLASSIFY, config);
1035}
1036
/* Local helper: return the list entry following @pos.
 * NOTE(review): duplicates the list_next_entry() helper that later
 * kernels provide in <linux/list.h>; drop this once that is available.
 */
#define list_next_entry(pos, member) \
		list_entry((pos)->member.next, typeof(*(pos)), member)
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
/* Re-issue an ADD command for the next element of the object's registry.
 *
 * @ppos is the iteration cursor: NULL on the first call (start from the
 * list head), the previously returned element afterwards. It is set back
 * to NULL when the last element has been scheduled, so the caller loops
 * until *ppos == NULL. The element's data and flags are copied into @p
 * and submitted through bnx2x_config_vlan_mac() with RAMROD_RESTORE set
 * (which bypasses validation/credit accounting).
 */
static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
			   struct bnx2x_vlan_mac_ramrod_params *p,
			   struct bnx2x_vlan_mac_registry_elem **ppos)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;

	/* If list is empty - there is nothing to do here */
	if (list_empty(&o->head)) {
		*ppos = NULL;
		return 0;
	}

	/* make a step... */
	if (*ppos == NULL)
		*ppos = list_first_entry(&o->head,
					 struct bnx2x_vlan_mac_registry_elem,
					 link);
	else
		*ppos = list_next_entry(*ppos, link);

	pos = *ppos;

	/* If it's the last step - return NULL */
	if (list_is_last(&pos->link, &o->head))
		*ppos = NULL;

	/* Prepare a 'user_req' */
	memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));

	/* Set the command */
	p->user_req.cmd = BNX2X_VLAN_MAC_ADD;

	/* Set vlan_mac_flags */
	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;

	/* Set a restore bit */
	__set_bit(RAMROD_RESTORE, &p->ramrod_flags);

	return bnx2x_config_vlan_mac(bp, p);
}
1100
1101
1102
1103
1104
1105
1106static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
1107 struct bnx2x_exe_queue_obj *o,
1108 struct bnx2x_exeq_elem *elem)
1109{
1110 struct bnx2x_exeq_elem *pos;
1111 struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
1112
1113
1114 list_for_each_entry(pos, &o->exe_queue, link)
1115 if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
1116 sizeof(*data)) &&
1117 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1118 return pos;
1119
1120 return NULL;
1121}
1122
1123static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
1124 struct bnx2x_exe_queue_obj *o,
1125 struct bnx2x_exeq_elem *elem)
1126{
1127 struct bnx2x_exeq_elem *pos;
1128 struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
1129
1130
1131 list_for_each_entry(pos, &o->exe_queue, link)
1132 if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
1133 sizeof(*data)) &&
1134 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1135 return pos;
1136
1137 return NULL;
1138}
1139
1140static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
1141 struct bnx2x_exe_queue_obj *o,
1142 struct bnx2x_exeq_elem *elem)
1143{
1144 struct bnx2x_exeq_elem *pos;
1145 struct bnx2x_vlan_mac_ramrod_data *data =
1146 &elem->cmd_data.vlan_mac.u.vlan_mac;
1147
1148
1149 list_for_each_entry(pos, &o->exe_queue, link)
1150 if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
1151 sizeof(*data)) &&
1152 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1153 return pos;
1154
1155 return NULL;
1156}
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
/* Validate an ADD command before it is queued.
 *
 * Checks the registry (per-type check_add), rejects a duplicate pending
 * ADD on the execution queue, and consumes one CAM credit unless the
 * DONT_CONSUME_CAM_CREDIT flag is set. Returns 0 if the command may be
 * enqueued.
 */
static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
					      union bnx2x_qable_obj *qo,
					      struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	int rc;

	/* Check the registry */
	rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u);
	if (rc) {
		DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state.\n");
		return rc;
	}

	/* Check if there is a pending ADD command for this
	 * MAC/VLAN/VLAN-MAC. Return an error if there is.
	 */
	if (exeq->get(exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
		return -EEXIST;
	}

	/* Consume the credit if not requested not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      o->get_credit(o)))
		return -EINVAL;

	return 0;
}
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
/* Validate a DEL command before it is queued.
 *
 * The entry must exist in the registry, there must be no pending MOVE or
 * duplicate DEL for it on the execution queue, and one CAM credit is
 * returned (unless DONT_CONSUME_CAM_CREDIT is set). Returns 0 if the
 * command may be enqueued.
 */
static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
					      union bnx2x_qable_obj *qo,
					      struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_vlan_mac_registry_elem *pos;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_exeq_elem query_elem;

	/* If this classification can not be deleted (doesn't exist)
	 * - return a BNX2X_EXIST.
	 */
	pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
	if (!pos) {
		DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n");
		return -EEXIST;
	}

	/* Check if there are pending DEL or MOVE commands for this
	 * MAC/VLAN/VLAN-MAC. Return an error if so.
	 */
	memcpy(&query_elem, elem, sizeof(query_elem));

	/* Check for MOVE commands */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
	if (exeq->get(exeq, &query_elem)) {
		BNX2X_ERR("There is a pending MOVE command already\n");
		return -EINVAL;
	}

	/* Check for DEL commands */
	if (exeq->get(exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
		return -EEXIST;
	}

	/* Return the credit to the credit pool if not requested not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      o->put_credit(o))) {
		BNX2X_ERR("Failed to return a credit\n");
		return -EINVAL;
	}

	return 0;
}
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
/* Validate a MOVE command before it is queued.
 *
 * The entry must be movable per check_move() (deletable from the source,
 * addable to the destination); there must be no pending DEL or duplicate
 * MOVE on the source queue nor a pending ADD on the destination queue.
 * Credits are then adjusted: one is consumed from the destination and one
 * returned to the source (each unless the corresponding
 * DONT_CONSUME_CAM_CREDIT* flag is set); the destination credit is rolled
 * back if the source put fails. Returns 0 if the command may be enqueued.
 */
static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
					       union bnx2x_qable_obj *qo,
					       struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
	struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
	struct bnx2x_exeq_elem query_elem;
	struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
	struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;

	/* Check if we can perform this operation based on the current
	 * registry state.
	 */
	if (!src_o->check_move(bp, src_o, dest_o,
			       &elem->cmd_data.vlan_mac.u)) {
		DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n");
		return -EINVAL;
	}

	/* Check if there is an already pending DEL or MOVE command for the
	 * source object or ADD command for a destination object. Return an
	 * error if so.
	 */
	memcpy(&query_elem, elem, sizeof(query_elem));

	/* Check DEL on source */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
	if (src_exeq->get(src_exeq, &query_elem)) {
		BNX2X_ERR("There is a pending DEL command on the source queue already\n");
		return -EINVAL;
	}

	/* Check MOVE on source */
	if (src_exeq->get(src_exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
		return -EEXIST;
	}

	/* Check ADD on destination */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
	if (dest_exeq->get(dest_exeq, &query_elem)) {
		BNX2X_ERR("There is a pending ADD command on the destination queue already\n");
		return -EINVAL;
	}

	/* Consume the credit if not requested not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      dest_o->get_credit(dest_o)))
		return -EINVAL;

	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      src_o->put_credit(src_o))) {
		/* return the credit taken from dest... */
		dest_o->put_credit(dest_o);
		return -EINVAL;
	}

	return 0;
}
1344
1345static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
1346 union bnx2x_qable_obj *qo,
1347 struct bnx2x_exeq_elem *elem)
1348{
1349 switch (elem->cmd_data.vlan_mac.cmd) {
1350 case BNX2X_VLAN_MAC_ADD:
1351 return bnx2x_validate_vlan_mac_add(bp, qo, elem);
1352 case BNX2X_VLAN_MAC_DEL:
1353 return bnx2x_validate_vlan_mac_del(bp, qo, elem);
1354 case BNX2X_VLAN_MAC_MOVE:
1355 return bnx2x_validate_vlan_mac_move(bp, qo, elem);
1356 default:
1357 return -EINVAL;
1358 }
1359}
1360
1361static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
1362 union bnx2x_qable_obj *qo,
1363 struct bnx2x_exeq_elem *elem)
1364{
1365 int rc = 0;
1366
1367
1368 if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1369 &elem->cmd_data.vlan_mac.vlan_mac_flags))
1370 return 0;
1371
1372 switch (elem->cmd_data.vlan_mac.cmd) {
1373 case BNX2X_VLAN_MAC_ADD:
1374 case BNX2X_VLAN_MAC_MOVE:
1375 rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
1376 break;
1377 case BNX2X_VLAN_MAC_DEL:
1378 rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
1379 break;
1380 default:
1381 return -EINVAL;
1382 }
1383
1384 if (rc != true)
1385 return -EINVAL;
1386
1387 return 0;
1388}
1389
1390
1391
1392
1393
1394
1395
1396
/* Wait until both the current pending ramrod completes and the object's
 * execution queue drains, polling with 1-2 ms sleeps for up to 5000
 * iterations. Returns 0 when fully idle, -EBUSY on timeout, or the
 * wait_comp() error.
 */
static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
			       struct bnx2x_vlan_mac_obj *o)
{
	int cnt = 5000, rc;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_raw_obj *raw = &o->raw;

	while (cnt--) {
		/* Wait for the current command to complete */
		rc = raw->wait_comp(bp, raw);
		if (rc)
			return rc;

		/* Wait until there are no pending commands */
		if (!bnx2x_exe_queue_empty(exeq))
			usleep_range(1000, 2000);
		else
			return 0;
	}

	return -EBUSY;
}
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
/* Handle a VLAN/MAC ramrod completion event.
 *
 * Frees the just-completed chunk (pending_comp list), clears the
 * object's pending bit, and — if the CQE carries no error and
 * RAMROD_CONT is set — kicks the next chunk of queued commands.
 *
 * Returns 0 when everything is done, 1 when more commands remain
 * pending, -EINVAL on a CQE error, or the exe_queue_step() error.
 */
static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
				   struct bnx2x_vlan_mac_obj *o,
				   union event_ring_elem *cqe,
				   unsigned long *ramrod_flags)
{
	struct bnx2x_raw_obj *r = &o->raw;
	int rc;

	/* Reset pending list */
	bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);

	/* Clear pending */
	r->clear_pending(r);

	/* If ramrod failed this is most likely a SW bug */
	if (cqe->message.error)
		return -EINVAL;

	/* Run the next bulk of pending commands if requested */
	if (test_bit(RAMROD_CONT, ramrod_flags)) {
		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/* If there is more work to do return PENDING */
	if (!bnx2x_exe_queue_empty(&o->exe_queue))
		return 1;

	return 0;
}
1460
1461
1462
1463
1464
1465
1466
1467
/* Try to cancel @elem against a complementary command already queued:
 * an ADD against a pending DEL of the same entry (and vice versa).
 *
 * If found, the pending command is removed and freed, and credits are
 * re-balanced (return for an optimized-away ADD, re-take for a DEL)
 * unless DONT_CONSUME_CAM_CREDIT is set on the pending element.
 *
 * Returns 1 when the pair was optimized away (caller must not queue
 * @elem), 0 when nothing matched, -EINVAL on a credit-pool failure.
 * MOVE commands are never optimized.
 */
static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
				   union bnx2x_qable_obj *qo,
				   struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem query, *pos;
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;

	memcpy(&query, elem, sizeof(query));

	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
		break;
	case BNX2X_VLAN_MAC_DEL:
		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
		break;
	default:
		/* Don't handle anything other than ADD or DEL */
		return 0;
	}

	/* If we found the appropriate element - delete it */
	pos = exeq->get(exeq, &query);
	if (pos) {

		/* Return the credit of the optimized command */
		if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
			      &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
			if ((query.cmd_data.vlan_mac.cmd ==
			     BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
				BNX2X_ERR("Failed to return the credit for the optimized ADD command\n");
				return -EINVAL;
			} else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
				BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n");
				return -EINVAL;
			}
		}

		DP(BNX2X_MSG_SP, "Optimizing %s command\n",
			   (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
			   "ADD" : "DEL");

		list_del(&pos->link);
		bnx2x_exe_queue_free_elem(bp, pos);
		return 1;
	}

	return 0;
}
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530static inline int bnx2x_vlan_mac_get_registry_elem(
1531 struct bnx2x *bp,
1532 struct bnx2x_vlan_mac_obj *o,
1533 struct bnx2x_exeq_elem *elem,
1534 bool restore,
1535 struct bnx2x_vlan_mac_registry_elem **re)
1536{
1537 enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1538 struct bnx2x_vlan_mac_registry_elem *reg_elem;
1539
1540
1541 if (!restore &&
1542 ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
1543 reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
1544 if (!reg_elem)
1545 return -ENOMEM;
1546
1547
1548 if (!o->get_cam_offset(o, ®_elem->cam_offset)) {
1549
1550
1551
1552
1553 WARN_ON(1);
1554 kfree(reg_elem);
1555 return -EINVAL;
1556 }
1557
1558 DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);
1559
1560
1561 memcpy(®_elem->u, &elem->cmd_data.vlan_mac.u,
1562 sizeof(reg_elem->u));
1563
1564
1565 reg_elem->vlan_mac_flags =
1566 elem->cmd_data.vlan_mac.vlan_mac_flags;
1567 } else
1568 reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
1569
1570 *re = reg_elem;
1571 return 0;
1572}
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
1585 union bnx2x_qable_obj *qo,
1586 struct list_head *exe_chunk,
1587 unsigned long *ramrod_flags)
1588{
1589 struct bnx2x_exeq_elem *elem;
1590 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1591 struct bnx2x_raw_obj *r = &o->raw;
1592 int rc, idx = 0;
1593 bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
1594 bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1595 struct bnx2x_vlan_mac_registry_elem *reg_elem;
1596 enum bnx2x_vlan_mac_cmd cmd;
1597
1598
1599
1600
1601
1602 if (!drv_only) {
1603 WARN_ON(r->check_pending(r));
1604
1605
1606 r->set_pending(r);
1607
1608
1609 list_for_each_entry(elem, exe_chunk, link) {
1610 cmd = elem->cmd_data.vlan_mac.cmd;
1611
1612
1613
1614
1615 if (cmd == BNX2X_VLAN_MAC_MOVE)
1616 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1617 else
1618 cam_obj = o;
1619
1620 rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
1621 elem, restore,
1622 ®_elem);
1623 if (rc)
1624 goto error_exit;
1625
1626 WARN_ON(!reg_elem);
1627
1628
1629 if (!restore &&
1630 ((cmd == BNX2X_VLAN_MAC_ADD) ||
1631 (cmd == BNX2X_VLAN_MAC_MOVE)))
1632 list_add(®_elem->link, &cam_obj->head);
1633
1634
1635 o->set_one_rule(bp, o, elem, idx,
1636 reg_elem->cam_offset);
1637
1638
1639 if (cmd == BNX2X_VLAN_MAC_MOVE)
1640 idx += 2;
1641 else
1642 idx++;
1643 }
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653 rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
1654 U64_HI(r->rdata_mapping),
1655 U64_LO(r->rdata_mapping),
1656 ETH_CONNECTION_TYPE);
1657 if (rc)
1658 goto error_exit;
1659 }
1660
1661
1662 list_for_each_entry(elem, exe_chunk, link) {
1663 cmd = elem->cmd_data.vlan_mac.cmd;
1664 if ((cmd == BNX2X_VLAN_MAC_DEL) ||
1665 (cmd == BNX2X_VLAN_MAC_MOVE)) {
1666 reg_elem = o->check_del(bp, o,
1667 &elem->cmd_data.vlan_mac.u);
1668
1669 WARN_ON(!reg_elem);
1670
1671 o->put_cam_offset(o, reg_elem->cam_offset);
1672 list_del(®_elem->link);
1673 kfree(reg_elem);
1674 }
1675 }
1676
1677 if (!drv_only)
1678 return 1;
1679 else
1680 return 0;
1681
1682error_exit:
1683 r->clear_pending(r);
1684
1685
1686 list_for_each_entry(elem, exe_chunk, link) {
1687 cmd = elem->cmd_data.vlan_mac.cmd;
1688
1689 if (cmd == BNX2X_VLAN_MAC_MOVE)
1690 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1691 else
1692 cam_obj = o;
1693
1694
1695 if (!restore &&
1696 ((cmd == BNX2X_VLAN_MAC_ADD) ||
1697 (cmd == BNX2X_VLAN_MAC_MOVE))) {
1698 reg_elem = o->check_del(bp, cam_obj,
1699 &elem->cmd_data.vlan_mac.u);
1700 if (reg_elem) {
1701 list_del(®_elem->link);
1702 kfree(reg_elem);
1703 }
1704 }
1705 }
1706
1707 return rc;
1708}
1709
1710static inline int bnx2x_vlan_mac_push_new_cmd(
1711 struct bnx2x *bp,
1712 struct bnx2x_vlan_mac_ramrod_params *p)
1713{
1714 struct bnx2x_exeq_elem *elem;
1715 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1716 bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);
1717
1718
1719 elem = bnx2x_exe_queue_alloc_elem(bp);
1720 if (!elem)
1721 return -ENOMEM;
1722
1723
1724 switch (p->user_req.cmd) {
1725 case BNX2X_VLAN_MAC_MOVE:
1726 elem->cmd_len = 2;
1727 break;
1728 default:
1729 elem->cmd_len = 1;
1730 }
1731
1732
1733 memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
1734
1735
1736 return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
1737}
1738
1739
1740
1741
1742
1743
1744
1745
/**
 * bnx2x_config_vlan_mac - queue and/or execute a vlan_mac command
 *
 * Unless RAMROD_CONT is set, pushes p->user_req onto the object's
 * execution queue; then, depending on RAMROD_EXEC/RAMROD_CONT/
 * RAMROD_COMP_WAIT, executes queued chunks and optionally waits for
 * each completion.
 *
 * Returns a negative value on failure, 0 when everything requested has
 * completed, 1 when commands are still pending on the queue.
 */
int bnx2x_config_vlan_mac(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_ramrod_params *p)
{
	int rc = 0;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
	unsigned long *ramrod_flags = &p->ramrod_flags;
	bool cont = test_bit(RAMROD_CONT, ramrod_flags);
	struct bnx2x_raw_obj *raw = &o->raw;

	/* Add a new command to the execution queue unless this call only
	 * continues a previously started flow.
	 */
	if (!cont) {
		rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
		if (rc)
			return rc;
	}

	/* If nothing will be executed further in this iteration we want to
	 * return PENDING if there are pending commands.
	 */
	if (!bnx2x_exe_queue_empty(&o->exe_queue))
		rc = 1;

	if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
		DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
		raw->clear_pending(raw);
	}

	/* Execute commands if required */
	if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
	    test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
	 * the caller wants to wait until the last command completes.
	 */
	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
		/* Wait maximum for the current exe_queue length iterations
		 * plus one (for the current pending command).
		 */
		int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;

		while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
		       max_iterations--) {

			/* Wait for the current command to complete */
			rc = raw->wait_comp(bp, raw);
			if (rc)
				return rc;

			/* Make a next step */
			rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
						  ramrod_flags);
			if (rc < 0)
				return rc;
		}

		return 0;
	}

	return rc;
}
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
/**
 * bnx2x_vlan_mac_del_all - delete all elements matching vlan_mac_flags
 *
 * First purges still-queued commands with a matching flags value (under
 * the exe_queue lock), then enqueues a DEL for every matching registry
 * entry without executing, and finally drains the queue with
 * RAMROD_CONT plus the caller's original ramrod flags.
 *
 * NOTE(review): removed exe-queue elements are only list_del()'ed, not
 * freed via bnx2x_exe_queue_free_elem() - verify this is not a leak.
 */
static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o,
				  unsigned long *vlan_mac_flags,
				  unsigned long *ramrod_flags)
{
	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
	int rc = 0;
	struct bnx2x_vlan_mac_ramrod_params p;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;

	/* Clear pending commands first */

	spin_lock_bh(&exeq->lock);

	list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
		if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
		    *vlan_mac_flags) {
			rc = exeq->remove(bp, exeq->owner, exeq_pos);
			if (rc) {
				BNX2X_ERR("Failed to remove command\n");
				spin_unlock_bh(&exeq->lock);
				return rc;
			}
			list_del(&exeq_pos->link);
		}
	}

	spin_unlock_bh(&exeq->lock);

	/* Prepare a command request */
	memset(&p, 0, sizeof(p));
	p.vlan_mac_obj = o;
	p.ramrod_flags = *ramrod_flags;
	p.user_req.cmd = BNX2X_VLAN_MAC_DEL;

	/* Queue all DEL commands without executing anything yet - the
	 * final call below runs them as one batch.
	 */
	__clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
	__clear_bit(RAMROD_EXEC, &p.ramrod_flags);
	__clear_bit(RAMROD_CONT, &p.ramrod_flags);

	list_for_each_entry(pos, &o->head, link) {
		if (pos->vlan_mac_flags == *vlan_mac_flags) {
			p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
			memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
			rc = bnx2x_config_vlan_mac(bp, &p);
			if (rc < 0) {
				BNX2X_ERR("Failed to add a new DEL command\n");
				return rc;
			}
		}
	}

	/* Now execute the whole queue with the caller's original flags */
	p.ramrod_flags = *ramrod_flags;
	__set_bit(RAMROD_CONT, &p.ramrod_flags);

	return bnx2x_config_vlan_mac(bp, &p);
}
1893
1894static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
1895 u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
1896 unsigned long *pstate, bnx2x_obj_type type)
1897{
1898 raw->func_id = func_id;
1899 raw->cid = cid;
1900 raw->cl_id = cl_id;
1901 raw->rdata = rdata;
1902 raw->rdata_mapping = rdata_mapping;
1903 raw->state = state;
1904 raw->pstate = pstate;
1905 raw->obj_type = type;
1906 raw->check_pending = bnx2x_raw_check_pending;
1907 raw->clear_pending = bnx2x_raw_clear_pending;
1908 raw->set_pending = bnx2x_raw_set_pending;
1909 raw->wait_comp = bnx2x_raw_wait;
1910}
1911
1912static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
1913 u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
1914 int state, unsigned long *pstate, bnx2x_obj_type type,
1915 struct bnx2x_credit_pool_obj *macs_pool,
1916 struct bnx2x_credit_pool_obj *vlans_pool)
1917{
1918 INIT_LIST_HEAD(&o->head);
1919
1920 o->macs_pool = macs_pool;
1921 o->vlans_pool = vlans_pool;
1922
1923 o->delete_all = bnx2x_vlan_mac_del_all;
1924 o->restore = bnx2x_vlan_mac_restore;
1925 o->complete = bnx2x_complete_vlan_mac;
1926 o->wait = bnx2x_wait_vlan_mac;
1927
1928 bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
1929 state, pstate, type);
1930}
1931
1932
1933void bnx2x_init_mac_obj(struct bnx2x *bp,
1934 struct bnx2x_vlan_mac_obj *mac_obj,
1935 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1936 dma_addr_t rdata_mapping, int state,
1937 unsigned long *pstate, bnx2x_obj_type type,
1938 struct bnx2x_credit_pool_obj *macs_pool)
1939{
1940 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
1941
1942 bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
1943 rdata_mapping, state, pstate, type,
1944 macs_pool, NULL);
1945
1946
1947 mac_obj->get_credit = bnx2x_get_credit_mac;
1948 mac_obj->put_credit = bnx2x_put_credit_mac;
1949 mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
1950 mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
1951
1952 if (CHIP_IS_E1x(bp)) {
1953 mac_obj->set_one_rule = bnx2x_set_one_mac_e1x;
1954 mac_obj->check_del = bnx2x_check_mac_del;
1955 mac_obj->check_add = bnx2x_check_mac_add;
1956 mac_obj->check_move = bnx2x_check_move_always_err;
1957 mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
1958
1959
1960 bnx2x_exe_queue_init(bp,
1961 &mac_obj->exe_queue, 1, qable_obj,
1962 bnx2x_validate_vlan_mac,
1963 bnx2x_remove_vlan_mac,
1964 bnx2x_optimize_vlan_mac,
1965 bnx2x_execute_vlan_mac,
1966 bnx2x_exeq_get_mac);
1967 } else {
1968 mac_obj->set_one_rule = bnx2x_set_one_mac_e2;
1969 mac_obj->check_del = bnx2x_check_mac_del;
1970 mac_obj->check_add = bnx2x_check_mac_add;
1971 mac_obj->check_move = bnx2x_check_move;
1972 mac_obj->ramrod_cmd =
1973 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1974 mac_obj->get_n_elements = bnx2x_get_n_elements;
1975
1976
1977 bnx2x_exe_queue_init(bp,
1978 &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
1979 qable_obj, bnx2x_validate_vlan_mac,
1980 bnx2x_remove_vlan_mac,
1981 bnx2x_optimize_vlan_mac,
1982 bnx2x_execute_vlan_mac,
1983 bnx2x_exeq_get_mac);
1984 }
1985}
1986
1987void bnx2x_init_vlan_obj(struct bnx2x *bp,
1988 struct bnx2x_vlan_mac_obj *vlan_obj,
1989 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1990 dma_addr_t rdata_mapping, int state,
1991 unsigned long *pstate, bnx2x_obj_type type,
1992 struct bnx2x_credit_pool_obj *vlans_pool)
1993{
1994 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
1995
1996 bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
1997 rdata_mapping, state, pstate, type, NULL,
1998 vlans_pool);
1999
2000 vlan_obj->get_credit = bnx2x_get_credit_vlan;
2001 vlan_obj->put_credit = bnx2x_put_credit_vlan;
2002 vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
2003 vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
2004
2005 if (CHIP_IS_E1x(bp)) {
2006 BNX2X_ERR("Do not support chips others than E2 and newer\n");
2007 BUG();
2008 } else {
2009 vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2;
2010 vlan_obj->check_del = bnx2x_check_vlan_del;
2011 vlan_obj->check_add = bnx2x_check_vlan_add;
2012 vlan_obj->check_move = bnx2x_check_move;
2013 vlan_obj->ramrod_cmd =
2014 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2015
2016
2017 bnx2x_exe_queue_init(bp,
2018 &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
2019 qable_obj, bnx2x_validate_vlan_mac,
2020 bnx2x_remove_vlan_mac,
2021 bnx2x_optimize_vlan_mac,
2022 bnx2x_execute_vlan_mac,
2023 bnx2x_exeq_get_vlan);
2024 }
2025}
2026
/* Initialize a paired VLAN-MAC classification object. E1 is
 * unsupported; E1H uses the legacy SET_MAC ramrod, E2+ uses
 * classification rules.
 */
void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
			     struct bnx2x_vlan_mac_obj *vlan_mac_obj,
			     u8 cl_id, u32 cid, u8 func_id, void *rdata,
			     dma_addr_t rdata_mapping, int state,
			     unsigned long *pstate, bnx2x_obj_type type,
			     struct bnx2x_credit_pool_obj *macs_pool,
			     struct bnx2x_credit_pool_obj *vlans_pool)
{
	union bnx2x_qable_obj *qable_obj =
		(union bnx2x_qable_obj *)vlan_mac_obj;

	bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
				   rdata_mapping, state, pstate, type,
				   macs_pool, vlans_pool);

	/* CAM pool handling: a VLAN-MAC pair consumes from both pools */
	vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
	vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;

	/* CAM offset is managed by the MAC pool only - the VLAN pool is
	 * not used for offsets here.
	 */
	vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
	vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;

	if (CHIP_IS_E1(bp)) {
		BNX2X_ERR("Do not support chips others than E2\n");
		BUG();
	} else if (CHIP_IS_E1H(bp)) {
		vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
		vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
		vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
		vlan_mac_obj->check_move = bnx2x_check_move_always_err;
		vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;

		/* Exe Queue */
		bnx2x_exe_queue_init(bp,
				     &vlan_mac_obj->exe_queue, 1, qable_obj,
				     bnx2x_validate_vlan_mac,
				     bnx2x_remove_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_vlan_mac);
	} else {
		vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
		vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
		vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
		vlan_mac_obj->check_move = bnx2x_check_move;
		vlan_mac_obj->ramrod_cmd =
			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;

		/* Exe Queue */
		bnx2x_exe_queue_init(bp,
				     &vlan_mac_obj->exe_queue,
				     CLASSIFY_RULES_COUNT,
				     qable_obj, bnx2x_validate_vlan_mac,
				     bnx2x_remove_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_vlan_mac);
	}

}
2091
2092
2093static inline void __storm_memset_mac_filters(struct bnx2x *bp,
2094 struct tstorm_eth_mac_filter_config *mac_filters,
2095 u16 pf_id)
2096{
2097 size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2098
2099 u32 addr = BAR_TSTRORM_INTMEM +
2100 TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2101
2102 __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
2103}
2104
/**
 * bnx2x_set_rx_mode_e1x - program the rx filtering mode on E1x chips
 *
 * Translates the generic accept flags into per-client bits of the
 * TSTORM MAC filter configuration and writes the result directly to
 * chip internal memory (no ramrod, completes synchronously).
 */
static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
				 struct bnx2x_rx_mode_ramrod_params *p)
{
	/* update the bp MAC filter structure */
	u32 mask = (1 << p->cl_id);

	struct tstorm_eth_mac_filter_config *mac_filters =
		(struct tstorm_eth_mac_filter_config *)p->rdata;

	/* initial setting is drop-all */
	u8 drop_all_ucast = 1, drop_all_mcast = 1;
	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
	u8 unmatched_unicast = 0;

	/* In e1x there we only take into account rx accept flags since tx
	 * switching isn't enabled.
	 */
	if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
		/* accept matched ucast */
		drop_all_ucast = 0;

	if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
		/* accept matched mcast */
		drop_all_mcast = 0;

	if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
		/* accept all ucast */
		drop_all_ucast = 0;
		accp_all_ucast = 1;
	}
	if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
		/* accept all mcast */
		drop_all_mcast = 0;
		accp_all_mcast = 1;
	}
	if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
		/* accept (all) bcast */
		accp_all_bcast = 1;
	if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
		/* accept unmatched unicasts */
		unmatched_unicast = 1;

	/* For each mode set or clear this client's bit in the shared
	 * per-function filter words.
	 */
	mac_filters->ucast_drop_all = drop_all_ucast ?
		mac_filters->ucast_drop_all | mask :
		mac_filters->ucast_drop_all & ~mask;

	mac_filters->mcast_drop_all = drop_all_mcast ?
		mac_filters->mcast_drop_all | mask :
		mac_filters->mcast_drop_all & ~mask;

	mac_filters->ucast_accept_all = accp_all_ucast ?
		mac_filters->ucast_accept_all | mask :
		mac_filters->ucast_accept_all & ~mask;

	mac_filters->mcast_accept_all = accp_all_mcast ?
		mac_filters->mcast_accept_all | mask :
		mac_filters->mcast_accept_all & ~mask;

	mac_filters->bcast_accept_all = accp_all_bcast ?
		mac_filters->bcast_accept_all | mask :
		mac_filters->bcast_accept_all & ~mask;

	mac_filters->unmatched_unicast = unmatched_unicast ?
		mac_filters->unmatched_unicast | mask :
		mac_filters->unmatched_unicast & ~mask;

	DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
			 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
	   mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
	   mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
	   mac_filters->bcast_accept_all);

	/* write the MAC filter structure */
	__storm_memset_mac_filters(bp, mac_filters, p->func_id);

	/* The operation is completed - clear the pending state bit */
	clear_bit(p->state, p->pstate);
	smp_mb__after_clear_bit();

	return 0;
}
2185
2186
2187static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
2188 struct eth_classify_header *hdr,
2189 u8 rule_cnt)
2190{
2191 hdr->echo = cpu_to_le32(cid);
2192 hdr->rule_cnt = rule_cnt;
2193}
2194
/**
 * bnx2x_rx_mode_set_cmd_state_e2 - encode accept flags into a filter rule
 *
 * Starts from drop-all and relaxes the state bit-by-bit according to
 * the accept flags. @clear_accept_all strips all accept-all bits at the
 * end (used for the FCoE client, which must only receive matched
 * traffic). Order matters: the clear_accept_all pass must run last.
 */
static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
				unsigned long *accept_flags,
				struct eth_filter_rules_cmd *cmd,
				bool clear_accept_all)
{
	u16 state;

	/* start with 'drop-all' */
	state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
		ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;

	if (test_bit(BNX2X_ACCEPT_UNICAST, accept_flags))
		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;

	if (test_bit(BNX2X_ACCEPT_MULTICAST, accept_flags))
		state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;

	if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, accept_flags)) {
		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
		state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
	}

	if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, accept_flags)) {
		state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
		state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
	}

	if (test_bit(BNX2X_ACCEPT_BROADCAST, accept_flags))
		state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;

	if (test_bit(BNX2X_ACCEPT_UNMATCHED, accept_flags)) {
		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
		state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
	}

	if (test_bit(BNX2X_ACCEPT_ANY_VLAN, accept_flags))
		state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;

	/* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
	if (clear_accept_all) {
		state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
		state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
	}

	cmd->state = cpu_to_le16(state);

}
2244
/**
 * bnx2x_set_rx_mode_e2 - program the rx filtering mode on E2+ chips
 *
 * Builds up to four filter rules (Tx and Rx for the regular client,
 * plus Tx and Rx for the FCoE client when BNX2X_RX_MODE_FCOE_ETH is
 * set) in the ramrod data buffer and posts a FILTER_RULES ramrod.
 *
 * Returns 1 on success (completion pending), negative errno on
 * failure to post the ramrod.
 */
static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
				struct bnx2x_rx_mode_ramrod_params *p)
{
	struct eth_filter_rules_ramrod_data *data = p->rdata;
	int rc;
	u8 rule_idx = 0;

	/* Reset the ramrod data buffer */
	memset(data, 0, sizeof(*data));

	/* Setup ramrod data */

	/* Tx (internal switching) */
	if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
		data->rules[rule_idx].client_id = p->cl_id;
		data->rules[rule_idx].func_id = p->func_id;

		data->rules[rule_idx].cmd_general_data =
			ETH_FILTER_RULES_CMD_TX_CMD;

		bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
			&(data->rules[rule_idx++]), false);
	}

	/* Rx */
	if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
		data->rules[rule_idx].client_id = p->cl_id;
		data->rules[rule_idx].func_id = p->func_id;

		data->rules[rule_idx].cmd_general_data =
			ETH_FILTER_RULES_CMD_RX_CMD;

		bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
			&(data->rules[rule_idx++]), false);
	}

	/* If FCoE Queue configuration has been requested configure the Rx and
	 * internal switching modes for this queue in separate rules. The
	 * accept-all bits are forcibly cleared for FCoE (clear_accept_all
	 * == true) so it only sees matched traffic.
	 */
	if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
		/* Tx (internal switching) */
		if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
			data->rules[rule_idx].func_id = p->func_id;

			data->rules[rule_idx].cmd_general_data =
						ETH_FILTER_RULES_CMD_TX_CMD;

			bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
						       &(data->rules[rule_idx]),
						       true);
			rule_idx++;
		}

		/* Rx */
		if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
			data->rules[rule_idx].func_id = p->func_id;

			data->rules[rule_idx].cmd_general_data =
						ETH_FILTER_RULES_CMD_RX_CMD;

			bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
						       &(data->rules[rule_idx]),
						       true);
			rule_idx++;
		}
	}

	/* Set the ramrod header (most importantly - number of rules to
	 * configure).
	 */
	bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);

	DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
			 data->header.rule_cnt, p->rx_accept_flags,
			 p->tx_accept_flags);

	/* No need for an explicit memory barrier here as long we would
	 * need to ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read and we will have to put a full memory barrier there
	 * (inside bnx2x_sp_post()).
	 */

	/* Send a ramrod */
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
			   U64_HI(p->rdata_mapping),
			   U64_LO(p->rdata_mapping),
			   ETH_CONNECTION_TYPE);
	if (rc)
		return rc;

	/* Ramrod completion is pending */
	return 1;
}
2350
2351static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
2352 struct bnx2x_rx_mode_ramrod_params *p)
2353{
2354 return bnx2x_state_wait(bp, p->state, p->pstate);
2355}
2356
/* E1x programs rx_mode synchronously - there is nothing to wait for. */
static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
				    struct bnx2x_rx_mode_ramrod_params *p)
{
	/* never wait */
	return 0;
}
2363
2364int bnx2x_config_rx_mode(struct bnx2x *bp,
2365 struct bnx2x_rx_mode_ramrod_params *p)
2366{
2367 int rc;
2368
2369
2370 rc = p->rx_mode_obj->config_rx_mode(bp, p);
2371 if (rc < 0)
2372 return rc;
2373
2374
2375 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2376 rc = p->rx_mode_obj->wait_comp(bp, p);
2377 if (rc)
2378 return rc;
2379 }
2380
2381 return rc;
2382}
2383
2384void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
2385 struct bnx2x_rx_mode_obj *o)
2386{
2387 if (CHIP_IS_E1x(bp)) {
2388 o->wait_comp = bnx2x_empty_rx_mode_wait;
2389 o->config_rx_mode = bnx2x_set_rx_mode_e1x;
2390 } else {
2391 o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
2392 o->config_rx_mode = bnx2x_set_rx_mode_e2;
2393 }
2394}
2395
2396
2397static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
2398{
2399 return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
2400}
2401
/* A single multicast MAC queued inside a pending mcast command. */
struct bnx2x_mcast_mac_elem {
	struct list_head link;	/* entry in pending_mcast_cmd.data.macs_head */
	u8 mac[ETH_ALEN];
	u8 pad[2];		/* keep the structure size aligned */
};
2407
/* A multicast configuration command deferred because the object was
 * busy when it arrived; consumed later by the pending-command handlers.
 */
struct bnx2x_pending_mcast_cmd {
	struct list_head link;	/* entry in mcast_obj->pending_cmds_head */
	int type;		/* BNX2X_MCAST_CMD_X */
	union {
		struct list_head macs_head;	/* ADD: MACs to configure */
		u32 macs_num;			/* DEL: number of bins to clear */
		int next_bin;			/* RESTORE: next bin to handle */
	} data;

	bool done;	/* set once the command has been handled in full
			 * (it may span several ramrods when larger than
			 * max_cmd_len)
			 */
};
2424
2425static int bnx2x_mcast_wait(struct bnx2x *bp,
2426 struct bnx2x_mcast_obj *o)
2427{
2428 if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
2429 o->raw.wait_comp(bp, &o->raw))
2430 return -EBUSY;
2431
2432 return 0;
2433}
2434
/**
 * bnx2x_mcast_enqueue_cmd - defer a multicast command for later handling
 *
 * Allocates the command header and (for ADD) the per-MAC elements in a
 * single kzalloc: the bnx2x_mcast_mac_elem array lives directly behind
 * the bnx2x_pending_mcast_cmd header and is therefore freed together
 * with it.
 *
 * Returns 1 when the command was queued and the object scheduled,
 * 0 when there was nothing to do, -ENOMEM/-EINVAL on failure.
 */
static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
				   struct bnx2x_mcast_obj *o,
				   struct bnx2x_mcast_ramrod_params *p,
				   enum bnx2x_mcast_cmd cmd)
{
	int total_sz;
	struct bnx2x_pending_mcast_cmd *new_cmd;
	struct bnx2x_mcast_mac_elem *cur_mac = NULL;
	struct bnx2x_mcast_list_elem *pos;
	int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
			     p->mcast_list_len : 0);

	/* If the command is empty ("handle pending commands only"), break */
	if (!p->mcast_list_len)
		return 0;

	total_sz = sizeof(*new_cmd) +
		macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);

	/* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
	new_cmd = kzalloc(total_sz, GFP_ATOMIC);

	if (!new_cmd)
		return -ENOMEM;

	DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n",
	   cmd, macs_list_len);

	INIT_LIST_HEAD(&new_cmd->data.macs_head);

	new_cmd->type = cmd;
	new_cmd->done = false;

	switch (cmd) {
	case BNX2X_MCAST_CMD_ADD:
		/* The MAC elements were allocated right behind the header */
		cur_mac = (struct bnx2x_mcast_mac_elem *)
			  ((u8 *)new_cmd + sizeof(*new_cmd));

		/* Push the MACs of the current command into the pending
		 * command MACs list.
		 */
		list_for_each_entry(pos, &p->mcast_list, link) {
			memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
			list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
			cur_mac++;
		}

		break;

	case BNX2X_MCAST_CMD_DEL:
		new_cmd->data.macs_num = p->mcast_list_len;
		break;

	case BNX2X_MCAST_CMD_RESTORE:
		new_cmd->data.next_bin = 0;
		break;

	default:
		kfree(new_cmd);
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return -EINVAL;
	}

	/* Push the new pending command to the list and set a scheduler bit */
	list_add_tail(&new_cmd->link, &o->pending_cmds_head);

	o->set_sched(o);

	return 1;
}
2505
2506
2507
2508
2509
2510
2511
2512
2513
/**
 * bnx2x_mcast_get_next_bin - find the next set bin at or after @last
 *
 * Scans the approximate-match bit vector starting from bin @last.
 * inner_start offsets only the first 64-bit word; it is reset to 0 for
 * the following words so no bits are skipped.
 *
 * Returns the bin number, or -1 if no set bin remains.
 */
static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
{
	int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;

	for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
		if (o->registry.aprox_match.vec[i])
			for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
				int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
				if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
						       vec, cur_bit)) {
					return cur_bit;
				}
			}
		inner_start = 0;
	}

	/* None found */
	return -1;
}
2533
2534
2535
2536
2537
2538
2539
2540
2541static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
2542{
2543 int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
2544
2545 if (cur_bit >= 0)
2546 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2547
2548 return cur_bit;
2549}
2550
2551static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
2552{
2553 struct bnx2x_raw_obj *raw = &o->raw;
2554 u8 rx_tx_flag = 0;
2555
2556 if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
2557 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2558 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2559
2560 if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
2561 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2562 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2563
2564 return rx_tx_flag;
2565}
2566
/**
 * bnx2x_mcast_set_one_rule_e2 - write one multicast rule into rdata
 *
 * Fills rule @idx of the multicast-rules ramrod buffer. ADD hashes the
 * MAC into a bin and records it in the registry; DEL clears the first
 * occupied bin; RESTORE re-programs the bin given in @cfg_data.
 */
static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
					struct bnx2x_mcast_obj *o, int idx,
					union bnx2x_mcast_config_data *cfg_data,
					enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_raw_obj *r = &o->raw;
	struct eth_multicast_rules_ramrod_data *data =
		(struct eth_multicast_rules_ramrod_data *)(r->rdata);
	u8 func_id = r->func_id;
	u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
	int bin;

	if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
		rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;

	data->rules[idx].cmd_general_data |= rx_tx_add_flag;

	/* Get a bin and update a bins' vector */
	switch (cmd) {
	case BNX2X_MCAST_CMD_ADD:
		bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
		break;

	case BNX2X_MCAST_CMD_DEL:
		/* If there were no more bins to clear
		 * (bnx2x_mcast_clear_first_bin() returns -1) then we would
		 * clear any (0xff) bin.
		 * See bnx2x_mcast_validate_e2() for explanation when it may
		 * happen.
		 */
		bin = bnx2x_mcast_clear_first_bin(o);
		break;

	case BNX2X_MCAST_CMD_RESTORE:
		bin = cfg_data->bin;
		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return;
	}

	DP(BNX2X_MSG_SP, "%s bin %d\n",
	   ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
	   "Setting"  : "Clearing"), bin);

	data->rules[idx].bin_id = (u8)bin;
	data->rules[idx].func_id = func_id;
	data->rules[idx].engine_id = o->engine_id;
}
2618
2619
2620
2621
2622
2623
2624
2625
2626
2627
2628
/**
 * bnx2x_mcast_handle_restore_cmd_e2 - restore configured bins
 *
 * Walks the registry starting from @start_bin, emitting one RESTORE
 * rule per set bin into the ramrod buffer at index *rdata_idx, until
 * either the registry is exhausted or max_cmd_len rules were written.
 *
 * Returns the last processed bin, or a negative value when the whole
 * registry has been walked. *rdata_idx is advanced accordingly.
 */
static inline int bnx2x_mcast_handle_restore_cmd_e2(
	struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin,
	int *rdata_idx)
{
	int cur_bin, cnt = *rdata_idx;
	union bnx2x_mcast_config_data cfg_data = {NULL};

	/* go through the registry and configure the bins from it */
	for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
	    cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {

		cfg_data.bin = (u8)cur_bin;
		o->set_one_rule(bp, o, cnt, &cfg_data,
				BNX2X_MCAST_CMD_RESTORE);

		cnt++;

		DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);

		/* Break if we reached the maximum number
		 * of rules.
		 */
		if (cnt >= o->max_cmd_len)
			break;
	}

	*rdata_idx = cnt;

	return cur_bin;
}
2659
/* Handle a pending ADD: emit one rule per queued MAC (up to
 * max_cmd_len), unlinking each configured MAC. The command is marked
 * done once its MAC list is empty; the elements themselves are freed
 * together with the command (single allocation).
 */
static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
	int *line_idx)
{
	struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
	int cnt = *line_idx;
	union bnx2x_mcast_config_data cfg_data = {NULL};

	list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
				 link) {

		cfg_data.mac = &pmac_pos->mac[0];
		o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);

		cnt++;

		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
		   pmac_pos->mac);

		list_del(&pmac_pos->link);

		/* Break if we reached the maximum number
		 * of rules.
		 */
		if (cnt >= o->max_cmd_len)
			break;
	}

	*line_idx = cnt;

	/* if no more MACs to configure - we are done */
	if (list_empty(&cmd_pos->data.macs_head))
		cmd_pos->done = true;
}
2694
/* Handle a pending DEL: emit one bin-clearing rule per remaining MAC
 * count (up to max_cmd_len). DEL rules carry no MAC - the device
 * clears occupied bins in registry order.
 */
static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
	int *line_idx)
{
	int cnt = *line_idx;

	while (cmd_pos->data.macs_num) {
		o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);

		cnt++;

		cmd_pos->data.macs_num--;

		DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n",
		   cmd_pos->data.macs_num, cnt);

		/* Break if we reached the maximum
		 * number of rules.
		 */
		if (cnt >= o->max_cmd_len)
			break;
	}

	*line_idx = cnt;

	/* If we cleared all bins - we are done */
	if (!cmd_pos->data.macs_num)
		cmd_pos->done = true;
}
2724
2725static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
2726 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2727 int *line_idx)
2728{
2729 cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
2730 line_idx);
2731
2732 if (cmd_pos->data.next_bin < 0)
2733
2734 cmd_pos->done = true;
2735 else
2736
2737 cmd_pos->data.next_bin++;
2738}
2739
/**
 * bnx2x_mcast_handle_pending_cmds_e2 - drain deferred mcast commands
 *
 * Processes queued ADD/DEL/RESTORE commands into the ramrod buffer,
 * freeing each command once fully handled, until the buffer holds
 * max_cmd_len rules or the queue is empty.
 *
 * Returns the number of rules written, or -EINVAL on an unknown
 * command type.
 */
static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
				struct bnx2x_mcast_ramrod_params *p)
{
	struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
	int cnt = 0;
	struct bnx2x_mcast_obj *o = p->mcast_obj;

	list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
				 link) {
		switch (cmd_pos->type) {
		case BNX2X_MCAST_CMD_ADD:
			bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
			break;

		case BNX2X_MCAST_CMD_DEL:
			bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
			break;

		case BNX2X_MCAST_CMD_RESTORE:
			bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
							   &cnt);
			break;

		default:
			BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
			return -EINVAL;
		}

		/* If the command has been completed - remove it from the list
		 * and free the memory.
		 */
		if (cmd_pos->done) {
			list_del(&cmd_pos->link);
			kfree(cmd_pos);
		}

		/* Break if we reached the maximum number of rules */
		if (cnt >= o->max_cmd_len)
			break;
	}

	return cnt;
}
2783
2784static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
2785 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2786 int *line_idx)
2787{
2788 struct bnx2x_mcast_list_elem *mlist_pos;
2789 union bnx2x_mcast_config_data cfg_data = {NULL};
2790 int cnt = *line_idx;
2791
2792 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2793 cfg_data.mac = mlist_pos->mac;
2794 o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
2795
2796 cnt++;
2797
2798 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2799 mlist_pos->mac);
2800 }
2801
2802 *line_idx = cnt;
2803}
2804
2805static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
2806 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2807 int *line_idx)
2808{
2809 int cnt = *line_idx, i;
2810
2811 for (i = 0; i < p->mcast_list_len; i++) {
2812 o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
2813
2814 cnt++;
2815
2816 DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
2817 p->mcast_list_len - i - 1);
2818 }
2819
2820 *line_idx = cnt;
2821}
2822
2823
2824
2825
2826
2827
2828
2829
2830
2831
2832
2833
2834
/* Dispatch the caller's current (not previously queued) multicast command,
 * writing its rules into the ramrod data starting at line 'start_cnt'.
 * Returns the next free line index, or -EINVAL for an unknown command.
 */
static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
	struct bnx2x_mcast_ramrod_params *p,
	enum bnx2x_mcast_cmd cmd,
	int start_cnt)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	int cnt = start_cnt;

	DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);

	switch (cmd) {
	case BNX2X_MCAST_CMD_ADD:
		bnx2x_mcast_hdl_add(bp, o, p, &cnt);
		break;

	case BNX2X_MCAST_CMD_DEL:
		bnx2x_mcast_hdl_del(bp, o, p, &cnt);
		break;

	case BNX2X_MCAST_CMD_RESTORE:
		/* Restore everything from bin 0 */
		o->hdl_restore(bp, o, 0, &cnt);
		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return -EINVAL;
	}

	/* The current command has been handled in full */
	p->mcast_list_len = 0;

	return cnt;
}
2868
/* Validate a multicast command for 57712+ and pre-adjust the registry size
 * and pending-rule accounting accordingly.  The accounting is undone by
 * bnx2x_mcast_revert_e2() if the command later fails.
 */
static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
				   struct bnx2x_mcast_ramrod_params *p,
				   enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	int reg_sz = o->get_registry_size(o);

	switch (cmd) {
	/* DEL drops the whole current configuration */
	case BNX2X_MCAST_CMD_DEL:
		o->set_registry_size(o, 0);
		/* fall through - like RESTORE, DEL must emit one line per
		 * previously configured entry (reg_sz of them).
		 */

	/* RESTORE replays the entire registry into hardware */
	case BNX2X_MCAST_CMD_RESTORE:
		p->mcast_list_len = reg_sz;
		break;

	case BNX2X_MCAST_CMD_ADD:
	case BNX2X_MCAST_CMD_CONT:
		/* Multicast MACs on this chip are hashed into a bin vector,
		 * so there is no hard CAM limit - just grow the registry by
		 * the number of new MACs.
		 */
		o->set_registry_size(o, reg_sz + p->mcast_list_len);
		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return -EINVAL;

	}

	/* Charge the rules this command will eventually emit */
	o->total_pending_num += p->mcast_list_len;

	return 0;
}
2914
2915static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
2916 struct bnx2x_mcast_ramrod_params *p,
2917 int old_num_bins)
2918{
2919 struct bnx2x_mcast_obj *o = p->mcast_obj;
2920
2921 o->set_registry_size(o, old_num_bins);
2922 o->total_pending_num -= p->mcast_list_len;
2923}
2924
2925
2926
2927
2928
2929
2930
2931
/* Fill the header of the multicast-rules ramrod data: the echo field carries
 * the connection id plus a marker so the completion handler can recognize a
 * pending multicast ramrod, and rule_cnt tells firmware how many rule lines
 * follow.
 */
static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
					struct bnx2x_mcast_ramrod_params *p,
					u8 len)
{
	struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
	struct eth_multicast_rules_ramrod_data *data =
		(struct eth_multicast_rules_ramrod_data *)(r->rdata);

	data->header.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
					(BNX2X_FILTER_MCAST_PENDING <<
					 BNX2X_SWCID_SHIFT));
	data->header.rule_cnt = len;
}
2945
2946
2947
2948
2949
2950
2951
2952
2953
2954
2955
2956
2957static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
2958 struct bnx2x_mcast_obj *o)
2959{
2960 int i, cnt = 0;
2961 u64 elem;
2962
2963 for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
2964 elem = o->registry.aprox_match.vec[i];
2965 for (; elem; cnt++)
2966 elem &= elem - 1;
2967 }
2968
2969 o->set_registry_size(o, cnt);
2970
2971 return 0;
2972}
2973
/* Build and (unless RAMROD_DRV_CLR_ONLY) post a multicast-rules ramrod for
 * 57712+: first drains previously queued commands, then handles the current
 * one if there is room.  Returns 1 when a ramrod was posted (completion
 * pending), 0 on driver-only cleanup, negative errno on failure.
 */
static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
				struct bnx2x_mcast_ramrod_params *p,
				enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	struct eth_multicast_rules_ramrod_data *data =
		(struct eth_multicast_rules_ramrod_data *)(raw->rdata);
	int cnt = 0, rc;

	/* Reset the ramrod data buffer */
	memset(data, 0, sizeof(*data));

	cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);

	/* If there are no more pending commands - clear SCHEDULED state */
	if (list_empty(&o->pending_cmds_head))
		o->clear_sched(o);

	/* The current command is handled here (rather than enqueued) only
	 * when the caller's list survived bnx2x_config_mcast()'s enqueue
	 * decision, i.e. there was room for it in this very ramrod.
	 */
	if (p->mcast_list_len > 0)
		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);

	/* 'cnt' rule lines were written; discharge them from the total */
	o->total_pending_num -= cnt;

	/* Sanity checks: accounting must never go negative, and a single
	 * ramrod must never exceed the chip's per-ramrod rule limit.
	 */
	WARN_ON(o->total_pending_num < 0);
	WARN_ON(cnt > o->max_cmd_len);

	bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);

	/* Once nothing is left pending, the bin vector is final - refresh
	 * the registry size from it.
	 */
	if (!o->total_pending_num)
		bnx2x_mcast_refresh_registry_e2(bp, o);

	/* Driver-only cleanup: update bookkeeping but post nothing */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
		raw->clear_pending(raw);
		return 0;
	} else {
		/* Post the ramrod; completion arrives asynchronously */
		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
				   raw->cid, U64_HI(raw->rdata_mapping),
				   U64_LO(raw->rdata_mapping),
				   ETH_CONNECTION_TYPE);
		if (rc)
			return rc;

		/* Ramrod completion is pending */
		return 1;
	}
}
3059
3060static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
3061 struct bnx2x_mcast_ramrod_params *p,
3062 enum bnx2x_mcast_cmd cmd)
3063{
3064
3065 if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
3066 p->mcast_list_len = 1;
3067
3068 return 0;
3069}
3070
static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
				   struct bnx2x_mcast_ramrod_params *p,
				   int old_num_bins)
{
	/* 57711 programs the MC hash registers directly and queues no
	 * commands, so there is no accounting to undo here.
	 */
}
3077
/* Set bit 'bit' in the 57711 MC hash filter image: word index is bit/32,
 * bit position within the word is bit%32.
 */
#define BNX2X_57711_SET_MC_FILTER(filter, bit) \
do { \
	(filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
} while (0)
3082
3083static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
3084 struct bnx2x_mcast_obj *o,
3085 struct bnx2x_mcast_ramrod_params *p,
3086 u32 *mc_filter)
3087{
3088 struct bnx2x_mcast_list_elem *mlist_pos;
3089 int bit;
3090
3091 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
3092 bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
3093 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3094
3095 DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
3096 mlist_pos->mac, bit);
3097
3098
3099 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3100 bit);
3101 }
3102}
3103
3104static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
3105 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3106 u32 *mc_filter)
3107{
3108 int bit;
3109
3110 for (bit = bnx2x_mcast_get_next_bin(o, 0);
3111 bit >= 0;
3112 bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
3113 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3114 DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
3115 }
3116}
3117
3118
3119
3120
3121
/* Apply a multicast command on 57711: build the 256-bit hash filter image
 * in memory and write it to the MC_HASH registers.  No ramrod is used, so
 * this path always completes synchronously and returns 0 (or -EINVAL on an
 * unknown command).
 */
static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
				 struct bnx2x_mcast_ramrod_params *p,
				 enum bnx2x_mcast_cmd cmd)
{
	int i;
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	struct bnx2x_raw_obj *r = &o->raw;

	/* If CLEAR_ONLY is requested we only touch the software registry,
	 * leaving the hardware filter alone.
	 */
	if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
		u32 mc_filter[MC_HASH_SIZE] = {0};

		/* Build the image from scratch for every command; DEL just
		 * leaves it all-zero.
		 */
		switch (cmd) {
		case BNX2X_MCAST_CMD_ADD:
			bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
			break;

		case BNX2X_MCAST_CMD_DEL:
			DP(BNX2X_MSG_SP,
			   "Invalidating multicast MACs configuration\n");

			/* Clear the registry to match the cleared filter */
			memset(o->registry.aprox_match.vec, 0,
			       sizeof(o->registry.aprox_match.vec));
			break;

		case BNX2X_MCAST_CMD_RESTORE:
			bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
			break;

		default:
			BNX2X_ERR("Unknown command: %d\n", cmd);
			return -EINVAL;
		}

		/* Flush the image into the MC hash registers */
		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
	} else
		/* CLEAR_ONLY: just drop the software registry */
		memset(o->registry.aprox_match.vec, 0,
		       sizeof(o->registry.aprox_match.vec));

	/* No ramrod was sent - nothing is pending */
	r->clear_pending(r);

	return 0;
}
3175
/* Validate a multicast command for 57710 and pre-adjust the exact-match
 * registry and pending accounting.  Undone by bnx2x_mcast_revert_e1() on
 * failure.
 */
static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
				   struct bnx2x_mcast_ramrod_params *p,
				   enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	int reg_sz = o->get_registry_size(o);

	switch (cmd) {
	/* DEL drops all currently configured MACs */
	case BNX2X_MCAST_CMD_DEL:
		o->set_registry_size(o, 0);
		/* fall through - like RESTORE, DEL is sized by the number
		 * of previously configured entries.
		 */

	/* RESTORE replays the whole registry */
	case BNX2X_MCAST_CMD_RESTORE:
		p->mcast_list_len = reg_sz;
		DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
		   cmd, p->mcast_list_len);
		break;

	case BNX2X_MCAST_CMD_ADD:
	case BNX2X_MCAST_CMD_CONT:
		/* 57710 configures multicast via the exact-match CAM, so a
		 * single command may never exceed one ramrod's worth of
		 * entries.
		 */
		if (p->mcast_list_len > o->max_cmd_len) {
			BNX2X_ERR("Can't configure more than %d multicast MACs on 57710\n",
				  o->max_cmd_len);
			return -EINVAL;
		}

		/* Each ADD rewrites the whole table, so the new registry
		 * size is simply the new list length (if any).
		 */
		DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
		if (p->mcast_list_len > 0)
			o->set_registry_size(o, p->mcast_list_len);

		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return -EINVAL;

	}

	/* Every non-empty command consumes a full ramrod on 57710, so the
	 * pending count is charged in max_cmd_len units.
	 */
	if (p->mcast_list_len)
		o->total_pending_num += o->max_cmd_len;

	return 0;
}
3231
3232static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
3233 struct bnx2x_mcast_ramrod_params *p,
3234 int old_num_macs)
3235{
3236 struct bnx2x_mcast_obj *o = p->mcast_obj;
3237
3238 o->set_registry_size(o, old_num_macs);
3239
3240
3241
3242
3243
3244 if (p->mcast_list_len)
3245 o->total_pending_num -= o->max_cmd_len;
3246}
3247
/* Fill one entry of the 57710 MAC configuration table.  Only ADD/RESTORE
 * write anything: DEL relies on the caller having pre-invalidated the whole
 * table (see bnx2x_mcast_setup_e1()).
 */
static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
					struct bnx2x_mcast_obj *o, int idx,
					union bnx2x_mcast_config_data *cfg_data,
					enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_raw_obj *r = &o->raw;
	struct mac_configuration_cmd *data =
		(struct mac_configuration_cmd *)(r->rdata);

	if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
		/* Split the MAC into the three FW-order halfwords */
		bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
				      &data->config_table[idx].middle_mac_addr,
				      &data->config_table[idx].lsb_mac_addr,
				      cfg_data->mac);

		data->config_table[idx].vlan_id = 0;
		data->config_table[idx].pf_id = r->func_id;
		/* Deliver matching frames to this object's client only */
		data->config_table[idx].clients_bit_vector =
			cpu_to_le32(1 << r->cl_id);

		SET_FLAG(data->config_table[idx].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_SET);
	}
}
3274
3275
3276
3277
3278
3279
3280
3281
/* Fill the header of the 57710 set-MAC ramrod data: the CAM offset of this
 * function's multicast region, the broadcast client id (0xff), the echo
 * marker recognized by the completion handler, and the entry count.
 */
static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
					struct bnx2x_mcast_ramrod_params *p,
					u8 len)
{
	struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
	struct mac_configuration_cmd *data =
		(struct mac_configuration_cmd *)(r->rdata);

	/* Per-function multicast region; emulation uses a smaller table.
	 * The (1 + func_id) factor places it past the unicast regions.
	 */
	u8 offset = (CHIP_REV_IS_SLOW(bp) ?
		     BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
		     BNX2X_MAX_MULTICAST*(1 + r->func_id));

	data->hdr.offset = offset;
	data->hdr.client_id = cpu_to_le16(0xff);
	data->hdr.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
				     (BNX2X_FILTER_MCAST_PENDING <<
				      BNX2X_SWCID_SHIFT));
	data->hdr.length = len;
}
3301
3302
3303
3304
3305
3306
3307
3308
3309
3310
3311
3312
3313
3314
/* Replay every MAC from the exact-match registry into the config table.
 * start_idx is effectively unused on 57710: the whole registry always fits
 * one ramrod, so this never needs to resume mid-walk.  The constant -1
 * return is the "no next bin - restore complete" signal expected by the
 * hdl_restore contract (see bnx2x_mcast_hdl_pending_restore_e2()).
 */
static inline int bnx2x_mcast_handle_restore_cmd_e1(
	struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx,
	int *rdata_idx)
{
	struct bnx2x_mcast_mac_elem *elem;
	int i = 0;
	union bnx2x_mcast_config_data cfg_data = {NULL};

	list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
		cfg_data.mac = &elem->mac[0];
		o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);

		i++;

		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
		   cfg_data.mac);
	}

	*rdata_idx = i;

	return -1;
}
3338
3339
/* Handle pending commands on 57710.  Unlike the E2 path, exactly ONE queued
 * command is processed per invocation - each command rewrites the whole MAC
 * configuration table, so commands cannot share a ramrod.  Returns the
 * number of rules written (0 if nothing was queued), or -EINVAL.
 */
static inline int bnx2x_mcast_handle_pending_cmds_e1(
	struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
{
	struct bnx2x_pending_mcast_cmd *cmd_pos;
	struct bnx2x_mcast_mac_elem *pmac_pos;
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	union bnx2x_mcast_config_data cfg_data = {NULL};
	int cnt = 0;

	/* Nothing queued - nothing to do */
	if (list_empty(&o->pending_cmds_head))
		return 0;

	/* Process the oldest command only */
	cmd_pos = list_first_entry(&o->pending_cmds_head,
				   struct bnx2x_pending_mcast_cmd, link);

	switch (cmd_pos->type) {
	case BNX2X_MCAST_CMD_ADD:
		list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
			cfg_data.mac = &pmac_pos->mac[0];
			o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);

			cnt++;

			DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
			   pmac_pos->mac);
		}
		break;

	case BNX2X_MCAST_CMD_DEL:
		/* DEL needs no rules: the caller pre-invalidated the table.
		 * Report how many entries were dropped.
		 */
		cnt = cmd_pos->data.macs_num;
		DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
		break;

	case BNX2X_MCAST_CMD_RESTORE:
		o->hdl_restore(bp, o, 0, &cnt);
		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
		return -EINVAL;
	}

	/* The command is fully consumed in one pass - free it */
	list_del(&cmd_pos->link);
	kfree(cmd_pos);

	return cnt;
}
3390
3391
3392
3393
3394
3395
3396
3397
3398
3399static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
3400 __le16 *fw_lo, u8 *mac)
3401{
3402 mac[1] = ((u8 *)fw_hi)[0];
3403 mac[0] = ((u8 *)fw_hi)[1];
3404 mac[3] = ((u8 *)fw_mid)[0];
3405 mac[2] = ((u8 *)fw_mid)[1];
3406 mac[5] = ((u8 *)fw_lo)[0];
3407 mac[4] = ((u8 *)fw_lo)[1];
3408}
3409
3410
3411
3412
3413
3414
3415
3416
3417
3418
3419
3420
/* Sync the exact-match registry with the MAC table the just-completed
 * ramrod configured.  The ramrod data is still valid here: entry 0 carrying
 * a SET action means MACs were added; otherwise the table was invalidated
 * (a DEL).  Returns 0 on success, -ENOMEM on allocation failure.
 */
static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
						  struct bnx2x_mcast_obj *o)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct bnx2x_mcast_mac_elem *elem;
	struct mac_configuration_cmd *data =
		(struct mac_configuration_cmd *)(raw->rdata);

	if (GET_FLAG(data->config_table[0].flags,
		     MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
		int i, len = data->hdr.length;

		/* A RESTORE replayed the registry itself - it is already
		 * up to date, so don't rebuild it.
		 */
		if (!list_empty(&o->registry.exact_match.macs))
			return 0;

		/* All 'len' elements live in ONE kcalloc block; only the
		 * pointer to the first element is ever kfree()d (in the DEL
		 * branch below), which releases the whole block at once.
		 */
		elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
		if (!elem) {
			BNX2X_ERR("Failed to allocate registry memory\n");
			return -ENOMEM;
		}

		for (i = 0; i < len; i++, elem++) {
			bnx2x_get_fw_mac_addr(
				&data->config_table[i].msb_mac_addr,
				&data->config_table[i].middle_mac_addr,
				&data->config_table[i].lsb_mac_addr,
				elem->mac);
			DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
			   elem->mac);
			list_add_tail(&elem->link,
				      &o->registry.exact_match.macs);
		}
	} else {
		/* DEL completed: the first list entry is the base of the
		 * single allocation above, so one kfree releases them all.
		 * NOTE(review): assumes the registry list is non-empty when
		 * a DEL completes - confirm against callers.
		 */
		elem = list_first_entry(&o->registry.exact_match.macs,
					struct bnx2x_mcast_mac_elem, link);
		DP(BNX2X_MSG_SP, "Deleting a registry\n");
		kfree(elem);
		INIT_LIST_HEAD(&o->registry.exact_match.macs);
	}

	return 0;
}
3467
/* Build and (unless RAMROD_DRV_CLR_ONLY) post a set-MAC ramrod for 57710.
 * At most one command - a previously queued one, or else the current one -
 * is written per ramrod.  Returns 1 when a ramrod was posted, 0 on
 * driver-only cleanup, negative errno on failure.
 */
static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
				struct bnx2x_mcast_ramrod_params *p,
				enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	struct bnx2x_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *data =
		(struct mac_configuration_cmd *)(raw->rdata);
	int cnt = 0, i, rc;

	/* Reset the ramrod data buffer */
	memset(data, 0, sizeof(*data));

	/* Pre-invalidate the whole table so a DEL needs no explicit rules
	 * and unused tail entries of an ADD are harmless.
	 */
	for (i = 0; i < o->max_cmd_len ; i++)
		SET_FLAG(data->config_table[i].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);

	/* Handle (at most one) queued command first */
	cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);

	/* If there are no more pending commands - clear SCHEDULED state */
	if (list_empty(&o->pending_cmds_head))
		o->clear_sched(o);

	/* Only when no queued command was handled may the current one go */
	if (!cnt)
		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);

	/* Every command on 57710 was charged as a full ramrod, so a full
	 * ramrod is discharged here regardless of 'cnt'.
	 */
	o->total_pending_num -= o->max_cmd_len;

	/* A single ramrod must never exceed the table size */
	WARN_ON(cnt > o->max_cmd_len);

	/* Set ramrod header (in particular, a number of entries to update) */
	bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);

	/* Update the registry now, while the ramrod data (which the update
	 * reads) is still intact.  Done before posting since no context is
	 * preserved across the completion.
	 */
	rc = bnx2x_mcast_refresh_registry_e1(bp, o);
	if (rc)
		return rc;

	/* Driver-only cleanup: bookkeeping done, post nothing */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
		raw->clear_pending(raw);
		return 0;
	} else {
		/* Post the ramrod; completion arrives asynchronously */
		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
				   U64_HI(raw->rdata_mapping),
				   U64_LO(raw->rdata_mapping),
				   ETH_CONNECTION_TYPE);
		if (rc)
			return rc;

		/* Ramrod completion is pending */
		return 1;
	}

}
3549
/* Exact-match registry size: number of MACs currently configured (57710) */
static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
{
	return o->registry.exact_match.num_macs_set;
}
3554
/* Approximate-match registry size: number of hash bins currently set */
static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
{
	return o->registry.aprox_match.num_bins_set;
}
3559
/* Record the number of exact-match MACs configured (57710) */
static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
						int n)
{
	o->registry.exact_match.num_macs_set = n;
}
3565
/* Record the number of approximate-match hash bins set */
static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
						int n)
{
	o->registry.aprox_match.num_bins_set = n;
}
3571
/* Top-level multicast configuration entry point.  Validates the command,
 * enqueues it when it can't run immediately (ramrod outstanding or too much
 * already pending), and otherwise drives the chip-specific config_mcast
 * callback.  Returns 0 on synchronous completion, a positive value when a
 * ramrod is pending (callback contract), or a negative errno - in which
 * case all accounting is rolled back.
 */
int bnx2x_config_mcast(struct bnx2x *bp,
		       struct bnx2x_mcast_ramrod_params *p,
		       enum bnx2x_mcast_cmd cmd)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	struct bnx2x_raw_obj *r = &o->raw;
	int rc = 0, old_reg_size;

	/* Snapshot the registry size before validate() mutates it, so the
	 * error path can restore it via o->revert().
	 */
	old_reg_size = o->get_registry_size(o);

	/* Chip-specific validation + accounting */
	rc = o->validate(bp, p, cmd);
	if (rc)
		return rc;

	/* Nothing to send and nothing scheduled - done */
	if ((!p->mcast_list_len) && (!o->check_sched(o)))
		return 0;

	DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
	   o->total_pending_num, p->mcast_list_len, o->max_cmd_len);

	/* Enqueue the command when it can't be handled right away: either
	 * a ramrod is already outstanding, or the pending backlog exceeds
	 * what a single ramrod can carry.
	 */
	if (r->check_pending(r) ||
	    ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
		rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
		if (rc < 0)
			goto error_exit1;

		/* The command was enqueued; clear the list length so the
		 * setup path won't handle it a second time as "current".
		 */
		p->mcast_list_len = 0;
	}

	if (!r->check_pending(r)) {

		/* Claim the pending slot before posting */
		r->set_pending(r);

		/* Chip-specific setup + (possibly) ramrod post */
		rc = o->config_mcast(bp, p, cmd);
		if (rc < 0)
			goto error_exit2;

		/* Optionally block until the ramrod completes */
		if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
			rc = o->wait_comp(bp, o);
	}

	return rc;

error_exit2:
	r->clear_pending(r);

error_exit1:
	/* Roll back what validate() (and possibly enqueue) charged */
	o->revert(bp, p, old_reg_size);

	return rc;
}
3637
/* Clear the SCHEDULED state bit, bracketed by bit-op barriers so the
 * change is ordered against surrounding accesses on all architectures.
 */
static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
{
	smp_mb__before_clear_bit();
	clear_bit(o->sched_state, o->raw.pstate);
	smp_mb__after_clear_bit();
}
3644
/* Set the SCHEDULED state bit with full ordering.
 * NOTE(review): the clear_bit barrier flavours are used around set_bit();
 * presumably fine since they are full barriers on bitops here - confirm
 * (newer kernels spell these smp_mb__before/after_atomic()).
 */
static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
{
	smp_mb__before_clear_bit();
	set_bit(o->sched_state, o->raw.pstate);
	smp_mb__after_clear_bit();
}
3651
3652static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
3653{
3654 return !!test_bit(o->sched_state, o->raw.pstate);
3655}
3656
3657static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
3658{
3659 return o->raw.check_pending(&o->raw) || o->check_sched(o);
3660}
3661
/* Initialize a multicast object, wiring the chip-family-specific callbacks
 * (57710 exact-match CAM / 57711 hash registers / 57712+ ramrod rules) into
 * the common state machine.
 */
void bnx2x_init_mcast_obj(struct bnx2x *bp,
			  struct bnx2x_mcast_obj *mcast_obj,
			  u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
			  u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
			  int state, unsigned long *pstate, bnx2x_obj_type type)
{
	memset(mcast_obj, 0, sizeof(*mcast_obj));

	bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
			   rdata, rdata_mapping, state, pstate, type);

	mcast_obj->engine_id = engine_id;

	INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);

	mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
	mcast_obj->check_sched = bnx2x_mcast_check_sched;
	mcast_obj->set_sched = bnx2x_mcast_set_sched;
	mcast_obj->clear_sched = bnx2x_mcast_clear_sched;

	if (CHIP_IS_E1(bp)) {
		/* 57710: exact-match CAM, one command per ramrod */
		mcast_obj->config_mcast = bnx2x_mcast_setup_e1;
		mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
		mcast_obj->hdl_restore =
			bnx2x_mcast_handle_restore_cmd_e1;
		mcast_obj->check_pending = bnx2x_mcast_check_pending;

		/* Emulation has a much smaller config table */
		if (CHIP_REV_IS_SLOW(bp))
			mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
		else
			mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;

		mcast_obj->wait_comp = bnx2x_mcast_wait;
		mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e1;
		mcast_obj->validate = bnx2x_mcast_validate_e1;
		mcast_obj->revert = bnx2x_mcast_revert_e1;
		mcast_obj->get_registry_size =
			bnx2x_mcast_get_registry_size_exact;
		mcast_obj->set_registry_size =
			bnx2x_mcast_set_registry_size_exact;

		/* 57710 keeps the exact list of configured MACs so a
		 * RESTORE can replay them verbatim.
		 */
		INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);

	} else if (CHIP_IS_E1H(bp)) {
		/* 57711: synchronous MC hash registers - no queueing,
		 * no per-rule programming.
		 */
		mcast_obj->config_mcast = bnx2x_mcast_setup_e1h;
		mcast_obj->enqueue_cmd = NULL;
		mcast_obj->hdl_restore = NULL;
		mcast_obj->check_pending = bnx2x_mcast_check_pending;

		/* -1 = unlimited: the hash covers any number of MACs */
		mcast_obj->max_cmd_len = -1;
		mcast_obj->wait_comp = bnx2x_mcast_wait;
		mcast_obj->set_one_rule = NULL;
		mcast_obj->validate = bnx2x_mcast_validate_e1h;
		mcast_obj->revert = bnx2x_mcast_revert_e1h;
		mcast_obj->get_registry_size =
			bnx2x_mcast_get_registry_size_aprox;
		mcast_obj->set_registry_size =
			bnx2x_mcast_set_registry_size_aprox;
	} else {
		/* 57712+: multicast-rules ramrod */
		mcast_obj->config_mcast = bnx2x_mcast_setup_e2;
		mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
		mcast_obj->hdl_restore =
			bnx2x_mcast_handle_restore_cmd_e2;
		mcast_obj->check_pending = bnx2x_mcast_check_pending;

		/* Max rule lines per MULTICAST_RULES ramrod - presumably a
		 * firmware limit; no HSI define visible for it (TODO).
		 */
		mcast_obj->max_cmd_len = 16;
		mcast_obj->wait_comp = bnx2x_mcast_wait;
		mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e2;
		mcast_obj->validate = bnx2x_mcast_validate_e2;
		mcast_obj->revert = bnx2x_mcast_revert_e2;
		mcast_obj->get_registry_size =
			bnx2x_mcast_get_registry_size_aprox;
		mcast_obj->set_registry_size =
			bnx2x_mcast_set_registry_size_aprox;
	}
}
3745
3746
3747
3748
3749
3750
3751
3752
3753
3754
3755
3756
3757
3758static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
3759{
3760 int c, old;
3761
3762 c = atomic_read(v);
3763 for (;;) {
3764 if (unlikely(c + a >= u))
3765 return false;
3766
3767 old = atomic_cmpxchg((v), c, c + a);
3768 if (likely(old == c))
3769 break;
3770 c = old;
3771 }
3772
3773 return true;
3774}
3775
3776
3777
3778
3779
3780
3781
3782
3783
3784
3785
3786static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
3787{
3788 int c, old;
3789
3790 c = atomic_read(v);
3791 for (;;) {
3792 if (unlikely(c - a < u))
3793 return false;
3794
3795 old = atomic_cmpxchg((v), c, c - a);
3796 if (likely(old == c))
3797 break;
3798 c = old;
3799 }
3800
3801 return true;
3802}
3803
/* Take 'cnt' credits from the pool; fails (returns false) rather than let
 * the credit count drop below zero.  The full barriers pair with
 * bnx2x_credit_pool_put() so credit movement is visible across CPUs.
 */
static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
{
	bool rc;

	smp_mb();
	rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
	smp_mb();

	return rc;
}
3814
/* Return 'cnt' credits to the pool; fails (returns false) rather than let
 * the count exceed pool_sz - the pool_sz + 1 upper bound makes exactly
 * pool_sz the highest reachable value.
 */
static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
{
	bool rc;

	smp_mb();

	/* Don't let credit exceed the pool size */
	rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);

	smp_mb();

	return rc;
}
3828
3829static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
3830{
3831 int cur_credit;
3832
3833 smp_mb();
3834 cur_credit = atomic_read(&o->credit);
3835
3836 return cur_credit;
3837}
3838
/* get/put stub for unlimited pools - always succeeds */
static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
					  int cnt)
{
	return true;
}
3844
3845
/* Claim one free entry from the pool's bit-vector mirror (a set bit means
 * "free").  On success clears the bit and stores the entry's absolute
 * offset (base_pool_offset + index) in *offset; on an exhausted pool
 * returns false with *offset = -1.
 */
static bool bnx2x_credit_pool_get_entry(
	struct bnx2x_credit_pool_obj *o,
	int *offset)
{
	int idx, vec, i;

	*offset = -1;

	/* Find the first 64-bit word with at least one free entry */
	for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {

		/* Skip words with no free entries */
		if (!o->pool_mirror[vec])
			continue;

		/* Scan this word bit by bit for the free entry */
		for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
		      i < BIT_VEC64_ELEM_SZ; idx++, i++)

			if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
				/* Found - claim it and report its offset */
				BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
				*offset = o->base_pool_offset + idx;
				return true;
			}
	}

	return false;
}
3875
3876static bool bnx2x_credit_pool_put_entry(
3877 struct bnx2x_credit_pool_obj *o,
3878 int offset)
3879{
3880 if (offset < o->base_pool_offset)
3881 return false;
3882
3883 offset -= o->base_pool_offset;
3884
3885 if (offset >= o->pool_sz)
3886 return false;
3887
3888
3889 BIT_VEC64_SET_BIT(o->pool_mirror, offset);
3890
3891 return true;
3892}
3893
/* put_entry stub for pools without entry mapping - accepts any offset */
static bool bnx2x_credit_pool_put_entry_always_true(
	struct bnx2x_credit_pool_obj *o,
	int offset)
{
	return true;
}
3900
/* get_entry stub for pools without entry mapping - always succeeds;
 * -1 signals "no specific entry" to the caller.
 */
static bool bnx2x_credit_pool_get_entry_always_true(
	struct bnx2x_credit_pool_obj *o,
	int *offset)
{
	*offset = -1;
	return true;
}
3908
3909
3910
3911
3912
3913
3914
3915
3916
3917
3918
/* Initialize a credit pool.  'credit' < 0 creates an unlimited pool (get/put
 * always succeed); 'base' < 0 disables entry mapping (get_entry/put_entry
 * become no-op stubs) even when credit accounting is real.
 */
static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
					  int base, int credit)
{
	/* Zero the object first */
	memset(p, 0, sizeof(*p));

	/* Set the table to all 1s: every entry starts free */
	memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));

	/* Init a pool as full */
	atomic_set(&p->credit, credit);

	/* The total poll size */
	p->pool_sz = credit;

	p->base_pool_offset = base;

	/* Commit the changes before publishing the callbacks */
	smp_mb();

	p->check = bnx2x_credit_pool_check;

	/* If pool credit is negative - the pool is unlimited */
	if (credit >= 0) {
		p->put = bnx2x_credit_pool_put;
		p->get = bnx2x_credit_pool_get;
		p->put_entry = bnx2x_credit_pool_put_entry;
		p->get_entry = bnx2x_credit_pool_get_entry;
	} else {
		p->put = bnx2x_credit_pool_always_true;
		p->get = bnx2x_credit_pool_always_true;
		p->put_entry = bnx2x_credit_pool_put_entry_always_true;
		p->get_entry = bnx2x_credit_pool_get_entry_always_true;
	}

	/* A negative base overrides entry mapping even for a real pool */
	if (base < 0) {
		p->put_entry = bnx2x_credit_pool_put_entry_always_true;
		p->get_entry = bnx2x_credit_pool_get_entry_always_true;
	}
}
3960
/* Set up the per-function MAC credit pool, sizing each function's CAM
 * share according to chip family and the number of active functions.
 */
void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
				struct bnx2x_credit_pool_obj *p, u8 func_id,
				u8 func_num)
{

/* TODO: this will be defined in consts as well... */
#define BNX2X_CAM_SIZE_EMUL 5

	int cam_sz;

	if (CHIP_IS_E1(bp)) {
		/* 57710: half the CAM per port, minus the area reserved
		 * for multicast entries (emulation uses a tiny table).
		 */
		if (!CHIP_REV_IS_SLOW(bp))
			cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
		else
			cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;

		bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);

	} else if (CHIP_IS_E1H(bp)) {
		/* 57711: CAM is split by ports, then evenly among the
		 * functions on each port (hence the 2*func_num divisor).
		 */
		if ((func_num > 0)) {
			if (!CHIP_REV_IS_SLOW(bp))
				cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
			else
				cam_sz = BNX2X_CAM_SIZE_EMUL;
			bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
		} else {
			/* No partitioning info - this pool cannot be used */
			bnx2x_init_credit_pool(p, 0, 0);
		}

	} else {
		/* 57712+: CAM split evenly among all functions */
		if ((func_num > 0)) {
			if (!CHIP_REV_IS_SLOW(bp))
				cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
			else
				cam_sz = BNX2X_CAM_SIZE_EMUL;

			/* base of -1: credits are counted, but no fixed
			 * per-function CAM offsets are assigned (entry
			 * mapping is disabled).
			 */
			bnx2x_init_credit_pool(p, -1, cam_sz);
		} else {
			/* No partitioning info - this pool cannot be used */
			bnx2x_init_credit_pool(p, 0, 0);
		}

	}
}
4018
4019void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
4020 struct bnx2x_credit_pool_obj *p,
4021 u8 func_id,
4022 u8 func_num)
4023{
4024 if (CHIP_IS_E1x(bp)) {
4025
4026
4027
4028
4029 bnx2x_init_credit_pool(p, 0, -1);
4030 } else {
4031
4032
4033
4034
4035 if (func_num > 0) {
4036 int credit = MAX_VLAN_CREDIT_E2 / func_num;
4037 bnx2x_init_credit_pool(p, func_id * credit, credit);
4038 } else
4039
4040 bnx2x_init_credit_pool(p, 0, 0);
4041 }
4042}
4043
4044
4045
4046
4047
4048
4049
4050
4051
4052
4053static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
4054 struct bnx2x_config_rss_params *p)
4055{
4056 int i;
4057
4058 DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
4059 DP(BNX2X_MSG_SP, "0x0000: ");
4060 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
4061 DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
4062
4063
4064 if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
4065 (((i + 1) & 0x3) == 0)) {
4066 DP_CONT(BNX2X_MSG_SP, "\n");
4067 DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
4068 }
4069 }
4070
4071 DP_CONT(BNX2X_MSG_SP, "\n");
4072}
4073
4074
4075
4076
4077
4078
4079
4080
4081
/* Build and post an RSS-update ramrod from the caller's parameters: mode,
 * hash capabilities, indirection table and (optionally) the hash key.
 * Returns 1 on a successful post (completion pending) or a negative errno.
 */
static int bnx2x_setup_rss(struct bnx2x *bp,
			   struct bnx2x_config_rss_params *p)
{
	struct bnx2x_rss_config_obj *o = p->rss_obj;
	struct bnx2x_raw_obj *r = &o->raw;
	struct eth_rss_update_ramrod_data *data =
		(struct eth_rss_update_ramrod_data *)(r->rdata);
	u8 rss_mode = 0;
	int rc;

	memset(data, 0, sizeof(*data));

	DP(BNX2X_MSG_SP, "Configuring RSS\n");

	/* Echo lets the completion handler match this ramrod to its state */
	data->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
				 (r->state << BNX2X_SWCID_SHIFT));

	/* RSS mode */
	if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
		rss_mode = ETH_RSS_MODE_DISABLED;
	else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
		rss_mode = ETH_RSS_MODE_REGULAR;

	data->rss_mode = rss_mode;

	DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);

	/* Translate each requested hash type into its FW capability flag.
	 * NOTE(review): the flags are OR-ed in host order - presumably
	 * data->capabilities is a byte-wide/native field; confirm against
	 * the eth_rss_update_ramrod_data HSI definition.
	 */
	if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;

	/* Hashing mask */
	data->rss_result_mask = p->rss_result_mask;

	/* RSS engine ID */
	data->rss_engine_id = o->engine_id;

	DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);

	/* Indirection table */
	memcpy(data->indirection_table, p->ind_table,
		  T_ETH_INDIRECTION_TABLE_SIZE);

	/* Keep a shadow copy so bnx2x_get_rss_ind_table() can report it */
	memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);

	/* Print the indirection table */
	if (netif_msg_ifup(bp))
		bnx2x_debug_print_ind_table(bp, p);

	/* RSS keys */
	if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
		memcpy(&data->rss_key[0], &p->rss_key[0],
		       sizeof(data->rss_key));
		data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
	}

	/* Post the ramrod; completion arrives asynchronously */
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
			   U64_HI(r->rdata_mapping),
			   U64_LO(r->rdata_mapping),
			   ETH_CONNECTION_TYPE);

	if (rc < 0)
		return rc;

	/* Ramrod completion is pending */
	return 1;
}
4180
/* Copy the last indirection table programmed via bnx2x_setup_rss() into
 * the caller's buffer (must hold sizeof(rss_obj->ind_table) bytes).
 */
void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
			     u8 *ind_table)
{
	memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
}
4186
/* Top-level RSS configuration: marks the object pending, invokes the
 * config_rss callback and optionally waits for the ramrod to complete.
 * Returns 0 on success (or driver-only cleanup), a positive value when a
 * ramrod is still pending, or a negative errno.
 */
int bnx2x_config_rss(struct bnx2x *bp,
		     struct bnx2x_config_rss_params *p)
{
	int rc;
	struct bnx2x_rss_config_obj *o = p->rss_obj;
	struct bnx2x_raw_obj *r = &o->raw;

	/* Driver-only cleanup requested - nothing to send to the chip */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
		return 0;

	r->set_pending(r);

	rc = o->config_rss(bp, p);
	if (rc < 0) {
		/* Post failed - release the pending state */
		r->clear_pending(r);
		return rc;
	}

	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
		rc = r->wait_comp(bp, r);

	return rc;
}
4211
4212
4213void bnx2x_init_rss_config_obj(struct bnx2x *bp,
4214 struct bnx2x_rss_config_obj *rss_obj,
4215 u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
4216 void *rdata, dma_addr_t rdata_mapping,
4217 int state, unsigned long *pstate,
4218 bnx2x_obj_type type)
4219{
4220 bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4221 rdata_mapping, state, pstate, type);
4222
4223 rss_obj->engine_id = engine_id;
4224 rss_obj->config_rss = bnx2x_setup_rss;
4225}
4226
4227
4228
4229
4230
4231
4232
4233
4234
4235
4236
4237
4238
4239
4240
4241int bnx2x_queue_state_change(struct bnx2x *bp,
4242 struct bnx2x_queue_state_params *params)
4243{
4244 struct bnx2x_queue_sp_obj *o = params->q_obj;
4245 int rc, pending_bit;
4246 unsigned long *pending = &o->pending;
4247
4248
4249 rc = o->check_transition(bp, o, params);
4250 if (rc) {
4251 BNX2X_ERR("check transition returned an error. rc %d\n", rc);
4252 return -EINVAL;
4253 }
4254
4255
4256 DP(BNX2X_MSG_SP, "pending bit was=%lx\n", o->pending);
4257 pending_bit = o->set_pending(o, params);
4258 DP(BNX2X_MSG_SP, "pending bit now=%lx\n", o->pending);
4259
4260
4261 if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags))
4262 o->complete_cmd(bp, o, pending_bit);
4263 else {
4264
4265 rc = o->send_cmd(bp, params);
4266 if (rc) {
4267 o->next_state = BNX2X_Q_STATE_MAX;
4268 clear_bit(pending_bit, pending);
4269 smp_mb__after_clear_bit();
4270 return rc;
4271 }
4272
4273 if (test_bit(RAMROD_COMP_WAIT, ¶ms->ramrod_flags)) {
4274 rc = o->wait_comp(bp, o, pending_bit);
4275 if (rc)
4276 return rc;
4277
4278 return 0;
4279 }
4280 }
4281
4282 return !!test_bit(pending_bit, pending);
4283}
4284
4285
4286static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
4287 struct bnx2x_queue_state_params *params)
4288{
4289 enum bnx2x_queue_cmd cmd = params->cmd, bit;
4290
4291
4292
4293
4294 if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
4295 (cmd == BNX2X_Q_CMD_DEACTIVATE))
4296 bit = BNX2X_Q_CMD_UPDATE;
4297 else
4298 bit = cmd;
4299
4300 set_bit(bit, &obj->pending);
4301 return bit;
4302}
4303
/* Busy-wait until the 'cmd' pending bit is cleared from o->pending
 * (it is set when the ramrod is posted and cleared by the completion
 * handler).  Delegates to the generic state-wait helper.
 */
static int bnx2x_queue_wait_comp(struct bnx2x *bp,
				 struct bnx2x_queue_sp_obj *o,
				 enum bnx2x_queue_cmd cmd)
{
	return bnx2x_state_wait(bp, cmd, &o->pending);
}
4310
4311
4312
4313
4314
4315
4316
4317
4318
4319
/**
 * bnx2x_queue_comp_cmd - complete a pending queue command
 *
 * @bp:		device handle
 * @o:		queue object the completion belongs to
 * @cmd:	command being completed
 *
 * Moves the queue object into its previously recorded next_state and
 * clears the command's pending bit.  Returns 0 on success, -EINVAL if
 * the FW reported a completion for a command that was never pending.
 */
static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
				struct bnx2x_queue_sp_obj *o,
				enum bnx2x_queue_cmd cmd)
{
	unsigned long cur_pending = o->pending;

	/* The completion must match a command we actually posted */
	if (!test_and_clear_bit(cmd, &cur_pending)) {
		BNX2X_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
			  cmd, o->cids[BNX2X_PRIMARY_CID_INDEX],
			  o->state, cur_pending, o->next_state);
		return -EINVAL;
	}

	/* Sanity check only - state is still updated below even if the
	 * tx-only counter looks wrong.
	 */
	if (o->next_tx_only >= o->max_cos)
		BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
			  o->next_tx_only, o->max_cos);

	DP(BNX2X_MSG_SP,
	   "Completing command %d for queue %d, setting state to %d\n",
	   cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);

	if (o->next_tx_only)
		DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
		   o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);

	o->state = o->next_state;
	o->num_tx_only = o->next_tx_only;
	o->next_state = BNX2X_Q_STATE_MAX;

	/* Make sure the state update above is visible before the pending
	 * bit is cleared - readers test the bit first, then read state.
	 */
	wmb();

	clear_bit(cmd, &o->pending);
	smp_mb__after_clear_bit();

	return 0;
}
4362
4363static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
4364 struct bnx2x_queue_state_params *cmd_params,
4365 struct client_init_ramrod_data *data)
4366{
4367 struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
4368
4369
4370
4371
4372 data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, ¶ms->flags) *
4373 CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4374}
4375
/* Fill the 'general' section of the client-init ramrod data from the
 * queue object and the caller-provided general setup parameters.
 * Field layout is dictated by the FW interface.
 */
static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
				struct bnx2x_queue_sp_obj *o,
				struct bnx2x_general_setup_params *params,
				struct client_init_general_data *gen_data,
				unsigned long *flags)
{
	gen_data->client_id = o->cl_id;

	/* Statistics are either enabled with the given counter id, or the
	 * counter id is set to the FW "disabled" sentinel value.
	 */
	if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
		gen_data->statistics_counter_id =
					params->stat_id;
		gen_data->statistics_en_flg = 1;
		gen_data->statistics_zero_flg =
			test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
	} else
		gen_data->statistics_counter_id =
					DISABLE_STATISTIC_COUNTER_ID_VALUE;

	gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
	gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
	gen_data->sp_client_id = params->spcl_id;
	gen_data->mtu = cpu_to_le16(params->mtu);
	gen_data->func_id = o->func_id;

	gen_data->cos = params->cos;

	/* Traffic class for link-level flow control purposes */
	gen_data->traffic_type =
		test_bit(BNX2X_Q_FLG_FCOE, flags) ?
		LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;

	DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
	   gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
}
4410
/* Fill the 'tx' section of the client-init ramrod data: feature flags,
 * status block coordinates and the Tx BD ring DMA address.
 */
static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
				struct bnx2x_txq_setup_params *params,
				struct client_init_tx_data *tx_data,
				unsigned long *flags)
{
	tx_data->enforce_security_flg =
		test_bit(BNX2X_Q_FLG_TX_SEC, flags);
	tx_data->default_vlan =
		cpu_to_le16(params->default_vlan);
	tx_data->default_vlan_flg =
		test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
	tx_data->tx_switching_flg =
		test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
	tx_data->anti_spoofing_flg =
		test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
	tx_data->force_default_pri_flg =
		test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);

	tx_data->tx_status_block_id = params->fw_sb_id;
	tx_data->tx_sb_index_number = params->sb_cq_index;
	tx_data->tss_leading_client_id = params->tss_leading_cl_id;

	tx_data->tx_bd_page_base.lo =
		cpu_to_le32(U64_LO(params->dscr_map));
	tx_data->tx_bd_page_base.hi =
		cpu_to_le32(U64_HI(params->dscr_map));

	/* No extra Tx state bits are set at queue setup time */
	tx_data->state = 0;
}
4441
/* Fill the Rx pause thresholds (CQE/BD/SGE high and low watermarks) and
 * the per-CoS Rx mask in the client-init ramrod data.
 */
static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
				struct rxq_pause_params *params,
				struct client_init_rx_data *rx_data)
{
	/* All values are little-endian on the wire */
	rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
	rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
	rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
	rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
	rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
	rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
	rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
}
4455
/* Fill the 'rx' section of the client-init ramrod data: TPA mode, ring
 * DMA addresses, VLAN stripping flags and multicast/RSS parameters.
 */
static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
				struct bnx2x_rxq_setup_params *params,
				struct client_init_rx_data *rx_data,
				unsigned long *flags)
{
	rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
				CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
	rx_data->tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) *
				CLIENT_INIT_RX_DATA_TPA_MODE;
	rx_data->vmqueue_mode_en_flg = 0;

	rx_data->cache_line_alignment_log_size =
		params->cache_line_log;
	rx_data->enable_dynamic_hc =
		test_bit(BNX2X_Q_FLG_DHC, flags);
	rx_data->max_sges_for_packet = params->max_sges_pkt;
	rx_data->client_qzone_id = params->cl_qzone_id;
	rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);

	/* Start the client with all unicast/multicast dropped; the Rx-mode
	 * configuration flow opens it up later.
	 */
	rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
				     CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);

	/* Checksum/TTL error drops are left disabled */
	rx_data->drop_ip_cs_err_flg = 0;
	rx_data->drop_tcp_cs_err_flg = 0;
	rx_data->drop_ttl0_flg = 0;
	rx_data->drop_udp_cs_err_flg = 0;
	rx_data->inner_vlan_removal_enable_flg =
		test_bit(BNX2X_Q_FLG_VLAN, flags);
	rx_data->outer_vlan_removal_enable_flg =
		test_bit(BNX2X_Q_FLG_OV, flags);
	rx_data->status_block_id = params->fw_sb_id;
	rx_data->rx_sb_index_number = params->sb_cq_index;
	rx_data->max_tpa_queues = params->max_tpa_queues;
	rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
	rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
	rx_data->bd_page_base.lo =
		cpu_to_le32(U64_LO(params->dscr_map));
	rx_data->bd_page_base.hi =
		cpu_to_le32(U64_HI(params->dscr_map));
	rx_data->sge_page_base.lo =
		cpu_to_le32(U64_LO(params->sge_map));
	rx_data->sge_page_base.hi =
		cpu_to_le32(U64_HI(params->sge_map));
	rx_data->cqe_page_base.lo =
		cpu_to_le32(U64_LO(params->rcq_map));
	rx_data->cqe_page_base.hi =
		cpu_to_le32(U64_HI(params->rcq_map));
	rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);

	if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
		rx_data->approx_mcast_engine_id = params->mcast_engine_id;
		rx_data->is_approx_mcast = 1;
	}

	rx_data->rss_engine_id = params->rss_engine_id;

	/* silent vlan removal */
	rx_data->silent_vlan_removal_flg =
		test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
	rx_data->silent_vlan_value =
		cpu_to_le16(params->silent_removal_value);
	rx_data->silent_vlan_mask =
		cpu_to_le16(params->silent_removal_mask);

}
4523
4524
4525static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
4526 struct bnx2x_queue_state_params *cmd_params,
4527 struct client_init_ramrod_data *data)
4528{
4529 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4530 &cmd_params->params.setup.gen_params,
4531 &data->general,
4532 &cmd_params->params.setup.flags);
4533
4534 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4535 &cmd_params->params.setup.txq_params,
4536 &data->tx,
4537 &cmd_params->params.setup.flags);
4538
4539 bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
4540 &cmd_params->params.setup.rxq_params,
4541 &data->rx,
4542 &cmd_params->params.setup.flags);
4543
4544 bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
4545 &cmd_params->params.setup.pause_params,
4546 &data->rx);
4547}
4548
4549
4550static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
4551 struct bnx2x_queue_state_params *cmd_params,
4552 struct tx_queue_init_ramrod_data *data)
4553{
4554 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4555 &cmd_params->params.tx_only.gen_params,
4556 &data->general,
4557 &cmd_params->params.tx_only.flags);
4558
4559 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4560 &cmd_params->params.tx_only.txq_params,
4561 &data->tx,
4562 &cmd_params->params.tx_only.flags);
4563
4564 DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x",
4565 cmd_params->q_obj->cids[0],
4566 data->tx.tx_bd_page_base.lo,
4567 data->tx.tx_bd_page_base.hi);
4568}
4569
4570
4571
4572
4573
4574
4575
4576
4577
4578
4579
4580
4581static inline int bnx2x_q_init(struct bnx2x *bp,
4582 struct bnx2x_queue_state_params *params)
4583{
4584 struct bnx2x_queue_sp_obj *o = params->q_obj;
4585 struct bnx2x_queue_init_params *init = ¶ms->params.init;
4586 u16 hc_usec;
4587 u8 cos;
4588
4589
4590 if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
4591 test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
4592 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
4593
4594 bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
4595 init->tx.sb_cq_index,
4596 !test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
4597 hc_usec);
4598 }
4599
4600
4601 if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
4602 test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
4603 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
4604
4605 bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
4606 init->rx.sb_cq_index,
4607 !test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
4608 hc_usec);
4609 }
4610
4611
4612 for (cos = 0; cos < o->max_cos; cos++) {
4613 DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
4614 o->cids[cos], cos);
4615 DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
4616 bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
4617 }
4618
4619
4620 o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
4621
4622 mmiowb();
4623 smp_mb();
4624
4625 return 0;
4626}
4627
4628static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4629 struct bnx2x_queue_state_params *params)
4630{
4631 struct bnx2x_queue_sp_obj *o = params->q_obj;
4632 struct client_init_ramrod_data *rdata =
4633 (struct client_init_ramrod_data *)o->rdata;
4634 dma_addr_t data_mapping = o->rdata_mapping;
4635 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4636
4637
4638 memset(rdata, 0, sizeof(*rdata));
4639
4640
4641 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4642
4643
4644
4645
4646
4647
4648
4649
4650
4651 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4652 U64_HI(data_mapping),
4653 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4654}
4655
4656static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4657 struct bnx2x_queue_state_params *params)
4658{
4659 struct bnx2x_queue_sp_obj *o = params->q_obj;
4660 struct client_init_ramrod_data *rdata =
4661 (struct client_init_ramrod_data *)o->rdata;
4662 dma_addr_t data_mapping = o->rdata_mapping;
4663 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4664
4665
4666 memset(rdata, 0, sizeof(*rdata));
4667
4668
4669 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4670 bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4671
4672
4673
4674
4675
4676
4677
4678
4679
4680 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4681 U64_HI(data_mapping),
4682 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4683}
4684
4685static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
4686 struct bnx2x_queue_state_params *params)
4687{
4688 struct bnx2x_queue_sp_obj *o = params->q_obj;
4689 struct tx_queue_init_ramrod_data *rdata =
4690 (struct tx_queue_init_ramrod_data *)o->rdata;
4691 dma_addr_t data_mapping = o->rdata_mapping;
4692 int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4693 struct bnx2x_queue_setup_tx_only_params *tx_only_params =
4694 ¶ms->params.tx_only;
4695 u8 cid_index = tx_only_params->cid_index;
4696
4697
4698 if (cid_index >= o->max_cos) {
4699 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4700 o->cl_id, cid_index);
4701 return -EINVAL;
4702 }
4703
4704 DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
4705 tx_only_params->gen_params.cos,
4706 tx_only_params->gen_params.spcl_id);
4707
4708
4709 memset(rdata, 0, sizeof(*rdata));
4710
4711
4712 bnx2x_q_fill_setup_tx_only(bp, params, rdata);
4713
4714 DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
4715 o->cids[cid_index], rdata->general.client_id,
4716 rdata->general.sp_client_id, rdata->general.cos);
4717
4718
4719
4720
4721
4722
4723
4724
4725
4726 return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
4727 U64_HI(data_mapping),
4728 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4729}
4730
4731static void bnx2x_q_fill_update_data(struct bnx2x *bp,
4732 struct bnx2x_queue_sp_obj *obj,
4733 struct bnx2x_queue_update_params *params,
4734 struct client_update_ramrod_data *data)
4735{
4736
4737 data->client_id = obj->cl_id;
4738
4739
4740 data->func_id = obj->func_id;
4741
4742
4743 data->default_vlan = cpu_to_le16(params->def_vlan);
4744
4745
4746 data->inner_vlan_removal_enable_flg =
4747 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, ¶ms->update_flags);
4748 data->inner_vlan_removal_change_flg =
4749 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
4750 ¶ms->update_flags);
4751
4752
4753 data->outer_vlan_removal_enable_flg =
4754 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, ¶ms->update_flags);
4755 data->outer_vlan_removal_change_flg =
4756 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
4757 ¶ms->update_flags);
4758
4759
4760
4761
4762 data->anti_spoofing_enable_flg =
4763 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, ¶ms->update_flags);
4764 data->anti_spoofing_change_flg =
4765 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, ¶ms->update_flags);
4766
4767
4768 data->activate_flg =
4769 test_bit(BNX2X_Q_UPDATE_ACTIVATE, ¶ms->update_flags);
4770 data->activate_change_flg =
4771 test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, ¶ms->update_flags);
4772
4773
4774 data->default_vlan_enable_flg =
4775 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, ¶ms->update_flags);
4776 data->default_vlan_change_flg =
4777 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
4778 ¶ms->update_flags);
4779
4780
4781 data->silent_vlan_change_flg =
4782 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4783 ¶ms->update_flags);
4784 data->silent_vlan_removal_flg =
4785 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, ¶ms->update_flags);
4786 data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
4787 data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
4788}
4789
4790static inline int bnx2x_q_send_update(struct bnx2x *bp,
4791 struct bnx2x_queue_state_params *params)
4792{
4793 struct bnx2x_queue_sp_obj *o = params->q_obj;
4794 struct client_update_ramrod_data *rdata =
4795 (struct client_update_ramrod_data *)o->rdata;
4796 dma_addr_t data_mapping = o->rdata_mapping;
4797 struct bnx2x_queue_update_params *update_params =
4798 ¶ms->params.update;
4799 u8 cid_index = update_params->cid_index;
4800
4801 if (cid_index >= o->max_cos) {
4802 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4803 o->cl_id, cid_index);
4804 return -EINVAL;
4805 }
4806
4807
4808
4809 memset(rdata, 0, sizeof(*rdata));
4810
4811
4812 bnx2x_q_fill_update_data(bp, o, update_params, rdata);
4813
4814
4815
4816
4817
4818
4819
4820
4821
4822 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4823 o->cids[cid_index], U64_HI(data_mapping),
4824 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4825}
4826
4827
4828
4829
4830
4831
4832
4833
4834
4835static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
4836 struct bnx2x_queue_state_params *params)
4837{
4838 struct bnx2x_queue_update_params *update = ¶ms->params.update;
4839
4840 memset(update, 0, sizeof(*update));
4841
4842 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4843
4844 return bnx2x_q_send_update(bp, params);
4845}
4846
4847
4848
4849
4850
4851
4852
4853
4854
4855static inline int bnx2x_q_send_activate(struct bnx2x *bp,
4856 struct bnx2x_queue_state_params *params)
4857{
4858 struct bnx2x_queue_update_params *update = ¶ms->params.update;
4859
4860 memset(update, 0, sizeof(*update));
4861
4862 __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
4863 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4864
4865 return bnx2x_q_send_update(bp, params);
4866}
4867
/* UPDATE_TPA command: not implemented - always fails.  Kept as a stub so
 * the command dispatch table stays complete.
 */
static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	/* TODO: Not implemented yet. */
	return -1;
}
4874
4875static inline int bnx2x_q_send_halt(struct bnx2x *bp,
4876 struct bnx2x_queue_state_params *params)
4877{
4878 struct bnx2x_queue_sp_obj *o = params->q_obj;
4879
4880 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
4881 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
4882 ETH_CONNECTION_TYPE);
4883}
4884
4885static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
4886 struct bnx2x_queue_state_params *params)
4887{
4888 struct bnx2x_queue_sp_obj *o = params->q_obj;
4889 u8 cid_idx = params->params.cfc_del.cid_index;
4890
4891 if (cid_idx >= o->max_cos) {
4892 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4893 o->cl_id, cid_idx);
4894 return -EINVAL;
4895 }
4896
4897 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
4898 o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
4899}
4900
4901static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
4902 struct bnx2x_queue_state_params *params)
4903{
4904 struct bnx2x_queue_sp_obj *o = params->q_obj;
4905 u8 cid_index = params->params.terminate.cid_index;
4906
4907 if (cid_index >= o->max_cos) {
4908 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4909 o->cl_id, cid_index);
4910 return -EINVAL;
4911 }
4912
4913 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
4914 o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
4915}
4916
4917static inline int bnx2x_q_send_empty(struct bnx2x *bp,
4918 struct bnx2x_queue_state_params *params)
4919{
4920 struct bnx2x_queue_sp_obj *o = params->q_obj;
4921
4922 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
4923 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
4924 ETH_CONNECTION_TYPE);
4925}
4926
/* Dispatch a queue command to its chip-independent sender.  SETUP is
 * intentionally absent here - it is chip-specific and handled by the
 * e1x/e2 dispatchers before they fall through to this function.
 */
static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_Q_CMD_INIT:
		return bnx2x_q_init(bp, params);
	case BNX2X_Q_CMD_SETUP_TX_ONLY:
		return bnx2x_q_send_setup_tx_only(bp, params);
	case BNX2X_Q_CMD_DEACTIVATE:
		return bnx2x_q_send_deactivate(bp, params);
	case BNX2X_Q_CMD_ACTIVATE:
		return bnx2x_q_send_activate(bp, params);
	case BNX2X_Q_CMD_UPDATE:
		return bnx2x_q_send_update(bp, params);
	case BNX2X_Q_CMD_UPDATE_TPA:
		return bnx2x_q_send_update_tpa(bp, params);
	case BNX2X_Q_CMD_HALT:
		return bnx2x_q_send_halt(bp, params);
	case BNX2X_Q_CMD_CFC_DEL:
		return bnx2x_q_send_cfc_del(bp, params);
	case BNX2X_Q_CMD_TERMINATE:
		return bnx2x_q_send_terminate(bp, params);
	case BNX2X_Q_CMD_EMPTY:
		return bnx2x_q_send_empty(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}
4956
/* E1x command dispatcher: SETUP uses the E1x-specific sender; all other
 * commands are chip-independent and forwarded to the common dispatcher.
 */
static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
				    struct bnx2x_queue_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_Q_CMD_SETUP:
		return bnx2x_q_send_setup_e1x(bp, params);
	case BNX2X_Q_CMD_INIT:
	case BNX2X_Q_CMD_SETUP_TX_ONLY:
	case BNX2X_Q_CMD_DEACTIVATE:
	case BNX2X_Q_CMD_ACTIVATE:
	case BNX2X_Q_CMD_UPDATE:
	case BNX2X_Q_CMD_UPDATE_TPA:
	case BNX2X_Q_CMD_HALT:
	case BNX2X_Q_CMD_CFC_DEL:
	case BNX2X_Q_CMD_TERMINATE:
	case BNX2X_Q_CMD_EMPTY:
		return bnx2x_queue_send_cmd_cmn(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}
4979
/* E2 command dispatcher: SETUP uses the E2-specific sender; all other
 * commands are chip-independent and forwarded to the common dispatcher.
 */
static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
				   struct bnx2x_queue_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_Q_CMD_SETUP:
		return bnx2x_q_send_setup_e2(bp, params);
	case BNX2X_Q_CMD_INIT:
	case BNX2X_Q_CMD_SETUP_TX_ONLY:
	case BNX2X_Q_CMD_DEACTIVATE:
	case BNX2X_Q_CMD_ACTIVATE:
	case BNX2X_Q_CMD_UPDATE:
	case BNX2X_Q_CMD_UPDATE_TPA:
	case BNX2X_Q_CMD_HALT:
	case BNX2X_Q_CMD_CFC_DEL:
	case BNX2X_Q_CMD_TERMINATE:
	case BNX2X_Q_CMD_EMPTY:
		return bnx2x_queue_send_cmd_cmn(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}
5002
5003
5004
5005
5006
5007
5008
5009
5010
5011
5012
5013
5014
5015
5016
5017
5018
5019static int bnx2x_queue_chk_transition(struct bnx2x *bp,
5020 struct bnx2x_queue_sp_obj *o,
5021 struct bnx2x_queue_state_params *params)
5022{
5023 enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
5024 enum bnx2x_queue_cmd cmd = params->cmd;
5025 struct bnx2x_queue_update_params *update_params =
5026 ¶ms->params.update;
5027 u8 next_tx_only = o->num_tx_only;
5028
5029
5030
5031
5032
5033 if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) {
5034 o->pending = 0;
5035 o->next_state = BNX2X_Q_STATE_MAX;
5036 }
5037
5038
5039
5040
5041
5042 if (o->pending) {
5043 BNX2X_ERR("Blocking transition since pending was %lx\n",
5044 o->pending);
5045 return -EBUSY;
5046 }
5047
5048 switch (state) {
5049 case BNX2X_Q_STATE_RESET:
5050 if (cmd == BNX2X_Q_CMD_INIT)
5051 next_state = BNX2X_Q_STATE_INITIALIZED;
5052
5053 break;
5054 case BNX2X_Q_STATE_INITIALIZED:
5055 if (cmd == BNX2X_Q_CMD_SETUP) {
5056 if (test_bit(BNX2X_Q_FLG_ACTIVE,
5057 ¶ms->params.setup.flags))
5058 next_state = BNX2X_Q_STATE_ACTIVE;
5059 else
5060 next_state = BNX2X_Q_STATE_INACTIVE;
5061 }
5062
5063 break;
5064 case BNX2X_Q_STATE_ACTIVE:
5065 if (cmd == BNX2X_Q_CMD_DEACTIVATE)
5066 next_state = BNX2X_Q_STATE_INACTIVE;
5067
5068 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5069 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5070 next_state = BNX2X_Q_STATE_ACTIVE;
5071
5072 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5073 next_state = BNX2X_Q_STATE_MULTI_COS;
5074 next_tx_only = 1;
5075 }
5076
5077 else if (cmd == BNX2X_Q_CMD_HALT)
5078 next_state = BNX2X_Q_STATE_STOPPED;
5079
5080 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5081
5082
5083
5084 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5085 &update_params->update_flags) &&
5086 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5087 &update_params->update_flags))
5088 next_state = BNX2X_Q_STATE_INACTIVE;
5089 else
5090 next_state = BNX2X_Q_STATE_ACTIVE;
5091 }
5092
5093 break;
5094 case BNX2X_Q_STATE_MULTI_COS:
5095 if (cmd == BNX2X_Q_CMD_TERMINATE)
5096 next_state = BNX2X_Q_STATE_MCOS_TERMINATED;
5097
5098 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5099 next_state = BNX2X_Q_STATE_MULTI_COS;
5100 next_tx_only = o->num_tx_only + 1;
5101 }
5102
5103 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5104 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5105 next_state = BNX2X_Q_STATE_MULTI_COS;
5106
5107 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5108
5109
5110
5111 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5112 &update_params->update_flags) &&
5113 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5114 &update_params->update_flags))
5115 next_state = BNX2X_Q_STATE_INACTIVE;
5116 else
5117 next_state = BNX2X_Q_STATE_MULTI_COS;
5118 }
5119
5120 break;
5121 case BNX2X_Q_STATE_MCOS_TERMINATED:
5122 if (cmd == BNX2X_Q_CMD_CFC_DEL) {
5123 next_tx_only = o->num_tx_only - 1;
5124 if (next_tx_only == 0)
5125 next_state = BNX2X_Q_STATE_ACTIVE;
5126 else
5127 next_state = BNX2X_Q_STATE_MULTI_COS;
5128 }
5129
5130 break;
5131 case BNX2X_Q_STATE_INACTIVE:
5132 if (cmd == BNX2X_Q_CMD_ACTIVATE)
5133 next_state = BNX2X_Q_STATE_ACTIVE;
5134
5135 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5136 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5137 next_state = BNX2X_Q_STATE_INACTIVE;
5138
5139 else if (cmd == BNX2X_Q_CMD_HALT)
5140 next_state = BNX2X_Q_STATE_STOPPED;
5141
5142 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5143
5144
5145
5146 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5147 &update_params->update_flags) &&
5148 test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5149 &update_params->update_flags)){
5150 if (o->num_tx_only == 0)
5151 next_state = BNX2X_Q_STATE_ACTIVE;
5152 else
5153 next_state = BNX2X_Q_STATE_MULTI_COS;
5154 } else
5155 next_state = BNX2X_Q_STATE_INACTIVE;
5156 }
5157
5158 break;
5159 case BNX2X_Q_STATE_STOPPED:
5160 if (cmd == BNX2X_Q_CMD_TERMINATE)
5161 next_state = BNX2X_Q_STATE_TERMINATED;
5162
5163 break;
5164 case BNX2X_Q_STATE_TERMINATED:
5165 if (cmd == BNX2X_Q_CMD_CFC_DEL)
5166 next_state = BNX2X_Q_STATE_RESET;
5167
5168 break;
5169 default:
5170 BNX2X_ERR("Illegal state: %d\n", state);
5171 }
5172
5173
5174 if (next_state != BNX2X_Q_STATE_MAX) {
5175 DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
5176 state, cmd, next_state);
5177 o->next_state = next_state;
5178 o->next_tx_only = next_tx_only;
5179 return 0;
5180 }
5181
5182 DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
5183
5184 return -EINVAL;
5185}
5186
5187void bnx2x_init_queue_obj(struct bnx2x *bp,
5188 struct bnx2x_queue_sp_obj *obj,
5189 u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
5190 void *rdata,
5191 dma_addr_t rdata_mapping, unsigned long type)
5192{
5193 memset(obj, 0, sizeof(*obj));
5194
5195
5196 BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);
5197
5198 memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5199 obj->max_cos = cid_cnt;
5200 obj->cl_id = cl_id;
5201 obj->func_id = func_id;
5202 obj->rdata = rdata;
5203 obj->rdata_mapping = rdata_mapping;
5204 obj->type = type;
5205 obj->next_state = BNX2X_Q_STATE_MAX;
5206
5207 if (CHIP_IS_E1x(bp))
5208 obj->send_cmd = bnx2x_queue_send_cmd_e1x;
5209 else
5210 obj->send_cmd = bnx2x_queue_send_cmd_e2;
5211
5212 obj->check_transition = bnx2x_queue_chk_transition;
5213
5214 obj->complete_cmd = bnx2x_queue_comp_cmd;
5215 obj->wait_comp = bnx2x_queue_wait_comp;
5216 obj->set_pending = bnx2x_queue_set_pending;
5217}
5218
5219
/* Collapse the detailed queue FSM state into a logical ACTIVE/STOPPED
 * view.  Returns -EINVAL for an unrecognized state value.
 */
int bnx2x_get_q_logical_state(struct bnx2x *bp,
			       struct bnx2x_queue_sp_obj *obj)
{
	switch (obj->state) {
	case BNX2X_Q_STATE_ACTIVE:
	case BNX2X_Q_STATE_MULTI_COS:
		return BNX2X_Q_LOGICAL_STATE_ACTIVE;
	case BNX2X_Q_STATE_RESET:
	case BNX2X_Q_STATE_INITIALIZED:
	case BNX2X_Q_STATE_MCOS_TERMINATED:
	case BNX2X_Q_STATE_INACTIVE:
	case BNX2X_Q_STATE_STOPPED:
	case BNX2X_Q_STATE_TERMINATED:
	case BNX2X_Q_STATE_FLRED:
		return BNX2X_Q_LOGICAL_STATE_STOPPED;
	default:
		return -EINVAL;
	}
}
5239
5240
/* Return the current function FSM state, or BNX2X_F_STATE_MAX while a
 * transition is still pending.
 */
enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
					   struct bnx2x_func_sp_obj *o)
{
	/* In the middle of a transition the state is undefined */
	if (o->pending)
		return BNX2X_F_STATE_MAX;

	/* Pair with the wmb() in the completion path: read 'pending'
	 * before 'state' so we never observe a stale state.
	 */
	rmb();

	return o->state;
}
5256
/* Busy-wait until the 'cmd' pending bit is cleared from o->pending,
 * delegating to the generic state-wait helper.
 */
static int bnx2x_func_wait_comp(struct bnx2x *bp,
				struct bnx2x_func_sp_obj *o,
				enum bnx2x_func_cmd cmd)
{
	return bnx2x_state_wait(bp, cmd, &o->pending);
}
5263
5264
5265
5266
5267
5268
5269
5270
5271
5272
5273
/**
 * bnx2x_func_state_change_comp - complete a pending function command
 *
 * @bp:		device handle
 * @o:		function object
 * @cmd:	command being completed
 *
 * Moves the function object into the recorded next_state and clears the
 * command's pending bit.  Returns -EINVAL on a completion for a command
 * that was never pending.
 */
static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
					       struct bnx2x_func_sp_obj *o,
					       enum bnx2x_func_cmd cmd)
{
	unsigned long cur_pending = o->pending;

	if (!test_and_clear_bit(cmd, &cur_pending)) {
		BNX2X_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
			  cmd, BP_FUNC(bp), o->state,
			  cur_pending, o->next_state);
		return -EINVAL;
	}

	DP(BNX2X_MSG_SP,
	   "Completing command %d for func %d, setting state to %d\n",
	   cmd, BP_FUNC(bp), o->next_state);

	o->state = o->next_state;
	o->next_state = BNX2X_F_STATE_MAX;

	/* Make the state update visible before clearing the pending bit;
	 * pairs with the rmb() in bnx2x_func_get_state().
	 */
	wmb();

	clear_bit(cmd, &o->pending);
	smp_mb__after_clear_bit();

	return 0;
}
5304
5305
5306
5307
5308
5309
5310
5311
5312
5313
5314static int bnx2x_func_comp_cmd(struct bnx2x *bp,
5315 struct bnx2x_func_sp_obj *o,
5316 enum bnx2x_func_cmd cmd)
5317{
5318
5319
5320
5321 int rc = bnx2x_func_state_change_comp(bp, o, cmd);
5322 return rc;
5323}
5324
5325
5326
5327
5328
5329
5330
5331
5332
5333
5334
5335
5336
5337
5338
5339
5340static int bnx2x_func_chk_transition(struct bnx2x *bp,
5341 struct bnx2x_func_sp_obj *o,
5342 struct bnx2x_func_state_params *params)
5343{
5344 enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
5345 enum bnx2x_func_cmd cmd = params->cmd;
5346
5347
5348
5349
5350
5351 if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) {
5352 o->pending = 0;
5353 o->next_state = BNX2X_F_STATE_MAX;
5354 }
5355
5356
5357
5358
5359
5360 if (o->pending)
5361 return -EBUSY;
5362
5363 switch (state) {
5364 case BNX2X_F_STATE_RESET:
5365 if (cmd == BNX2X_F_CMD_HW_INIT)
5366 next_state = BNX2X_F_STATE_INITIALIZED;
5367
5368 break;
5369 case BNX2X_F_STATE_INITIALIZED:
5370 if (cmd == BNX2X_F_CMD_START)
5371 next_state = BNX2X_F_STATE_STARTED;
5372
5373 else if (cmd == BNX2X_F_CMD_HW_RESET)
5374 next_state = BNX2X_F_STATE_RESET;
5375
5376 break;
5377 case BNX2X_F_STATE_STARTED:
5378 if (cmd == BNX2X_F_CMD_STOP)
5379 next_state = BNX2X_F_STATE_INITIALIZED;
5380
5381
5382
5383
5384 else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) &&
5385 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5386 next_state = BNX2X_F_STATE_STARTED;
5387
5388 else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
5389 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5390 next_state = BNX2X_F_STATE_STARTED;
5391
5392
5393
5394
5395 else if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
5396 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5397 next_state = BNX2X_F_STATE_STARTED;
5398
5399 else if (cmd == BNX2X_F_CMD_TX_STOP)
5400 next_state = BNX2X_F_STATE_TX_STOPPED;
5401
5402 break;
5403 case BNX2X_F_STATE_TX_STOPPED:
5404 if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
5405 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5406 next_state = BNX2X_F_STATE_TX_STOPPED;
5407
5408 else if (cmd == BNX2X_F_CMD_TX_START)
5409 next_state = BNX2X_F_STATE_STARTED;
5410
5411 break;
5412 default:
5413 BNX2X_ERR("Unknown state: %d\n", state);
5414 }
5415
5416
5417 if (next_state != BNX2X_F_STATE_MAX) {
5418 DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
5419 state, cmd, next_state);
5420 o->next_state = next_state;
5421 return 0;
5422 }
5423
5424 DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
5425 state, cmd);
5426
5427 return -EINVAL;
5428}
5429
5430
5431
5432
5433
5434
5435
5436
5437
5438
5439
/**
 * bnx2x_func_init_func - perform HW init at function stage
 *
 * @bp:		device handle
 * @drv:	driver-specific HW-init callbacks
 *
 * Runs only the FUNCTION-stage HW init callback; used directly for the
 * FW_MSG_CODE_DRV_LOAD_FUNCTION phase and as the tail of the wider phases.
 * Returns the callback's status (0 on success).
 */
static inline int bnx2x_func_init_func(struct bnx2x *bp,
				       const struct bnx2x_func_sp_drv_ops *drv)
{
	return drv->init_hw_func(bp);
}
5445
5446
5447
5448
5449
5450
5451
5452
5453
5454
5455
5456
5457static inline int bnx2x_func_init_port(struct bnx2x *bp,
5458 const struct bnx2x_func_sp_drv_ops *drv)
5459{
5460 int rc = drv->init_hw_port(bp);
5461 if (rc)
5462 return rc;
5463
5464 return bnx2x_func_init_func(bp, drv);
5465}
5466
5467
5468
5469
5470
5471
5472
5473
5474
5475
5476
5477static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
5478 const struct bnx2x_func_sp_drv_ops *drv)
5479{
5480 int rc = drv->init_hw_cmn_chip(bp);
5481 if (rc)
5482 return rc;
5483
5484 return bnx2x_func_init_port(bp, drv);
5485}
5486
5487
5488
5489
5490
5491
5492
5493
5494
5495
5496
5497static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
5498 const struct bnx2x_func_sp_drv_ops *drv)
5499{
5500 int rc = drv->init_hw_cmn(bp);
5501 if (rc)
5502 return rc;
5503
5504 return bnx2x_func_init_port(bp, drv);
5505}
5506
/**
 * bnx2x_func_hw_init - run the HW-init phase requested by the MCP
 *
 * @bp:		device handle
 * @params:	command parameters; params.hw_init.load_phase selects
 *		which init stage(s) to run
 *
 * Prepares the gunzip buffer and the firmware, then dispatches to the
 * init routine matching the MCP load phase. gunzip_end() always runs on
 * the way out. No ramrod is sent for HW_INIT, so on success the command
 * is completed immediately via o->complete_cmd().
 */
static int bnx2x_func_hw_init(struct bnx2x *bp,
			      struct bnx2x_func_state_params *params)
{
	u32 load_code = params->params.hw_init.load_phase;
	struct bnx2x_func_sp_obj *o = params->f_obj;
	const struct bnx2x_func_sp_drv_ops *drv = o->drv;
	int rc = 0;

	DP(BNX2X_MSG_SP, "function %d load_code %x\n",
	   BP_ABS_FUNC(bp), load_code);

	/* Prepare buffers for unzipping the FW */
	rc = drv->gunzip_init(bp);
	if (rc)
		return rc;

	/* Prepare the FW */
	rc = drv->init_fw(bp);
	if (rc) {
		BNX2X_ERR("Error loading firmware\n");
		goto init_err;
	}

	/* Each phase runs its own stage plus all narrower stages below it */
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		rc = bnx2x_func_init_cmn_chip(bp, drv);
		if (rc)
			goto init_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_func_init_cmn(bp, drv);
		if (rc)
			goto init_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_PORT:
		rc = bnx2x_func_init_port(bp, drv);
		if (rc)
			goto init_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		rc = bnx2x_func_init_func(bp, drv);
		if (rc)
			goto init_err;

		break;
	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		rc = -EINVAL;
	}

init_err:
	drv->gunzip_end(bp);

	/* In case of success, complete the command immediately: no ramrods
	 * have been sent.
	 */
	if (!rc)
		o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);

	return rc;
}
5572
5573
5574
5575
5576
5577
5578
5579
5580
5581
/**
 * bnx2x_func_reset_func - reset HW at function stage
 *
 * @bp:		device handle
 * @drv:	driver-specific HW-reset callbacks
 *
 * Resets only FUNCTION-stage HW blocks; used directly for the
 * FW_MSG_CODE_DRV_UNLOAD_FUNCTION phase and as part of the wider phases.
 */
static inline void bnx2x_func_reset_func(struct bnx2x *bp,
					 const struct bnx2x_func_sp_drv_ops *drv)
{
	drv->reset_hw_func(bp);
}
5587
5588
5589
5590
5591
5592
5593
5594
5595
5596
5597
5598
5599
5600
5601
5602
/**
 * bnx2x_func_reset_port - reset HW at port stage
 *
 * @bp:		device handle
 * @drv:	driver-specific HW-reset callbacks
 *
 * Resets PORT-stage and then FUNCTION-stage HW blocks.
 * NOTE(review): the port reset is deliberately issued before the
 * function reset here (inverse of the init order in
 * bnx2x_func_init_port()) — preserve this ordering.
 */
static inline void bnx2x_func_reset_port(struct bnx2x *bp,
					 const struct bnx2x_func_sp_drv_ops *drv)
{
	drv->reset_hw_port(bp);
	bnx2x_func_reset_func(bp, drv);
}
5609
5610
5611
5612
5613
5614
5615
5616
5617
5618
5619
/**
 * bnx2x_func_reset_cmn - reset HW at common stage
 *
 * @bp:		device handle
 * @drv:	driver-specific HW-reset callbacks
 *
 * Resets PORT and FUNCTION stages first, then the COMMON-stage HW
 * blocks — narrower scopes are torn down before the shared ones.
 */
static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
					const struct bnx2x_func_sp_drv_ops *drv)
{
	bnx2x_func_reset_port(bp, drv);
	drv->reset_hw_cmn(bp);
}
5626
5627
5628static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
5629 struct bnx2x_func_state_params *params)
5630{
5631 u32 reset_phase = params->params.hw_reset.reset_phase;
5632 struct bnx2x_func_sp_obj *o = params->f_obj;
5633 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5634
5635 DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp),
5636 reset_phase);
5637
5638 switch (reset_phase) {
5639 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5640 bnx2x_func_reset_cmn(bp, drv);
5641 break;
5642 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5643 bnx2x_func_reset_port(bp, drv);
5644 break;
5645 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5646 bnx2x_func_reset_func(bp, drv);
5647 break;
5648 default:
5649 BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
5650 reset_phase);
5651 break;
5652 }
5653
5654
5655 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
5656
5657 return 0;
5658}
5659
5660static inline int bnx2x_func_send_start(struct bnx2x *bp,
5661 struct bnx2x_func_state_params *params)
5662{
5663 struct bnx2x_func_sp_obj *o = params->f_obj;
5664 struct function_start_data *rdata =
5665 (struct function_start_data *)o->rdata;
5666 dma_addr_t data_mapping = o->rdata_mapping;
5667 struct bnx2x_func_start_params *start_params = ¶ms->params.start;
5668
5669 memset(rdata, 0, sizeof(*rdata));
5670
5671
5672 rdata->function_mode = (u8)start_params->mf_mode;
5673 rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
5674 rdata->path_id = BP_PATH(bp);
5675 rdata->network_cos_mode = start_params->network_cos_mode;
5676
5677
5678
5679
5680
5681
5682
5683
5684
5685 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5686 U64_HI(data_mapping),
5687 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5688}
5689
5690static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
5691 struct bnx2x_func_state_params *params)
5692{
5693 struct bnx2x_func_sp_obj *o = params->f_obj;
5694 struct function_update_data *rdata =
5695 (struct function_update_data *)o->rdata;
5696 dma_addr_t data_mapping = o->rdata_mapping;
5697 struct bnx2x_func_switch_update_params *switch_update_params =
5698 ¶ms->params.switch_update;
5699
5700 memset(rdata, 0, sizeof(*rdata));
5701
5702
5703 rdata->tx_switch_suspend_change_flg = 1;
5704 rdata->tx_switch_suspend = switch_update_params->suspend;
5705 rdata->echo = SWITCH_UPDATE;
5706
5707 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5708 U64_HI(data_mapping),
5709 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5710}
5711
5712static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
5713 struct bnx2x_func_state_params *params)
5714{
5715 struct bnx2x_func_sp_obj *o = params->f_obj;
5716 struct function_update_data *rdata =
5717 (struct function_update_data *)o->afex_rdata;
5718 dma_addr_t data_mapping = o->afex_rdata_mapping;
5719 struct bnx2x_func_afex_update_params *afex_update_params =
5720 ¶ms->params.afex_update;
5721
5722 memset(rdata, 0, sizeof(*rdata));
5723
5724
5725 rdata->vif_id_change_flg = 1;
5726 rdata->vif_id = cpu_to_le16(afex_update_params->vif_id);
5727 rdata->afex_default_vlan_change_flg = 1;
5728 rdata->afex_default_vlan =
5729 cpu_to_le16(afex_update_params->afex_default_vlan);
5730 rdata->allowed_priorities_change_flg = 1;
5731 rdata->allowed_priorities = afex_update_params->allowed_priorities;
5732 rdata->echo = AFEX_UPDATE;
5733
5734
5735
5736
5737
5738
5739
5740 DP(BNX2X_MSG_SP,
5741 "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
5742 rdata->vif_id,
5743 rdata->afex_default_vlan, rdata->allowed_priorities);
5744
5745 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5746 U64_HI(data_mapping),
5747 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5748}
5749
5750static
5751inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp,
5752 struct bnx2x_func_state_params *params)
5753{
5754 struct bnx2x_func_sp_obj *o = params->f_obj;
5755 struct afex_vif_list_ramrod_data *rdata =
5756 (struct afex_vif_list_ramrod_data *)o->afex_rdata;
5757 struct bnx2x_func_afex_viflists_params *afex_vif_params =
5758 ¶ms->params.afex_viflists;
5759 u64 *p_rdata = (u64 *)rdata;
5760
5761 memset(rdata, 0, sizeof(*rdata));
5762
5763
5764 rdata->vif_list_index = cpu_to_le16(afex_vif_params->vif_list_index);
5765 rdata->func_bit_map = afex_vif_params->func_bit_map;
5766 rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
5767 rdata->func_to_clear = afex_vif_params->func_to_clear;
5768
5769
5770 rdata->echo = afex_vif_params->afex_vif_list_command;
5771
5772
5773
5774
5775
5776
5777
5778
5779 DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
5780 rdata->afex_vif_list_command, rdata->vif_list_index,
5781 rdata->func_bit_map, rdata->func_to_clear);
5782
5783
5784 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
5785 U64_HI(*p_rdata), U64_LO(*p_rdata),
5786 NONE_CONNECTION_TYPE);
5787}
5788
/* Post a FUNCTION_STOP ramrod; it carries no ramrod data. */
static inline int bnx2x_func_send_stop(struct bnx2x *bp,
				       struct bnx2x_func_state_params *params)
{
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
			     NONE_CONNECTION_TYPE);
}
5795
/* Post a STOP_TRAFFIC ramrod; it carries no ramrod data. */
static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
					  struct bnx2x_func_state_params *params)
{
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
			     NONE_CONNECTION_TYPE);
}
5802static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
5803 struct bnx2x_func_state_params *params)
5804{
5805 struct bnx2x_func_sp_obj *o = params->f_obj;
5806 struct flow_control_configuration *rdata =
5807 (struct flow_control_configuration *)o->rdata;
5808 dma_addr_t data_mapping = o->rdata_mapping;
5809 struct bnx2x_func_tx_start_params *tx_start_params =
5810 ¶ms->params.tx_start;
5811 int i;
5812
5813 memset(rdata, 0, sizeof(*rdata));
5814
5815 rdata->dcb_enabled = tx_start_params->dcb_enabled;
5816 rdata->dcb_version = tx_start_params->dcb_version;
5817 rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;
5818
5819 for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
5820 rdata->traffic_type_to_priority_cos[i] =
5821 tx_start_params->traffic_type_to_priority_cos[i];
5822
5823 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
5824 U64_HI(data_mapping),
5825 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5826}
5827
5828static int bnx2x_func_send_cmd(struct bnx2x *bp,
5829 struct bnx2x_func_state_params *params)
5830{
5831 switch (params->cmd) {
5832 case BNX2X_F_CMD_HW_INIT:
5833 return bnx2x_func_hw_init(bp, params);
5834 case BNX2X_F_CMD_START:
5835 return bnx2x_func_send_start(bp, params);
5836 case BNX2X_F_CMD_STOP:
5837 return bnx2x_func_send_stop(bp, params);
5838 case BNX2X_F_CMD_HW_RESET:
5839 return bnx2x_func_hw_reset(bp, params);
5840 case BNX2X_F_CMD_AFEX_UPDATE:
5841 return bnx2x_func_send_afex_update(bp, params);
5842 case BNX2X_F_CMD_AFEX_VIFLISTS:
5843 return bnx2x_func_send_afex_viflists(bp, params);
5844 case BNX2X_F_CMD_TX_STOP:
5845 return bnx2x_func_send_tx_stop(bp, params);
5846 case BNX2X_F_CMD_TX_START:
5847 return bnx2x_func_send_tx_start(bp, params);
5848 case BNX2X_F_CMD_SWITCH_UPDATE:
5849 return bnx2x_func_send_switch_update(bp, params);
5850 default:
5851 BNX2X_ERR("Unknown command: %d\n", params->cmd);
5852 return -EINVAL;
5853 }
5854}
5855
5856void bnx2x_init_func_obj(struct bnx2x *bp,
5857 struct bnx2x_func_sp_obj *obj,
5858 void *rdata, dma_addr_t rdata_mapping,
5859 void *afex_rdata, dma_addr_t afex_rdata_mapping,
5860 struct bnx2x_func_sp_drv_ops *drv_iface)
5861{
5862 memset(obj, 0, sizeof(*obj));
5863
5864 mutex_init(&obj->one_pending_mutex);
5865
5866 obj->rdata = rdata;
5867 obj->rdata_mapping = rdata_mapping;
5868 obj->afex_rdata = afex_rdata;
5869 obj->afex_rdata_mapping = afex_rdata_mapping;
5870 obj->send_cmd = bnx2x_func_send_cmd;
5871 obj->check_transition = bnx2x_func_chk_transition;
5872 obj->complete_cmd = bnx2x_func_comp_cmd;
5873 obj->wait_comp = bnx2x_func_wait_comp;
5874
5875 obj->drv = drv_iface;
5876}
5877
5878
5879
5880
5881
5882
5883
5884
5885
5886
5887
5888
5889
5890
5891int bnx2x_func_state_change(struct bnx2x *bp,
5892 struct bnx2x_func_state_params *params)
5893{
5894 struct bnx2x_func_sp_obj *o = params->f_obj;
5895 int rc, cnt = 300;
5896 enum bnx2x_func_cmd cmd = params->cmd;
5897 unsigned long *pending = &o->pending;
5898
5899 mutex_lock(&o->one_pending_mutex);
5900
5901
5902 rc = o->check_transition(bp, o, params);
5903 if ((rc == -EBUSY) &&
5904 (test_bit(RAMROD_RETRY, ¶ms->ramrod_flags))) {
5905 while ((rc == -EBUSY) && (--cnt > 0)) {
5906 mutex_unlock(&o->one_pending_mutex);
5907 msleep(10);
5908 mutex_lock(&o->one_pending_mutex);
5909 rc = o->check_transition(bp, o, params);
5910 }
5911 if (rc == -EBUSY) {
5912 mutex_unlock(&o->one_pending_mutex);
5913 BNX2X_ERR("timeout waiting for previous ramrod completion\n");
5914 return rc;
5915 }
5916 } else if (rc) {
5917 mutex_unlock(&o->one_pending_mutex);
5918 return rc;
5919 }
5920
5921
5922 set_bit(cmd, pending);
5923
5924
5925 if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) {
5926 bnx2x_func_state_change_comp(bp, o, cmd);
5927 mutex_unlock(&o->one_pending_mutex);
5928 } else {
5929
5930 rc = o->send_cmd(bp, params);
5931
5932 mutex_unlock(&o->one_pending_mutex);
5933
5934 if (rc) {
5935 o->next_state = BNX2X_F_STATE_MAX;
5936 clear_bit(cmd, pending);
5937 smp_mb__after_clear_bit();
5938 return rc;
5939 }
5940
5941 if (test_bit(RAMROD_COMP_WAIT, ¶ms->ramrod_flags)) {
5942 rc = o->wait_comp(bp, o, cmd);
5943 if (rc)
5944 return rc;
5945
5946 return 0;
5947 }
5948 }
5949
5950 return !!test_bit(cmd, pending);
5951}
5952