#include "bna.h"
#include "bfi.h"

/* IB */
static void
bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
{
	ib->coalescing_timeo = coalescing_timeo;
	ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
				(u32)ib->coalescing_timeo, 0);
}

/* RXF */

32#define bna_rxf_vlan_cfg_soft_reset(rxf) \
33do { \
34 (rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL; \
35 (rxf)->vlan_strip_pending = true; \
36} while (0)
37
38#define bna_rxf_rss_cfg_soft_reset(rxf) \
39do { \
40 if ((rxf)->rss_status == BNA_STATUS_T_ENABLED) \
41 (rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING | \
42 BNA_RSS_F_CFG_PENDING | \
43 BNA_RSS_F_STATUS_PENDING); \
44} while (0)
45
46static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
47static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
48static int bna_rxf_fltr_clear(struct bna_rxf *rxf);
49static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
50static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
51static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
52static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf);
53static int bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf,
54 enum bna_cleanup_type cleanup);
55static int bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf,
56 enum bna_cleanup_type cleanup);
57static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf,
58 enum bna_cleanup_type cleanup);
59
60bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
61 enum bna_rxf_event);
62bfa_fsm_state_decl(bna_rxf, paused, struct bna_rxf,
63 enum bna_rxf_event);
64bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf,
65 enum bna_rxf_event);
66bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
67 enum bna_rxf_event);
68bfa_fsm_state_decl(bna_rxf, fltr_clr_wait, struct bna_rxf,
69 enum bna_rxf_event);
70bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf,
71 enum bna_rxf_event);
72
73static void
74bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
75{
76 call_rxf_stop_cbfn(rxf);
77}
78
79static void
80bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
81{
82 switch (event) {
83 case RXF_E_START:
84 if (rxf->flags & BNA_RXF_F_PAUSED) {
85 bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
86 call_rxf_start_cbfn(rxf);
87 } else
88 bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
89 break;
90
91 case RXF_E_STOP:
92 call_rxf_stop_cbfn(rxf);
93 break;
94
	case RXF_E_FAIL:
		/* No-op */
		break;
98
99 case RXF_E_CONFIG:
100 call_rxf_cam_fltr_cbfn(rxf);
101 break;
102
103 case RXF_E_PAUSE:
104 rxf->flags |= BNA_RXF_F_PAUSED;
105 call_rxf_pause_cbfn(rxf);
106 break;
107
108 case RXF_E_RESUME:
109 rxf->flags &= ~BNA_RXF_F_PAUSED;
110 call_rxf_resume_cbfn(rxf);
111 break;
112
113 default:
114 bfa_sm_fault(event);
115 }
116}
117
118static void
119bna_rxf_sm_paused_entry(struct bna_rxf *rxf)
120{
121 call_rxf_pause_cbfn(rxf);
122}
123
124static void
125bna_rxf_sm_paused(struct bna_rxf *rxf, enum bna_rxf_event event)
126{
127 switch (event) {
128 case RXF_E_STOP:
129 case RXF_E_FAIL:
130 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
131 break;
132
133 case RXF_E_CONFIG:
134 call_rxf_cam_fltr_cbfn(rxf);
135 break;
136
137 case RXF_E_RESUME:
138 rxf->flags &= ~BNA_RXF_F_PAUSED;
139 bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
140 break;
141
142 default:
143 bfa_sm_fault(event);
144 }
145}
146
147static void
148bna_rxf_sm_cfg_wait_entry(struct bna_rxf *rxf)
149{
	if (!bna_rxf_cfg_apply(rxf)) {
		/* No more pending config updates */
		bfa_fsm_set_state(rxf, bna_rxf_sm_started);
	}
154}
155
156static void
157bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
158{
159 switch (event) {
160 case RXF_E_STOP:
161 bfa_fsm_set_state(rxf, bna_rxf_sm_last_resp_wait);
162 break;
163
164 case RXF_E_FAIL:
165 bna_rxf_cfg_reset(rxf);
166 call_rxf_start_cbfn(rxf);
167 call_rxf_cam_fltr_cbfn(rxf);
168 call_rxf_resume_cbfn(rxf);
169 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
170 break;
171
	case RXF_E_CONFIG:
		/* No-op */
		break;
175
176 case RXF_E_PAUSE:
177 rxf->flags |= BNA_RXF_F_PAUSED;
178 call_rxf_start_cbfn(rxf);
179 bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
180 break;
181
	case RXF_E_FW_RESP:
		if (!bna_rxf_cfg_apply(rxf)) {
			/* No more pending config updates */
			bfa_fsm_set_state(rxf, bna_rxf_sm_started);
		}
		break;
188
189 default:
190 bfa_sm_fault(event);
191 }
192}
193
194static void
195bna_rxf_sm_started_entry(struct bna_rxf *rxf)
196{
197 call_rxf_start_cbfn(rxf);
198 call_rxf_cam_fltr_cbfn(rxf);
199 call_rxf_resume_cbfn(rxf);
200}
201
202static void
203bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
204{
205 switch (event) {
206 case RXF_E_STOP:
207 case RXF_E_FAIL:
208 bna_rxf_cfg_reset(rxf);
209 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
210 break;
211
212 case RXF_E_CONFIG:
213 bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
214 break;
215
216 case RXF_E_PAUSE:
217 rxf->flags |= BNA_RXF_F_PAUSED;
218 if (!bna_rxf_fltr_clear(rxf))
219 bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
220 else
221 bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
222 break;
223
224 default:
225 bfa_sm_fault(event);
226 }
227}
228
229static void
230bna_rxf_sm_fltr_clr_wait_entry(struct bna_rxf *rxf)
231{
232}
233
234static void
235bna_rxf_sm_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
236{
237 switch (event) {
238 case RXF_E_FAIL:
239 bna_rxf_cfg_reset(rxf);
240 call_rxf_pause_cbfn(rxf);
241 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
242 break;
243
	case RXF_E_FW_RESP:
		if (!bna_rxf_fltr_clear(rxf)) {
			/* No more pending CAM entries to clear */
			bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
		}
		break;
250
251 default:
252 bfa_sm_fault(event);
253 }
254}
255
256static void
257bna_rxf_sm_last_resp_wait_entry(struct bna_rxf *rxf)
258{
259}
260
261static void
262bna_rxf_sm_last_resp_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
263{
264 switch (event) {
265 case RXF_E_FAIL:
266 case RXF_E_FW_RESP:
267 bna_rxf_cfg_reset(rxf);
268 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
269 break;
270
271 default:
272 bfa_sm_fault(event);
273 }
274}
275
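/*
 * Post a unicast MAC request (add/delete/set, selected by req_type) to
 * firmware over the message queue.
 */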
276static void
277bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac,
278 enum bfi_enet_h2i_msgs req_type)
279{
280 struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req;
281
282 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
283 req->mh.num_entries = htons(
284 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
285 memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
286 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
287 sizeof(struct bfi_enet_ucast_req), &req->mh);
288 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
289}
290
291static void
292bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac)
293{
294 struct bfi_enet_mcast_add_req *req =
295 &rxf->bfi_enet_cmd.mcast_add_req;
296
297 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ,
298 0, rxf->rx->rid);
299 req->mh.num_entries = htons(
300 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req)));
301 memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
302 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
303 sizeof(struct bfi_enet_mcast_add_req), &req->mh);
304 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
305}
306
307static void
308bna_bfi_mcast_del_req(struct bna_rxf *rxf, u16 handle)
309{
310 struct bfi_enet_mcast_del_req *req =
311 &rxf->bfi_enet_cmd.mcast_del_req;
312
313 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ,
314 0, rxf->rx->rid);
315 req->mh.num_entries = htons(
316 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_del_req)));
317 req->handle = htons(handle);
318 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
319 sizeof(struct bfi_enet_mcast_del_req), &req->mh);
320 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
321}
322
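/* Enable or disable the multicast MAC filter in firmware */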
323static void
324bna_bfi_mcast_filter_req(struct bna_rxf *rxf, enum bna_status status)
325{
326 struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
327
328 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
329 BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid);
330 req->mh.num_entries = htons(
331 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
332 req->enable = status;
333 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
334 sizeof(struct bfi_enet_enable_req), &req->mh);
335 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
336}
337
338static void
339bna_bfi_rx_promisc_req(struct bna_rxf *rxf, enum bna_status status)
340{
341 struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
342
343 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
344 BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid);
345 req->mh.num_entries = htons(
346 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
347 req->enable = status;
348 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
349 sizeof(struct bfi_enet_enable_req), &req->mh);
350 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
351}
352
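/*
 * Program one block of the VLAN filter table. When VLAN filtering is
 * disabled, the block is programmed to accept all VLAN IDs.
 */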
353static void
354bna_bfi_rx_vlan_filter_set(struct bna_rxf *rxf, u8 block_idx)
355{
356 struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req;
357 int i;
358 int j;
359
360 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
361 BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid);
362 req->mh.num_entries = htons(
363 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_vlan_req)));
364 req->block_idx = block_idx;
365 for (i = 0; i < (BFI_ENET_VLAN_BLOCK_SIZE / 32); i++) {
366 j = (block_idx * (BFI_ENET_VLAN_BLOCK_SIZE / 32)) + i;
367 if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED)
368 req->bit_mask[i] =
369 htonl(rxf->vlan_filter_table[j]);
370 else
371 req->bit_mask[i] = 0xFFFFFFFF;
372 }
373 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
374 sizeof(struct bfi_enet_rx_vlan_req), &req->mh);
375 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
376}
377
378static void
379bna_bfi_vlan_strip_enable(struct bna_rxf *rxf)
380{
381 struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
382
383 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
384 BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid);
385 req->mh.num_entries = htons(
386 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
387 req->enable = rxf->vlan_strip_status;
388 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
389 sizeof(struct bfi_enet_enable_req), &req->mh);
390 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
391}
392
393static void
394bna_bfi_rit_cfg(struct bna_rxf *rxf)
395{
396 struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req;
397
398 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
399 BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid);
400 req->mh.num_entries = htons(
401 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rit_req)));
402 req->size = htons(rxf->rit_size);
403 memcpy(&req->table[0], rxf->rit, rxf->rit_size);
404 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
405 sizeof(struct bfi_enet_rit_req), &req->mh);
406 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
407}
408
409static void
410bna_bfi_rss_cfg(struct bna_rxf *rxf)
411{
412 struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req;
413 int i;
414
415 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
416 BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid);
417 req->mh.num_entries = htons(
418 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rss_cfg_req)));
419 req->cfg.type = rxf->rss_cfg.hash_type;
420 req->cfg.mask = rxf->rss_cfg.hash_mask;
421 for (i = 0; i < BFI_ENET_RSS_KEY_LEN; i++)
422 req->cfg.key[i] =
423 htonl(rxf->rss_cfg.toeplitz_hash_key[i]);
424 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
425 sizeof(struct bfi_enet_rss_cfg_req), &req->mh);
426 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
427}
428
429static void
430bna_bfi_rss_enable(struct bna_rxf *rxf)
431{
432 struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
433
434 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
435 BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid);
436 req->mh.num_entries = htons(
437 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
438 req->enable = rxf->rss_status;
439 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
440 sizeof(struct bfi_enet_enable_req), &req->mh);
441 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
442}
443
444
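/* Find a multicast MAC in the active or pending-delete lists */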
445static struct bna_mac *
446bna_rxf_mcmac_get(struct bna_rxf *rxf, u8 *mac_addr)
447{
448 struct bna_mac *mac;
449 struct list_head *qe;
450
451 list_for_each(qe, &rxf->mcast_active_q) {
452 mac = (struct bna_mac *)qe;
453 if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
454 return mac;
455 }
456
457 list_for_each(qe, &rxf->mcast_pending_del_q) {
458 mac = (struct bna_mac *)qe;
459 if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
460 return mac;
461 }
462
463 return NULL;
464}
465
466static struct bna_mcam_handle *
467bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle)
468{
469 struct bna_mcam_handle *mchandle;
470 struct list_head *qe;
471
472 list_for_each(qe, &rxf->mcast_handle_q) {
473 mchandle = (struct bna_mcam_handle *)qe;
474 if (mchandle->handle == handle)
475 return mchandle;
476 }
477
478 return NULL;
479}
480
481static void
482bna_rxf_mchandle_attach(struct bna_rxf *rxf, u8 *mac_addr, int handle)
483{
484 struct bna_mac *mcmac;
485 struct bna_mcam_handle *mchandle;
486
487 mcmac = bna_rxf_mcmac_get(rxf, mac_addr);
488 mchandle = bna_rxf_mchandle_get(rxf, handle);
489 if (mchandle == NULL) {
490 mchandle = bna_mcam_mod_handle_get(&rxf->rx->bna->mcam_mod);
491 mchandle->handle = handle;
492 mchandle->refcnt = 0;
493 list_add_tail(&mchandle->qe, &rxf->mcast_handle_q);
494 }
495 mchandle->refcnt++;
496 mcmac->handle = mchandle;
497}
498
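/*
 * Drop a reference on the MCAM handle of a multicast MAC; when the last
 * reference goes away, release the handle and, for a hard cleanup, post a
 * delete request to firmware first. Returns 1 if a request was posted.
 */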
499static int
500bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac,
501 enum bna_cleanup_type cleanup)
502{
503 struct bna_mcam_handle *mchandle;
504 int ret = 0;
505
506 mchandle = mac->handle;
507 if (mchandle == NULL)
508 return ret;
509
510 mchandle->refcnt--;
511 if (mchandle->refcnt == 0) {
512 if (cleanup == BNA_HARD_CLEANUP) {
513 bna_bfi_mcast_del_req(rxf, mchandle->handle);
514 ret = 1;
515 }
516 list_del(&mchandle->qe);
517 bfa_q_qe_init(&mchandle->qe);
518 bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
519 }
520 mac->handle = NULL;
521
522 return ret;
523}
524
525static int
526bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
527{
528 struct bna_mac *mac = NULL;
529 struct list_head *qe;
530 int ret;
531
	/* First delete multicast entries to maintain the count */
533 while (!list_empty(&rxf->mcast_pending_del_q)) {
534 bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
535 bfa_q_qe_init(qe);
536 mac = (struct bna_mac *)qe;
537 ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
538 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
539 if (ret)
540 return ret;
541 }
542
	/* Add multicast entries */
544 if (!list_empty(&rxf->mcast_pending_add_q)) {
545 bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
546 bfa_q_qe_init(qe);
547 mac = (struct bna_mac *)qe;
548 list_add_tail(&mac->qe, &rxf->mcast_active_q);
549 bna_bfi_mcast_add_req(rxf, mac);
550 return 1;
551 }
552
553 return 0;
554}
555
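/*
 * Apply pending VLAN filter configuration: program the lowest pending
 * block and return 1 if a request was posted, 0 if nothing is pending.
 */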
556static int
557bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
558{
559 u8 vlan_pending_bitmask;
560 int block_idx = 0;
561
562 if (rxf->vlan_pending_bitmask) {
563 vlan_pending_bitmask = rxf->vlan_pending_bitmask;
564 while (!(vlan_pending_bitmask & 0x1)) {
565 block_idx++;
566 vlan_pending_bitmask >>= 1;
567 }
568 rxf->vlan_pending_bitmask &= ~(1 << block_idx);
569 bna_bfi_rx_vlan_filter_set(rxf, block_idx);
570 return 1;
571 }
572
573 return 0;
574}
575
576static int
577bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
578{
579 struct list_head *qe;
580 struct bna_mac *mac;
581 int ret;
582
	/* Throw away delete pending mcast entries */
584 while (!list_empty(&rxf->mcast_pending_del_q)) {
585 bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
586 bfa_q_qe_init(qe);
587 mac = (struct bna_mac *)qe;
588 ret = bna_rxf_mcast_del(rxf, mac, cleanup);
589 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
590 if (ret)
591 return ret;
592 }
593
	/* Move active mcast entries to pending_add_q */
595 while (!list_empty(&rxf->mcast_active_q)) {
596 bfa_q_deq(&rxf->mcast_active_q, &qe);
597 bfa_q_qe_init(qe);
598 list_add_tail(qe, &rxf->mcast_pending_add_q);
599 mac = (struct bna_mac *)qe;
600 if (bna_rxf_mcast_del(rxf, mac, cleanup))
601 return 1;
602 }
603
604 return 0;
605}
606
607static int
608bna_rxf_rss_cfg_apply(struct bna_rxf *rxf)
609{
610 if (rxf->rss_pending) {
611 if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) {
612 rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING;
613 bna_bfi_rit_cfg(rxf);
614 return 1;
615 }
616
617 if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) {
618 rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING;
619 bna_bfi_rss_cfg(rxf);
620 return 1;
621 }
622
623 if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) {
624 rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING;
625 bna_bfi_rss_enable(rxf);
626 return 1;
627 }
628 }
629
630 return 0;
631}
632
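/*
 * Apply pending RxF configuration to firmware, one request at a time.
 * Returns 1 if a request was posted (caller waits for the response),
 * 0 if there is nothing left to apply.
 */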
633static int
634bna_rxf_cfg_apply(struct bna_rxf *rxf)
635{
636 if (bna_rxf_ucast_cfg_apply(rxf))
637 return 1;
638
639 if (bna_rxf_mcast_cfg_apply(rxf))
640 return 1;
641
642 if (bna_rxf_promisc_cfg_apply(rxf))
643 return 1;
644
645 if (bna_rxf_allmulti_cfg_apply(rxf))
646 return 1;
647
648 if (bna_rxf_vlan_cfg_apply(rxf))
649 return 1;
650
651 if (bna_rxf_vlan_strip_cfg_apply(rxf))
652 return 1;
653
654 if (bna_rxf_rss_cfg_apply(rxf))
655 return 1;
656
657 return 0;
658}
659
660
661static int
662bna_rxf_fltr_clear(struct bna_rxf *rxf)
663{
664 if (bna_rxf_ucast_cfg_reset(rxf, BNA_HARD_CLEANUP))
665 return 1;
666
667 if (bna_rxf_mcast_cfg_reset(rxf, BNA_HARD_CLEANUP))
668 return 1;
669
670 if (bna_rxf_promisc_cfg_reset(rxf, BNA_HARD_CLEANUP))
671 return 1;
672
673 if (bna_rxf_allmulti_cfg_reset(rxf, BNA_HARD_CLEANUP))
674 return 1;
675
676 return 0;
677}
678
679static void
680bna_rxf_cfg_reset(struct bna_rxf *rxf)
681{
682 bna_rxf_ucast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
683 bna_rxf_mcast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
684 bna_rxf_promisc_cfg_reset(rxf, BNA_SOFT_CLEANUP);
685 bna_rxf_allmulti_cfg_reset(rxf, BNA_SOFT_CLEANUP);
686 bna_rxf_vlan_cfg_soft_reset(rxf);
687 bna_rxf_rss_cfg_soft_reset(rxf);
688}
689
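/* Fill the RSS indirection table (RIT) with the CQ ids of the Rx paths */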
690static void
691bna_rit_init(struct bna_rxf *rxf, int rit_size)
692{
693 struct bna_rx *rx = rxf->rx;
694 struct bna_rxp *rxp;
695 struct list_head *qe;
696 int offset = 0;
697
698 rxf->rit_size = rit_size;
699 list_for_each(qe, &rx->rxp_q) {
700 rxp = (struct bna_rxp *)qe;
701 rxf->rit[offset] = rxp->cq.ccb->id;
702 offset++;
703 }
704
705}
706
707void
708bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr)
709{
710 bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
711}
712
713void
714bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf,
715 struct bfi_msgq_mhdr *msghdr)
716{
717 struct bfi_enet_rsp *rsp =
718 (struct bfi_enet_rsp *)msghdr;
719
	if (rsp->error) {
		/* Clear ucast from cache */
		rxf->ucast_active_set = 0;
	}
724
725 bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
726}
727
728void
729bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
730 struct bfi_msgq_mhdr *msghdr)
731{
732 struct bfi_enet_mcast_add_req *req =
733 &rxf->bfi_enet_cmd.mcast_add_req;
734 struct bfi_enet_mcast_add_rsp *rsp =
735 (struct bfi_enet_mcast_add_rsp *)msghdr;
736
737 bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr,
738 ntohs(rsp->handle));
739 bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
740}
741
742static void
743bna_rxf_init(struct bna_rxf *rxf,
744 struct bna_rx *rx,
745 struct bna_rx_config *q_config,
746 struct bna_res_info *res_info)
747{
748 rxf->rx = rx;
749
750 INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
751 INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
752 rxf->ucast_pending_set = 0;
753 rxf->ucast_active_set = 0;
754 INIT_LIST_HEAD(&rxf->ucast_active_q);
755 rxf->ucast_pending_mac = NULL;
756
757 INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
758 INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
759 INIT_LIST_HEAD(&rxf->mcast_active_q);
760 INIT_LIST_HEAD(&rxf->mcast_handle_q);
761
762 if (q_config->paused)
763 rxf->flags |= BNA_RXF_F_PAUSED;
764
765 rxf->rit = (u8 *)
766 res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva;
767 bna_rit_init(rxf, q_config->num_paths);
768
769 rxf->rss_status = q_config->rss_status;
770 if (rxf->rss_status == BNA_STATUS_T_ENABLED) {
771 rxf->rss_cfg = q_config->rss_config;
772 rxf->rss_pending |= BNA_RSS_F_CFG_PENDING;
773 rxf->rss_pending |= BNA_RSS_F_RIT_PENDING;
774 rxf->rss_pending |= BNA_RSS_F_STATUS_PENDING;
775 }
776
777 rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
778 memset(rxf->vlan_filter_table, 0,
779 (sizeof(u32) * (BFI_ENET_VLAN_ID_MAX / 32)));
780 rxf->vlan_filter_table[0] |= 1;
781 rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
782
783 rxf->vlan_strip_status = q_config->vlan_strip_status;
784
785 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
786}
787
788static void
789bna_rxf_uninit(struct bna_rxf *rxf)
790{
791 struct bna_mac *mac;
792
793 rxf->ucast_pending_set = 0;
794 rxf->ucast_active_set = 0;
795
796 while (!list_empty(&rxf->ucast_pending_add_q)) {
797 bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
798 bfa_q_qe_init(&mac->qe);
799 bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
800 }
801
802 if (rxf->ucast_pending_mac) {
803 bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
804 bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod,
805 rxf->ucast_pending_mac);
806 rxf->ucast_pending_mac = NULL;
807 }
808
809 while (!list_empty(&rxf->mcast_pending_add_q)) {
810 bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
811 bfa_q_qe_init(&mac->qe);
812 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
813 }
814
815 rxf->rxmode_pending = 0;
816 rxf->rxmode_pending_bitmask = 0;
817 if (rxf->rx->bna->promisc_rid == rxf->rx->rid)
818 rxf->rx->bna->promisc_rid = BFI_INVALID_RID;
819 if (rxf->rx->bna->default_mode_rid == rxf->rx->rid)
820 rxf->rx->bna->default_mode_rid = BFI_INVALID_RID;
821
822 rxf->rss_pending = 0;
823 rxf->vlan_strip_pending = false;
824
825 rxf->flags = 0;
826
827 rxf->rx = NULL;
828}
829
830static void
831bna_rx_cb_rxf_started(struct bna_rx *rx)
832{
833 bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
834}
835
836static void
837bna_rxf_start(struct bna_rxf *rxf)
838{
839 rxf->start_cbfn = bna_rx_cb_rxf_started;
840 rxf->start_cbarg = rxf->rx;
841 bfa_fsm_send_event(rxf, RXF_E_START);
842}
843
844static void
845bna_rx_cb_rxf_stopped(struct bna_rx *rx)
846{
847 bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
848}
849
850static void
851bna_rxf_stop(struct bna_rxf *rxf)
852{
853 rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
854 rxf->stop_cbarg = rxf->rx;
855 bfa_fsm_send_event(rxf, RXF_E_STOP);
856}
857
858static void
859bna_rxf_fail(struct bna_rxf *rxf)
860{
861 bfa_fsm_send_event(rxf, RXF_E_FAIL);
862}
863
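/* Set the default unicast MAC of the Rx and let the RxF state machine apply it */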
864enum bna_cb_status
865bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
866 void (*cbfn)(struct bnad *, struct bna_rx *))
867{
868 struct bna_rxf *rxf = &rx->rxf;
869
870 if (rxf->ucast_pending_mac == NULL) {
871 rxf->ucast_pending_mac =
872 bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
873 if (rxf->ucast_pending_mac == NULL)
874 return BNA_CB_UCAST_CAM_FULL;
875 bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
876 }
877
878 memcpy(rxf->ucast_pending_mac->addr, ucmac, ETH_ALEN);
879 rxf->ucast_pending_set = 1;
880 rxf->cam_fltr_cbfn = cbfn;
881 rxf->cam_fltr_cbarg = rx->bna->bnad;
882
883 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
884
885 return BNA_CB_SUCCESS;
886}
887
888enum bna_cb_status
889bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
890 void (*cbfn)(struct bnad *, struct bna_rx *))
891{
892 struct bna_rxf *rxf = &rx->rxf;
893 struct bna_mac *mac;
894
	/* Check if already added or pending addition */
896 if (bna_mac_find(&rxf->mcast_active_q, addr) ||
897 bna_mac_find(&rxf->mcast_pending_add_q, addr)) {
898 if (cbfn)
899 cbfn(rx->bna->bnad, rx);
900 return BNA_CB_SUCCESS;
901 }
902
903 mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
904 if (mac == NULL)
905 return BNA_CB_MCAST_LIST_FULL;
906 bfa_q_qe_init(&mac->qe);
907 memcpy(mac->addr, addr, ETH_ALEN);
908 list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
909
910 rxf->cam_fltr_cbfn = cbfn;
911 rxf->cam_fltr_cbarg = rx->bna->bnad;
912
913 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
914
915 return BNA_CB_SUCCESS;
916}
917
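/*
 * Replace the multicast list: stage the new addresses, schedule currently
 * active entries for deletion and queue the new ones for addition.
 */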
918enum bna_cb_status
919bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
920 void (*cbfn)(struct bnad *, struct bna_rx *))
921{
922 struct bna_rxf *rxf = &rx->rxf;
923 struct list_head list_head;
924 struct list_head *qe;
925 u8 *mcaddr;
926 struct bna_mac *mac;
927 int i;
928
	/* Allocate nodes for the new list */
930 INIT_LIST_HEAD(&list_head);
931 for (i = 0, mcaddr = mclist; i < count; i++) {
932 mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
933 if (mac == NULL)
934 goto err_return;
935 bfa_q_qe_init(&mac->qe);
936 memcpy(mac->addr, mcaddr, ETH_ALEN);
937 list_add_tail(&mac->qe, &list_head);
938
939 mcaddr += ETH_ALEN;
940 }
941
	/* Purge the pending_add_q */
943 while (!list_empty(&rxf->mcast_pending_add_q)) {
944 bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
945 bfa_q_qe_init(qe);
946 mac = (struct bna_mac *)qe;
947 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
948 }
949
	/* Schedule active_q entries for deletion */
951 while (!list_empty(&rxf->mcast_active_q)) {
952 bfa_q_deq(&rxf->mcast_active_q, &qe);
953 mac = (struct bna_mac *)qe;
954 bfa_q_qe_init(&mac->qe);
955 list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
956 }
957
	/* Add the new entries */
959 while (!list_empty(&list_head)) {
960 bfa_q_deq(&list_head, &qe);
961 mac = (struct bna_mac *)qe;
962 bfa_q_qe_init(&mac->qe);
963 list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
964 }
965
966 rxf->cam_fltr_cbfn = cbfn;
967 rxf->cam_fltr_cbarg = rx->bna->bnad;
968 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
969
970 return BNA_CB_SUCCESS;
971
972err_return:
973 while (!list_empty(&list_head)) {
974 bfa_q_deq(&list_head, &qe);
975 mac = (struct bna_mac *)qe;
976 bfa_q_qe_init(&mac->qe);
977 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
978 }
979
980 return BNA_CB_MCAST_LIST_FULL;
981}
982
983void
984bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
985{
986 struct bna_rxf *rxf = &rx->rxf;
987 int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
988 int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
989 int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
990
991 rxf->vlan_filter_table[index] |= bit;
992 if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
993 rxf->vlan_pending_bitmask |= (1 << group_id);
994 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
995 }
996}
997
998void
999bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
1000{
1001 struct bna_rxf *rxf = &rx->rxf;
1002 int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
1003 int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
1004 int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
1005
1006 rxf->vlan_filter_table[index] &= ~bit;
1007 if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
1008 rxf->vlan_pending_bitmask |= (1 << group_id);
1009 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
1010 }
1011}
1012
1013static int
1014bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
1015{
1016 struct bna_mac *mac = NULL;
1017 struct list_head *qe;
1018
	/* Delete MAC addresses previously added */
1020 if (!list_empty(&rxf->ucast_pending_del_q)) {
1021 bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
1022 bfa_q_qe_init(qe);
1023 mac = (struct bna_mac *)qe;
1024 bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
1025 bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
1026 return 1;
1027 }
1028
	/* Set default unicast MAC */
1030 if (rxf->ucast_pending_set) {
1031 rxf->ucast_pending_set = 0;
1032 memcpy(rxf->ucast_active_mac.addr,
1033 rxf->ucast_pending_mac->addr, ETH_ALEN);
1034 rxf->ucast_active_set = 1;
1035 bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
1036 BFI_ENET_H2I_MAC_UCAST_SET_REQ);
1037 return 1;
1038 }
1039
	/* Add additional MAC entries */
1041 if (!list_empty(&rxf->ucast_pending_add_q)) {
1042 bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
1043 bfa_q_qe_init(qe);
1044 mac = (struct bna_mac *)qe;
1045 list_add_tail(&mac->qe, &rxf->ucast_active_q);
1046 bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
1047 return 1;
1048 }
1049
1050 return 0;
1051}
1052
1053static int
1054bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1055{
1056 struct list_head *qe;
1057 struct bna_mac *mac;
1058
	/* Throw away delete pending ucast entries */
1060 while (!list_empty(&rxf->ucast_pending_del_q)) {
1061 bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
1062 bfa_q_qe_init(qe);
1063 mac = (struct bna_mac *)qe;
1064 if (cleanup == BNA_SOFT_CLEANUP)
1065 bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
1066 else {
1067 bna_bfi_ucast_req(rxf, mac,
1068 BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
1069 bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
1070 return 1;
1071 }
1072 }
1073
	/* Move active ucast entries to pending_add_q */
1075 while (!list_empty(&rxf->ucast_active_q)) {
1076 bfa_q_deq(&rxf->ucast_active_q, &qe);
1077 bfa_q_qe_init(qe);
1078 list_add_tail(qe, &rxf->ucast_pending_add_q);
1079 if (cleanup == BNA_HARD_CLEANUP) {
1080 mac = (struct bna_mac *)qe;
1081 bna_bfi_ucast_req(rxf, mac,
1082 BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
1083 return 1;
1084 }
1085 }
1086
1087 if (rxf->ucast_active_set) {
1088 rxf->ucast_pending_set = 1;
1089 rxf->ucast_active_set = 0;
1090 if (cleanup == BNA_HARD_CLEANUP) {
1091 bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
1092 BFI_ENET_H2I_MAC_UCAST_CLR_REQ);
1093 return 1;
1094 }
1095 }
1096
1097 return 0;
1098}
1099
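/* Apply a pending promiscuous mode change; returns 1 if a request was posted */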
1100static int
1101bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf)
1102{
1103 struct bna *bna = rxf->rx->bna;
1104
	/* Enable/disable promiscuous mode */
	if (is_promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
1111 rxf->rxmode_active |= BNA_RXMODE_PROMISC;
1112 bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_ENABLED);
1113 return 1;
1114 } else if (is_promisc_disable(rxf->rxmode_pending,
1115 rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
1117 promisc_inactive(rxf->rxmode_pending,
1118 rxf->rxmode_pending_bitmask);
1119 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1120 bna->promisc_rid = BFI_INVALID_RID;
1121 bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1122 return 1;
1123 }
1124
1125 return 0;
1126}
1127
1128static int
1129bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1130{
1131 struct bna *bna = rxf->rx->bna;
1132
	/* Clear pending promisc mode disable */
1134 if (is_promisc_disable(rxf->rxmode_pending,
1135 rxf->rxmode_pending_bitmask)) {
1136 promisc_inactive(rxf->rxmode_pending,
1137 rxf->rxmode_pending_bitmask);
1138 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1139 bna->promisc_rid = BFI_INVALID_RID;
1140 if (cleanup == BNA_HARD_CLEANUP) {
1141 bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1142 return 1;
1143 }
1144 }
1145
	/* Move promisc mode config from active -> pending */
1147 if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
1148 promisc_enable(rxf->rxmode_pending,
1149 rxf->rxmode_pending_bitmask);
1150 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1151 if (cleanup == BNA_HARD_CLEANUP) {
1152 bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1153 return 1;
1154 }
1155 }
1156
1157 return 0;
1158}
1159
1160static int
1161bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf)
1162{
	/* Enable/disable allmulti mode */
1164 if (is_allmulti_enable(rxf->rxmode_pending,
1165 rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
1167 allmulti_inactive(rxf->rxmode_pending,
1168 rxf->rxmode_pending_bitmask);
1169 rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
1170 bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_DISABLED);
1171 return 1;
1172 } else if (is_allmulti_disable(rxf->rxmode_pending,
1173 rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
1175 allmulti_inactive(rxf->rxmode_pending,
1176 rxf->rxmode_pending_bitmask);
1177 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1178 bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1179 return 1;
1180 }
1181
1182 return 0;
1183}
1184
1185static int
1186bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1187{
	/* Clear pending allmulti mode disable */
1189 if (is_allmulti_disable(rxf->rxmode_pending,
1190 rxf->rxmode_pending_bitmask)) {
1191 allmulti_inactive(rxf->rxmode_pending,
1192 rxf->rxmode_pending_bitmask);
1193 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1194 if (cleanup == BNA_HARD_CLEANUP) {
1195 bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1196 return 1;
1197 }
1198 }
1199
	/* Move allmulti mode config from active -> pending */
1201 if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
1202 allmulti_enable(rxf->rxmode_pending,
1203 rxf->rxmode_pending_bitmask);
1204 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1205 if (cleanup == BNA_HARD_CLEANUP) {
1206 bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1207 return 1;
1208 }
1209 }
1210
1211 return 0;
1212}
1213
1214static int
1215bna_rxf_promisc_enable(struct bna_rxf *rxf)
1216{
1217 struct bna *bna = rxf->rx->bna;
1218 int ret = 0;
1219
1220 if (is_promisc_enable(rxf->rxmode_pending,
1221 rxf->rxmode_pending_bitmask) ||
1222 (rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
		/* Do nothing if pending enable or already enabled */
	} else if (is_promisc_disable(rxf->rxmode_pending,
					rxf->rxmode_pending_bitmask)) {
		/* Turn off pending disable command */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
	} else {
		/* Schedule enable */
1231 promisc_enable(rxf->rxmode_pending,
1232 rxf->rxmode_pending_bitmask);
1233 bna->promisc_rid = rxf->rx->rid;
1234 ret = 1;
1235 }
1236
1237 return ret;
1238}
1239
1240static int
1241bna_rxf_promisc_disable(struct bna_rxf *rxf)
1242{
1243 struct bna *bna = rxf->rx->bna;
1244 int ret = 0;
1245
1246 if (is_promisc_disable(rxf->rxmode_pending,
1247 rxf->rxmode_pending_bitmask) ||
1248 (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))) {
		/* Do nothing if pending disable or already disabled */
	} else if (is_promisc_enable(rxf->rxmode_pending,
					rxf->rxmode_pending_bitmask)) {
		/* Turn off pending enable command */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		bna->promisc_rid = BFI_INVALID_RID;
	} else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		/* Schedule disable */
1258 promisc_disable(rxf->rxmode_pending,
1259 rxf->rxmode_pending_bitmask);
1260 ret = 1;
1261 }
1262
1263 return ret;
1264}
1265
1266static int
1267bna_rxf_allmulti_enable(struct bna_rxf *rxf)
1268{
1269 int ret = 0;
1270
1271 if (is_allmulti_enable(rxf->rxmode_pending,
1272 rxf->rxmode_pending_bitmask) ||
1273 (rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
		/* Do nothing if pending enable or already enabled */
	} else if (is_allmulti_disable(rxf->rxmode_pending,
					rxf->rxmode_pending_bitmask)) {
		/* Turn off pending disable command */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
	} else {
		/* Schedule enable */
1282 allmulti_enable(rxf->rxmode_pending,
1283 rxf->rxmode_pending_bitmask);
1284 ret = 1;
1285 }
1286
1287 return ret;
1288}
1289
1290static int
1291bna_rxf_allmulti_disable(struct bna_rxf *rxf)
1292{
1293 int ret = 0;
1294
1295 if (is_allmulti_disable(rxf->rxmode_pending,
1296 rxf->rxmode_pending_bitmask) ||
1297 (!(rxf->rxmode_active & BNA_RXMODE_ALLMULTI))) {
		/* Do nothing if pending disable or already disabled */
	} else if (is_allmulti_enable(rxf->rxmode_pending,
					rxf->rxmode_pending_bitmask)) {
		/* Turn off pending enable command */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
	} else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		/* Schedule disable */
1306 allmulti_disable(rxf->rxmode_pending,
1307 rxf->rxmode_pending_bitmask);
1308 ret = 1;
1309 }
1310
1311 return ret;
1312}
1313
1314static int
1315bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf)
1316{
1317 if (rxf->vlan_strip_pending) {
1318 rxf->vlan_strip_pending = false;
1319 bna_bfi_vlan_strip_enable(rxf);
1320 return 1;
1321 }
1322
1323 return 0;
1324}
1325
/* RX */
1327
1328#define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
1329 (qcfg)->num_paths : ((qcfg)->num_paths * 2))
1330
1331#define SIZE_TO_PAGES(size) (((size) >> PAGE_SHIFT) + ((((size) &\
1332 (PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
1333
1334#define call_rx_stop_cbfn(rx) \
1335do { \
1336 if ((rx)->stop_cbfn) { \
1337 void (*cbfn)(void *, struct bna_rx *); \
1338 void *cbarg; \
1339 cbfn = (rx)->stop_cbfn; \
1340 cbarg = (rx)->stop_cbarg; \
1341 (rx)->stop_cbfn = NULL; \
1342 (rx)->stop_cbarg = NULL; \
1343 cbfn(cbarg, rx); \
1344 } \
1345} while (0)
1346
1347#define call_rx_stall_cbfn(rx) \
1348do { \
1349 if ((rx)->rx_stall_cbfn) \
1350 (rx)->rx_stall_cbfn((rx)->bna->bnad, (rx)); \
1351} while (0)
1352
1353#define bfi_enet_datapath_q_init(bfi_q, bna_qpt) \
1354do { \
1355 struct bna_dma_addr cur_q_addr = \
1356 *((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr)); \
1357 (bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb; \
1358 (bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb; \
1359 (bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb; \
1360 (bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb; \
1361 (bfi_q)->pages = htons((u16)(bna_qpt)->page_count); \
1362 (bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size);\
1363} while (0)
1364
1365static void bna_bfi_rx_enet_start(struct bna_rx *rx);
1366static void bna_rx_enet_stop(struct bna_rx *rx);
1367static void bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx);
1368
1369bfa_fsm_state_decl(bna_rx, stopped,
1370 struct bna_rx, enum bna_rx_event);
1371bfa_fsm_state_decl(bna_rx, start_wait,
1372 struct bna_rx, enum bna_rx_event);
1373bfa_fsm_state_decl(bna_rx, start_stop_wait,
1374 struct bna_rx, enum bna_rx_event);
1375bfa_fsm_state_decl(bna_rx, rxf_start_wait,
1376 struct bna_rx, enum bna_rx_event);
1377bfa_fsm_state_decl(bna_rx, started,
1378 struct bna_rx, enum bna_rx_event);
1379bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
1380 struct bna_rx, enum bna_rx_event);
1381bfa_fsm_state_decl(bna_rx, stop_wait,
1382 struct bna_rx, enum bna_rx_event);
1383bfa_fsm_state_decl(bna_rx, cleanup_wait,
1384 struct bna_rx, enum bna_rx_event);
1385bfa_fsm_state_decl(bna_rx, failed,
1386 struct bna_rx, enum bna_rx_event);
1387bfa_fsm_state_decl(bna_rx, quiesce_wait,
1388 struct bna_rx, enum bna_rx_event);
1389
1390static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
1391{
1392 call_rx_stop_cbfn(rx);
1393}
1394
1395static void bna_rx_sm_stopped(struct bna_rx *rx,
1396 enum bna_rx_event event)
1397{
1398 switch (event) {
1399 case RX_E_START:
1400 bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
1401 break;
1402
1403 case RX_E_STOP:
1404 call_rx_stop_cbfn(rx);
1405 break;
1406
	case RX_E_FAIL:
		/* no-op */
		break;
1410
1411 default:
1412 bfa_sm_fault(event);
1413 break;
1414 }
1415}
1416
1417static void bna_rx_sm_start_wait_entry(struct bna_rx *rx)
1418{
1419 bna_bfi_rx_enet_start(rx);
1420}
1421
1422void
1423bna_rx_sm_stop_wait_entry(struct bna_rx *rx)
1424{
1425}
1426
1427static void
1428bna_rx_sm_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1429{
1430 switch (event) {
1431 case RX_E_FAIL:
1432 case RX_E_STOPPED:
1433 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1434 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1435 break;
1436
1437 case RX_E_STARTED:
1438 bna_rx_enet_stop(rx);
1439 break;
1440
1441 default:
1442 bfa_sm_fault(event);
1443 break;
1444 }
1445}
1446
1447static void bna_rx_sm_start_wait(struct bna_rx *rx,
1448 enum bna_rx_event event)
1449{
1450 switch (event) {
1451 case RX_E_STOP:
1452 bfa_fsm_set_state(rx, bna_rx_sm_start_stop_wait);
1453 break;
1454
1455 case RX_E_FAIL:
1456 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1457 break;
1458
1459 case RX_E_STARTED:
1460 bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
1461 break;
1462
1463 default:
1464 bfa_sm_fault(event);
1465 break;
1466 }
1467}
1468
1469static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
1470{
1471 rx->rx_post_cbfn(rx->bna->bnad, rx);
1472 bna_rxf_start(&rx->rxf);
1473}
1474
1475void
1476bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
1477{
1478}
1479
1480static void
1481bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1482{
1483 switch (event) {
1484 case RX_E_FAIL:
1485 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1486 bna_rxf_fail(&rx->rxf);
1487 call_rx_stall_cbfn(rx);
1488 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1489 break;
1490
1491 case RX_E_RXF_STARTED:
1492 bna_rxf_stop(&rx->rxf);
1493 break;
1494
1495 case RX_E_RXF_STOPPED:
1496 bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
1497 call_rx_stall_cbfn(rx);
1498 bna_rx_enet_stop(rx);
1499 break;
1500
1501 default:
1502 bfa_sm_fault(event);
1503 break;
1504 }
1505
1506}
1507
1508static void
1509bna_rx_sm_start_stop_wait_entry(struct bna_rx *rx)
1510{
1511}
1512
1513static void
1514bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1515{
1516 switch (event) {
1517 case RX_E_FAIL:
1518 case RX_E_STOPPED:
1519 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1520 break;
1521
1522 case RX_E_STARTED:
1523 bna_rx_enet_stop(rx);
1524 break;
1525
1526 default:
1527 bfa_sm_fault(event);
1528 }
1529}
1530
1531void
1532bna_rx_sm_started_entry(struct bna_rx *rx)
1533{
1534 struct bna_rxp *rxp;
1535 struct list_head *qe_rxp;
1536 int is_regular = (rx->type == BNA_RX_T_REGULAR);
1537
	/* Start IB */
1539 list_for_each(qe_rxp, &rx->rxp_q) {
1540 rxp = (struct bna_rxp *)qe_rxp;
1541 bna_ib_start(rx->bna, &rxp->cq.ib, is_regular);
1542 }
1543
1544 bna_ethport_cb_rx_started(&rx->bna->ethport);
1545}
1546
1547static void
1548bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
1549{
1550 switch (event) {
1551 case RX_E_STOP:
1552 bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1553 bna_ethport_cb_rx_stopped(&rx->bna->ethport);
1554 bna_rxf_stop(&rx->rxf);
1555 break;
1556
1557 case RX_E_FAIL:
1558 bfa_fsm_set_state(rx, bna_rx_sm_failed);
1559 bna_ethport_cb_rx_stopped(&rx->bna->ethport);
1560 bna_rxf_fail(&rx->rxf);
1561 call_rx_stall_cbfn(rx);
1562 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1563 break;
1564
1565 default:
1566 bfa_sm_fault(event);
1567 break;
1568 }
1569}
1570
1571static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
1572 enum bna_rx_event event)
1573{
1574 switch (event) {
1575 case RX_E_STOP:
1576 bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1577 break;
1578
1579 case RX_E_FAIL:
1580 bfa_fsm_set_state(rx, bna_rx_sm_failed);
1581 bna_rxf_fail(&rx->rxf);
1582 call_rx_stall_cbfn(rx);
1583 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1584 break;
1585
1586 case RX_E_RXF_STARTED:
1587 bfa_fsm_set_state(rx, bna_rx_sm_started);
1588 break;
1589
1590 default:
1591 bfa_sm_fault(event);
1592 break;
1593 }
1594}
1595
1596void
1597bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx)
1598{
1599}
1600
1601void
1602bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event)
1603{
1604 switch (event) {
	case RX_E_FAIL:
	case RX_E_RXF_STOPPED:
		/* No-op */
		break;
1609
1610 case RX_E_CLEANUP_DONE:
1611 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1612 break;
1613
1614 default:
1615 bfa_sm_fault(event);
1616 break;
1617 }
1618}
1619
1620static void
1621bna_rx_sm_failed_entry(struct bna_rx *rx)
1622{
1623}
1624
1625static void
1626bna_rx_sm_failed(struct bna_rx *rx, enum bna_rx_event event)
1627{
1628 switch (event) {
1629 case RX_E_START:
1630 bfa_fsm_set_state(rx, bna_rx_sm_quiesce_wait);
1631 break;
1632
1633 case RX_E_STOP:
1634 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1635 break;
1636
	case RX_E_FAIL:
	case RX_E_RXF_STARTED:
	case RX_E_RXF_STOPPED:
		/* No-op */
		break;
1642
1643 case RX_E_CLEANUP_DONE:
1644 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1645 break;
1646
1647 default:
1648 bfa_sm_fault(event);
1649 break;
	}
}
1651
1652static void
1653bna_rx_sm_quiesce_wait_entry(struct bna_rx *rx)
1654{
1655}
1656
1657static void
1658bna_rx_sm_quiesce_wait(struct bna_rx *rx, enum bna_rx_event event)
1659{
1660 switch (event) {
1661 case RX_E_STOP:
1662 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1663 break;
1664
1665 case RX_E_FAIL:
1666 bfa_fsm_set_state(rx, bna_rx_sm_failed);
1667 break;
1668
1669 case RX_E_CLEANUP_DONE:
1670 bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
1671 break;
1672
1673 default:
1674 bfa_sm_fault(event);
1675 break;
1676 }
1677}
1678
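/*
 * Build and post the Rx configuration request to firmware: per-path queue
 * page tables, interrupt block settings and the Rx queue type.
 */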
1679static void
1680bna_bfi_rx_enet_start(struct bna_rx *rx)
1681{
1682 struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req;
1683 struct bna_rxp *rxp = NULL;
1684 struct bna_rxq *q0 = NULL, *q1 = NULL;
1685 struct list_head *rxp_qe;
1686 int i;
1687
1688 bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
1689 BFI_ENET_H2I_RX_CFG_SET_REQ, 0, rx->rid);
1690 cfg_req->mh.num_entries = htons(
1691 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));
1692
1693 cfg_req->num_queue_sets = rx->num_paths;
1694 for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
1695 i < rx->num_paths;
1696 i++, rxp_qe = bfa_q_next(rxp_qe)) {
1697 rxp = (struct bna_rxp *)rxp_qe;
1698
1699 GET_RXQS(rxp, q0, q1);
1700 switch (rxp->type) {
		case BNA_RXP_SLR:
		case BNA_RXP_HDS:
			/* Small RxQ */
			bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q,
						&q1->qpt);
			cfg_req->q_cfg[i].qs.rx_buffer_size =
				htons((u16)q1->buffer_size);
			/* Fall through */

		case BNA_RXP_SINGLE:
			/* Large/Single RxQ */
1712 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
1713 &q0->qpt);
1714 q0->buffer_size =
1715 bna_enet_mtu_get(&rx->bna->enet);
1716 cfg_req->q_cfg[i].ql.rx_buffer_size =
1717 htons((u16)q0->buffer_size);
1718 break;
1719
1720 default:
1721 BUG_ON(1);
1722 }
1723
1724 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q,
1725 &rxp->cq.qpt);
1726
1727 cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
1728 rxp->cq.ib.ib_seg_host_addr.lsb;
1729 cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
1730 rxp->cq.ib.ib_seg_host_addr.msb;
1731 cfg_req->q_cfg[i].ib.intr.msix_index =
1732 htons((u16)rxp->cq.ib.intr_vector);
1733 }
1734
1735 cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_DISABLED;
1736 cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
1737 cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
1738 cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_DISABLED;
1739 cfg_req->ib_cfg.msix = (rxp->cq.ib.intr_type == BNA_INTR_T_MSIX)
1740 ? BNA_STATUS_T_ENABLED :
1741 BNA_STATUS_T_DISABLED;
1742 cfg_req->ib_cfg.coalescing_timeout =
1743 htonl((u32)rxp->cq.ib.coalescing_timeo);
1744 cfg_req->ib_cfg.inter_pkt_timeout =
1745 htonl((u32)rxp->cq.ib.interpkt_timeo);
1746 cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count;
1747
1748 switch (rxp->type) {
1749 case BNA_RXP_SLR:
1750 cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_LARGE_SMALL;
1751 break;
1752
1753 case BNA_RXP_HDS:
1754 cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_HDS;
1755 cfg_req->rx_cfg.hds.type = rx->hds_cfg.hdr_type;
1756 cfg_req->rx_cfg.hds.force_offset = rx->hds_cfg.forced_offset;
1757 cfg_req->rx_cfg.hds.max_header_size = rx->hds_cfg.forced_offset;
1758 break;
1759
1760 case BNA_RXP_SINGLE:
1761 cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_SINGLE;
1762 break;
1763
1764 default:
1765 BUG_ON(1);
1766 }
1767 cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status;
1768
1769 bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL,
1770 sizeof(struct bfi_enet_rx_cfg_req), &cfg_req->mh);
1771 bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
1772}
1773
1774static void
1775bna_bfi_rx_enet_stop(struct bna_rx *rx)
1776{
1777 struct bfi_enet_req *req = &rx->bfi_enet_cmd.req;
1778
1779 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
1780 BFI_ENET_H2I_RX_CFG_CLR_REQ, 0, rx->rid);
1781 req->mh.num_entries = htons(
1782 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
1783 bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
1784 &req->mh);
1785 bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
1786}
1787
1788static void
1789bna_rx_enet_stop(struct bna_rx *rx)
1790{
1791 struct bna_rxp *rxp;
1792 struct list_head *qe_rxp;
1793
1794
1795 list_for_each(qe_rxp, &rx->rxp_q) {
1796 rxp = (struct bna_rxp *)qe_rxp;
1797 bna_ib_stop(rx->bna, &rxp->cq.ib);
1798 }
1799
1800 bna_bfi_rx_enet_stop(rx);
1801}
1802
1803static int
1804bna_rx_res_check(struct bna_rx_mod *rx_mod, struct bna_rx_config *rx_cfg)
1805{
1806 if ((rx_mod->rx_free_count == 0) ||
1807 (rx_mod->rxp_free_count == 0) ||
1808 (rx_mod->rxq_free_count == 0))
1809 return 0;
1810
1811 if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
1812 if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
1813 (rx_mod->rxq_free_count < rx_cfg->num_paths))
1814 return 0;
1815 } else {
1816 if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
1817 (rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
1818 return 0;
1819 }
1820
1821 return 1;
1822}
1823
1824static struct bna_rxq *
1825bna_rxq_get(struct bna_rx_mod *rx_mod)
1826{
1827 struct bna_rxq *rxq = NULL;
1828 struct list_head *qe = NULL;
1829
1830 bfa_q_deq(&rx_mod->rxq_free_q, &qe);
1831 rx_mod->rxq_free_count--;
1832 rxq = (struct bna_rxq *)qe;
1833 bfa_q_qe_init(&rxq->qe);
1834
1835 return rxq;
1836}
1837
1838static void
1839bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
1840{
1841 bfa_q_qe_init(&rxq->qe);
1842 list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
1843 rx_mod->rxq_free_count++;
1844}
1845
1846static struct bna_rxp *
1847bna_rxp_get(struct bna_rx_mod *rx_mod)
1848{
1849 struct list_head *qe = NULL;
1850 struct bna_rxp *rxp = NULL;
1851
1852 bfa_q_deq(&rx_mod->rxp_free_q, &qe);
1853 rx_mod->rxp_free_count--;
1854 rxp = (struct bna_rxp *)qe;
1855 bfa_q_qe_init(&rxp->qe);
1856
1857 return rxp;
1858}
1859
1860static void
1861bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
1862{
1863 bfa_q_qe_init(&rxp->qe);
1864 list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
1865 rx_mod->rxp_free_count++;
1866}
1867
1868static struct bna_rx *
1869bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
1870{
1871 struct list_head *qe = NULL;
1872 struct bna_rx *rx = NULL;
1873
1874 if (type == BNA_RX_T_REGULAR) {
1875 bfa_q_deq(&rx_mod->rx_free_q, &qe);
1876 } else
1877 bfa_q_deq_tail(&rx_mod->rx_free_q, &qe);
1878
1879 rx_mod->rx_free_count--;
1880 rx = (struct bna_rx *)qe;
1881 bfa_q_qe_init(&rx->qe);
1882 list_add_tail(&rx->qe, &rx_mod->rx_active_q);
1883 rx->type = type;
1884
1885 return rx;
1886}
1887
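/* Return an Rx object to the free list, keeping the list sorted by rid */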
1888static void
1889bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
1890{
1891 struct list_head *prev_qe = NULL;
1892 struct list_head *qe;
1893
1894 bfa_q_qe_init(&rx->qe);
1895
1896 list_for_each(qe, &rx_mod->rx_free_q) {
1897 if (((struct bna_rx *)qe)->rid < rx->rid)
1898 prev_qe = qe;
1899 else
1900 break;
1901 }
1902
1903 if (prev_qe == NULL) {
1904
1905 bfa_q_enq_head(&rx_mod->rx_free_q, &rx->qe);
1906 } else if (bfa_q_next(prev_qe) == &rx_mod->rx_free_q) {
1907
1908 list_add_tail(&rx->qe, &rx_mod->rx_free_q);
1909 } else {
1910
1911 bfa_q_next(&rx->qe) = bfa_q_next(prev_qe);
1912 bfa_q_prev(&rx->qe) = prev_qe;
1913 bfa_q_next(prev_qe) = &rx->qe;
1914 bfa_q_prev(bfa_q_next(&rx->qe)) = &rx->qe;
1915 }
1916
1917 rx_mod->rx_free_count++;
1918}
1919
1920static void
1921bna_rxp_add_rxqs(struct bna_rxp *rxp, struct bna_rxq *q0,
1922 struct bna_rxq *q1)
1923{
1924 switch (rxp->type) {
1925 case BNA_RXP_SINGLE:
1926 rxp->rxq.single.only = q0;
1927 rxp->rxq.single.reserved = NULL;
1928 break;
1929 case BNA_RXP_SLR:
1930 rxp->rxq.slr.large = q0;
1931 rxp->rxq.slr.small = q1;
1932 break;
1933 case BNA_RXP_HDS:
1934 rxp->rxq.hds.data = q0;
1935 rxp->rxq.hds.hdr = q1;
1936 break;
1937 default:
1938 break;
1939 }
1940}
1941
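/*
 * Set up the RxQ queue page table: record the QPT DMA/kernel addresses and
 * fill the software and hardware page tables from the page memory block.
 */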
1942static void
1943bna_rxq_qpt_setup(struct bna_rxq *rxq,
1944 struct bna_rxp *rxp,
1945 u32 page_count,
1946 u32 page_size,
1947 struct bna_mem_descr *qpt_mem,
1948 struct bna_mem_descr *swqpt_mem,
1949 struct bna_mem_descr *page_mem)
1950{
1951 u8 *kva;
1952 u64 dma;
1953 struct bna_dma_addr bna_dma;
1954 int i;
1955
1956 rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
1957 rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
1958 rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
1959 rxq->qpt.page_count = page_count;
1960 rxq->qpt.page_size = page_size;
1961
1962 rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
1963 rxq->rcb->sw_q = page_mem->kva;
1964
1965 kva = page_mem->kva;
1966 BNA_GET_DMA_ADDR(&page_mem->dma, dma);
1967
1968 for (i = 0; i < rxq->qpt.page_count; i++) {
1969 rxq->rcb->sw_qpt[i] = kva;
1970 kva += PAGE_SIZE;
1971
1972 BNA_SET_DMA_ADDR(dma, &bna_dma);
1973 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
1974 bna_dma.lsb;
1975 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
1976 bna_dma.msb;
1977 dma += PAGE_SIZE;
1978 }
1979}
1980
1981static void
1982bna_rxp_cqpt_setup(struct bna_rxp *rxp,
1983 u32 page_count,
1984 u32 page_size,
1985 struct bna_mem_descr *qpt_mem,
1986 struct bna_mem_descr *swqpt_mem,
1987 struct bna_mem_descr *page_mem)
1988{
1989 u8 *kva;
1990 u64 dma;
1991 struct bna_dma_addr bna_dma;
1992 int i;
1993
1994 rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
1995 rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
1996 rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
1997 rxp->cq.qpt.page_count = page_count;
1998 rxp->cq.qpt.page_size = page_size;
1999
2000 rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
2001 rxp->cq.ccb->sw_q = page_mem->kva;
2002
2003 kva = page_mem->kva;
2004 BNA_GET_DMA_ADDR(&page_mem->dma, dma);
2005
2006 for (i = 0; i < rxp->cq.qpt.page_count; i++) {
2007 rxp->cq.ccb->sw_qpt[i] = kva;
2008 kva += PAGE_SIZE;
2009
2010 BNA_SET_DMA_ADDR(dma, &bna_dma);
2011 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
2012 bna_dma.lsb;
2013 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
2014 bna_dma.msb;
2015 dma += PAGE_SIZE;
2016 }
2017}
2018
2019static void
2020bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx)
2021{
2022 struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
2023
2024 bfa_wc_down(&rx_mod->rx_stop_wc);
2025}
2026
2027static void
2028bna_rx_mod_cb_rx_stopped_all(void *arg)
2029{
2030 struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
2031
2032 if (rx_mod->stop_cbfn)
2033 rx_mod->stop_cbfn(&rx_mod->bna->enet);
2034 rx_mod->stop_cbfn = NULL;
2035}
2036
2037static void
2038bna_rx_start(struct bna_rx *rx)
2039{
2040 rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2041 if (rx->rx_flags & BNA_RX_F_ENABLED)
2042 bfa_fsm_send_event(rx, RX_E_START);
2043}
2044
2045static void
2046bna_rx_stop(struct bna_rx *rx)
2047{
2048 rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
2049 if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
2050 bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx);
2051 else {
2052 rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
2053 rx->stop_cbarg = &rx->bna->rx_mod;
2054 bfa_fsm_send_event(rx, RX_E_STOP);
2055 }
2056}
2057
2058static void
2059bna_rx_fail(struct bna_rx *rx)
2060{
2061
2062 rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
2063 bfa_fsm_send_event(rx, RX_E_FAIL);
2064}
2065
2066void
2067bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2068{
2069 struct bna_rx *rx;
2070 struct list_head *qe;
2071
2072 rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED;
2073 if (type == BNA_RX_T_LOOPBACK)
2074 rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK;
2075
2076 list_for_each(qe, &rx_mod->rx_active_q) {
2077 rx = (struct bna_rx *)qe;
2078 if (rx->type == type)
2079 bna_rx_start(rx);
2080 }
2081}
2082
2083void
2084bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2085{
2086 struct bna_rx *rx;
2087 struct list_head *qe;
2088
2089 rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
2090 rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
2091
2092 rx_mod->stop_cbfn = bna_enet_cb_rx_stopped;
2093
2094 bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod);
2095
2096 list_for_each(qe, &rx_mod->rx_active_q) {
2097 rx = (struct bna_rx *)qe;
2098 if (rx->type == type) {
2099 bfa_wc_up(&rx_mod->rx_stop_wc);
2100 bna_rx_stop(rx);
2101 }
2102 }
2103
2104 bfa_wc_wait(&rx_mod->rx_stop_wc);
2105}
2106
2107void
2108bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
2109{
2110 struct bna_rx *rx;
2111 struct list_head *qe;
2112
2113 rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
2114 rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
2115
2116 list_for_each(qe, &rx_mod->rx_active_q) {
2117 rx = (struct bna_rx *)qe;
2118 bna_rx_fail(rx);
2119 }
2120}
2121
2122void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
2123 struct bna_res_info *res_info)
2124{
2125 int index;
2126 struct bna_rx *rx_ptr;
2127 struct bna_rxp *rxp_ptr;
2128 struct bna_rxq *rxq_ptr;
2129
2130 rx_mod->bna = bna;
2131 rx_mod->flags = 0;
2132
2133 rx_mod->rx = (struct bna_rx *)
2134 res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
2135 rx_mod->rxp = (struct bna_rxp *)
2136 res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
2137 rx_mod->rxq = (struct bna_rxq *)
2138 res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;
2139
	/* Initialize the queues */
2141 INIT_LIST_HEAD(&rx_mod->rx_free_q);
2142 rx_mod->rx_free_count = 0;
2143 INIT_LIST_HEAD(&rx_mod->rxq_free_q);
2144 rx_mod->rxq_free_count = 0;
2145 INIT_LIST_HEAD(&rx_mod->rxp_free_q);
2146 rx_mod->rxp_free_count = 0;
2147 INIT_LIST_HEAD(&rx_mod->rx_active_q);
2148
	/* Build RX queues */
2150 for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
2151 rx_ptr = &rx_mod->rx[index];
2152
2153 bfa_q_qe_init(&rx_ptr->qe);
2154 INIT_LIST_HEAD(&rx_ptr->rxp_q);
2155 rx_ptr->bna = NULL;
2156 rx_ptr->rid = index;
2157 rx_ptr->stop_cbfn = NULL;
2158 rx_ptr->stop_cbarg = NULL;
2159
2160 list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
2161 rx_mod->rx_free_count++;
2162 }
2163
	/* Build RX-path queues */
2165 for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
2166 rxp_ptr = &rx_mod->rxp[index];
2167 bfa_q_qe_init(&rxp_ptr->qe);
2168 list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
2169 rx_mod->rxp_free_count++;
2170 }
2171
	/* Build RXQ queues */
2173 for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) {
2174 rxq_ptr = &rx_mod->rxq[index];
2175 bfa_q_qe_init(&rxq_ptr->qe);
2176 list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
2177 rx_mod->rxq_free_count++;
2178 }
2179}
2180
2181void
2182bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
2183{
2184 struct list_head *qe;
2185 int i;
2186
2187 i = 0;
2188 list_for_each(qe, &rx_mod->rx_free_q)
2189 i++;
2190
2191 i = 0;
2192 list_for_each(qe, &rx_mod->rxp_free_q)
2193 i++;
2194
2195 i = 0;
2196 list_for_each(qe, &rx_mod->rxq_free_q)
2197 i++;
2198
2199 rx_mod->bna = NULL;
2200}
2201
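/*
 * Handle the Rx config response from firmware: save the hardware ids,
 * set up doorbell addresses and reset the queue indexes for each path.
 */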
2202void
2203bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
2204{
2205 struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp;
2206 struct bna_rxp *rxp = NULL;
2207 struct bna_rxq *q0 = NULL, *q1 = NULL;
2208 struct list_head *rxp_qe;
2209 int i;
2210
2211 bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp,
2212 sizeof(struct bfi_enet_rx_cfg_rsp));
2213
2214 rx->hw_id = cfg_rsp->hw_id;
2215
2216 for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
2217 i < rx->num_paths;
2218 i++, rxp_qe = bfa_q_next(rxp_qe)) {
2219 rxp = (struct bna_rxp *)rxp_qe;
2220 GET_RXQS(rxp, q0, q1);
2221
2222
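 /* Set up doorbell addresses and h/w queue ids from the f/w response */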
2223 rxp->cq.ccb->i_dbell->doorbell_addr =
2224 rx->bna->pcidev.pci_bar_kva
2225 + ntohl(cfg_rsp->q_handles[i].i_dbell);
2226 rxp->hw_id = cfg_rsp->q_handles[i].hw_cqid;
2227 q0->rcb->q_dbell =
2228 rx->bna->pcidev.pci_bar_kva
2229 + ntohl(cfg_rsp->q_handles[i].ql_dbell);
2230 q0->hw_id = cfg_rsp->q_handles[i].hw_lqid;
2231 if (q1) {
2232 q1->rcb->q_dbell =
2233 rx->bna->pcidev.pci_bar_kva
2234 + ntohl(cfg_rsp->q_handles[i].qs_dbell);
2235 q1->hw_id = cfg_rsp->q_handles[i].hw_sqid;
2236 }
2237
2238
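 /* Initialize producer/consumer indices */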
2239 (*rxp->cq.ccb->hw_producer_index) = 0;
2240 rxp->cq.ccb->producer_index = 0;
2241 q0->rcb->producer_index = q0->rcb->consumer_index = 0;
2242 if (q1)
2243 q1->rcb->producer_index = q1->rcb->consumer_index = 0;
2244 }
2245
2246 bfa_fsm_send_event(rx, RX_E_STARTED);
2247}
2248
2249void
2250bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
2251{
2252 bfa_fsm_send_event(rx, RX_E_STOPPED);
2253}
2254
2255void
2256bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
2257{
2258 u32 cq_size, hq_size, dq_size;
2259 u32 cpage_count, hpage_count, dpage_count;
2260 struct bna_mem_info *mem_info;
2261 u32 cq_depth;
2262 u32 hq_depth;
2263 u32 dq_depth;
2264
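 /* The header queue exists only for non-SINGLE RXP types; the CQ must
  * be deep enough to hold completions from both data and header queues.
  */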
2265 dq_depth = q_cfg->q_depth;
2266 hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth);
2267 cq_depth = dq_depth + hq_depth;
2268
2269 BNA_TO_POWER_OF_2_HIGH(cq_depth);
2270 cq_size = cq_depth * BFI_CQ_WI_SIZE;
2271 cq_size = ALIGN(cq_size, PAGE_SIZE);
2272 cpage_count = SIZE_TO_PAGES(cq_size);
2273
2274 BNA_TO_POWER_OF_2_HIGH(dq_depth);
2275 dq_size = dq_depth * BFI_RXQ_WI_SIZE;
2276 dq_size = ALIGN(dq_size, PAGE_SIZE);
2277 dpage_count = SIZE_TO_PAGES(dq_size);
2278
2279 if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
2280 BNA_TO_POWER_OF_2_HIGH(hq_depth);
2281 hq_size = hq_depth * BFI_RXQ_WI_SIZE;
2282 hq_size = ALIGN(hq_size, PAGE_SIZE);
2283 hpage_count = SIZE_TO_PAGES(hq_size);
2284 } else
2285 hpage_count = 0;
2286
2287 res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
2288 mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
2289 mem_info->mem_type = BNA_MEM_T_KVA;
2290 mem_info->len = sizeof(struct bna_ccb);
2291 mem_info->num = q_cfg->num_paths;
2292
2293 res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
2294 mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
2295 mem_info->mem_type = BNA_MEM_T_KVA;
2296 mem_info->len = sizeof(struct bna_rcb);
2297 mem_info->num = BNA_GET_RXQS(q_cfg);
2298
2299 res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
2300 mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
2301 mem_info->mem_type = BNA_MEM_T_DMA;
2302 mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
2303 mem_info->num = q_cfg->num_paths;
2304
2305 res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
2306 mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
2307 mem_info->mem_type = BNA_MEM_T_KVA;
2308 mem_info->len = cpage_count * sizeof(void *);
2309 mem_info->num = q_cfg->num_paths;
2310
2311 res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
2312 mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
2313 mem_info->mem_type = BNA_MEM_T_DMA;
2314 mem_info->len = PAGE_SIZE * cpage_count;
2315 mem_info->num = q_cfg->num_paths;
2316
2317 res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
2318 mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
2319 mem_info->mem_type = BNA_MEM_T_DMA;
2320 mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
2321 mem_info->num = q_cfg->num_paths;
2322
2323 res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
2324 mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
2325 mem_info->mem_type = BNA_MEM_T_KVA;
2326 mem_info->len = dpage_count * sizeof(void *);
2327 mem_info->num = q_cfg->num_paths;
2328
2329 res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
2330 mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
2331 mem_info->mem_type = BNA_MEM_T_DMA;
2332 mem_info->len = PAGE_SIZE * dpage_count;
2333 mem_info->num = q_cfg->num_paths;
2334
2335 res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
2336 mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
2337 mem_info->mem_type = BNA_MEM_T_DMA;
2338 mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
2339 mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2340
2341 res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
2342 mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
2343 mem_info->mem_type = BNA_MEM_T_KVA;
2344 mem_info->len = hpage_count * sizeof(void *);
2345 mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2346
2347 res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
2348 mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
2349 mem_info->mem_type = BNA_MEM_T_DMA;
2350 mem_info->len = PAGE_SIZE * hpage_count;
2351 mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2352
2353 res_info[BNA_RX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
2354 mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info;
2355 mem_info->mem_type = BNA_MEM_T_DMA;
2356 mem_info->len = BFI_IBIDX_SIZE;
2357 mem_info->num = q_cfg->num_paths;
2358
2359 res_info[BNA_RX_RES_MEM_T_RIT].res_type = BNA_RES_T_MEM;
2360 mem_info = &res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info;
2361 mem_info->mem_type = BNA_MEM_T_KVA;
2362 mem_info->len = BFI_ENET_RSS_RIT_MAX;
2363 mem_info->num = 1;
2364
2365 res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
2366 res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
2367 res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
2368}
2369
2370struct bna_rx *
2371bna_rx_create(struct bna *bna, struct bnad *bnad,
2372 struct bna_rx_config *rx_cfg,
2373 const struct bna_rx_event_cbfn *rx_cbfn,
2374 struct bna_res_info *res_info,
2375 void *priv)
2376{
2377 struct bna_rx_mod *rx_mod = &bna->rx_mod;
2378 struct bna_rx *rx;
2379 struct bna_rxp *rxp;
2380 struct bna_rxq *q0;
2381 struct bna_rxq *q1;
2382 struct bna_intr_info *intr_info;
2383 u32 page_count;
2384 struct bna_mem_descr *ccb_mem;
2385 struct bna_mem_descr *rcb_mem;
2386 struct bna_mem_descr *unmapq_mem;
2387 struct bna_mem_descr *cqpt_mem;
2388 struct bna_mem_descr *cswqpt_mem;
2389 struct bna_mem_descr *cpage_mem;
2390 struct bna_mem_descr *hqpt_mem;
2391 struct bna_mem_descr *dqpt_mem;
2392 struct bna_mem_descr *hsqpt_mem;
2393 struct bna_mem_descr *dsqpt_mem;
2394 struct bna_mem_descr *hpage_mem;
2395 struct bna_mem_descr *dpage_mem;
2396 int i;
2397 int dpage_count, hpage_count, rcb_idx;
2398
2399 if (!bna_rx_res_check(rx_mod, rx_cfg))
2400 return NULL;
2401
2402 intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2403 ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
2404 rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
2405 unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
2406 cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
2407 cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
2408 cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
2409 hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
2410 dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
2411 hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
2412 dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
2413 hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
2414 dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];
2415
2416 page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.len /
2417 PAGE_SIZE;
2418
2419 dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.len /
2420 PAGE_SIZE;
2421
2422 hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.len /
2423 PAGE_SIZE;
2424
2425 rx = bna_rx_get(rx_mod, rx_cfg->rx_type);
2426 rx->bna = bna;
2427 rx->rx_flags = 0;
2428 INIT_LIST_HEAD(&rx->rxp_q);
2429 rx->stop_cbfn = NULL;
2430 rx->stop_cbarg = NULL;
2431 rx->priv = priv;
2432
2433 rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
2434 rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
2435 rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
2436 rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
2437 rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn;
2438
2439 rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
2440 rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;
2441
2442 if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) {
2443 switch (rx->type) {
2444 case BNA_RX_T_REGULAR:
2445 if (!(rx->bna->rx_mod.flags &
2446 BNA_RX_MOD_F_ENET_LOOPBACK))
2447 rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2448 break;
2449 case BNA_RX_T_LOOPBACK:
2450 if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK)
2451 rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2452 break;
2453 }
2454 }
2455
2456 rx->num_paths = rx_cfg->num_paths;
2457 for (i = 0, rcb_idx = 0; i < rx->num_paths; i++) {
2458 rxp = bna_rxp_get(rx_mod);
2459 list_add_tail(&rxp->qe, &rx->rxp_q);
2460 rxp->type = rx_cfg->rxp_type;
2461 rxp->rx = rx;
2462 rxp->cq.rx = rx;
2463
2464 q0 = bna_rxq_get(rx_mod);
2465 if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
2466 q1 = NULL;
2467 else
2468 q1 = bna_rxq_get(rx_mod);
2469
2470 if (1 == intr_info->num)
2471 rxp->vector = intr_info->idl[0].vector;
2472 else
2473 rxp->vector = intr_info->idl[i].vector;
2476
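 /* Set up the IB (interrupt block) for this completion queue */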
2477 rxp->cq.ib.ib_seg_host_addr.lsb =
2478 res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
2479 rxp->cq.ib.ib_seg_host_addr.msb =
2480 res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
2481 rxp->cq.ib.ib_seg_host_addr_kva =
2482 res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
2483 rxp->cq.ib.intr_type = intr_info->intr_type;
2484 if (intr_info->intr_type == BNA_INTR_T_MSIX)
2485 rxp->cq.ib.intr_vector = rxp->vector;
2486 else
2487 rxp->cq.ib.intr_vector = (1 << rxp->vector);
2488 rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo;
2489 rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT;
2490 rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
2491
2492 bna_rxp_add_rxqs(rxp, q0, q1);
2495
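 /* Set up the data (large buffer) receive queue q0 */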
2496 q0->rx = rx;
2497 q0->rxp = rxp;
2498
2499 q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2500 q0->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
2501 rcb_idx++;
2502 q0->rcb->q_depth = rx_cfg->q_depth;
2503 q0->rcb->rxq = q0;
2504 q0->rcb->bnad = bna->bnad;
2505 q0->rcb->id = 0;
2506 q0->rx_packets = q0->rx_bytes = 0;
2507 q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;
2508
2509 bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
2510 &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]);
2511
2512 if (rx->rcb_setup_cbfn)
2513 rx->rcb_setup_cbfn(bnad, q0->rcb);
2516
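 /* Set up the small/header receive queue q1, if this RXP has one */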
2517 if (q1) {
2518 q1->rx = rx;
2519 q1->rxp = rxp;
2520
2521 q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2522 q1->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
2523 rcb_idx++;
2524 q1->rcb->q_depth = rx_cfg->q_depth;
2525 q1->rcb->rxq = q1;
2526 q1->rcb->bnad = bna->bnad;
2527 q1->rcb->id = 1;
2528 q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
2529 rx_cfg->hds_config.forced_offset
2530 : rx_cfg->small_buff_size;
2531 q1->rx_packets = q1->rx_bytes = 0;
2532 q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;
2533
2534 bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
2535 &hqpt_mem[i], &hsqpt_mem[i],
2536 &hpage_mem[i]);
2537
2538 if (rx->rcb_setup_cbfn)
2539 rx->rcb_setup_cbfn(bnad, q1->rcb);
2540 }
2543
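 /* Set up the completion queue and its CCB */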
2544 rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
2545 rxp->cq.ccb->q_depth = rx_cfg->q_depth +
2546 ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
2547 0 : rx_cfg->q_depth);
2548 rxp->cq.ccb->cq = &rxp->cq;
2549 rxp->cq.ccb->rcb[0] = q0->rcb;
2550 q0->rcb->ccb = rxp->cq.ccb;
2551 if (q1) {
2552 rxp->cq.ccb->rcb[1] = q1->rcb;
2553 q1->rcb->ccb = rxp->cq.ccb;
2554 }
2555 rxp->cq.ccb->hw_producer_index =
2556 (u32 *)rxp->cq.ib.ib_seg_host_addr_kva;
2557 rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell;
2558 rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type;
2559 rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector;
2560 rxp->cq.ccb->rx_coalescing_timeo =
2561 rxp->cq.ib.coalescing_timeo;
2562 rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
2563 rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
2564 rxp->cq.ccb->bnad = bna->bnad;
2565 rxp->cq.ccb->id = i;
2566
2567 bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
2568 &cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[i]);
2569
2570 if (rx->ccb_setup_cbfn)
2571 rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
2572 }
2573
2574 rx->hds_cfg = rx_cfg->hds_config;
2575
2576 bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info);
2577
2578 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
2579
2580 rx_mod->rid_mask |= (1 << rx->rid);
2581
2582 return rx;
2583}
2584
2585void
2586bna_rx_destroy(struct bna_rx *rx)
2587{
2588 struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
2589 struct bna_rxq *q0 = NULL;
2590 struct bna_rxq *q1 = NULL;
2591 struct bna_rxp *rxp;
2592 struct list_head *qe;
2593
2594 bna_rxf_uninit(&rx->rxf);
2595
2596 while (!list_empty(&rx->rxp_q)) {
2597 bfa_q_deq(&rx->rxp_q, &rxp);
2598 GET_RXQS(rxp, q0, q1);
2599 if (rx->rcb_destroy_cbfn)
2600 rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
2601 q0->rcb = NULL;
2602 q0->rxp = NULL;
2603 q0->rx = NULL;
2604 bna_rxq_put(rx_mod, q0);
2605
2606 if (q1) {
2607 if (rx->rcb_destroy_cbfn)
2608 rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
2609 q1->rcb = NULL;
2610 q1->rxp = NULL;
2611 q1->rx = NULL;
2612 bna_rxq_put(rx_mod, q1);
2613 }
2614 rxp->rxq.slr.large = NULL;
2615 rxp->rxq.slr.small = NULL;
2616
2617 if (rx->ccb_destroy_cbfn)
2618 rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
2619 rxp->cq.ccb = NULL;
2620 rxp->rx = NULL;
2621 bna_rxp_put(rx_mod, rxp);
2622 }
2623
2624 list_for_each(qe, &rx_mod->rx_active_q) {
2625 if (qe == &rx->qe) {
2626 list_del(&rx->qe);
2627 bfa_q_qe_init(&rx->qe);
2628 break;
2629 }
2630 }
2631
2632 rx_mod->rid_mask &= ~(1 << rx->rid);
2633
2634 rx->bna = NULL;
2635 rx->priv = NULL;
2636 bna_rx_put(rx_mod, rx);
2637}
2638
2639void
2640bna_rx_enable(struct bna_rx *rx)
2641{
2642 if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
2643 return;
2644
2645 rx->rx_flags |= BNA_RX_F_ENABLED;
2646 if (rx->rx_flags & BNA_RX_F_ENET_STARTED)
2647 bfa_fsm_send_event(rx, RX_E_START);
2648}
2649
2650void
2651bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
2652 void (*cbfn)(void *, struct bna_rx *))
2653{
2654 if (type == BNA_SOFT_CLEANUP) {
2655
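 /* Soft cleanup: h/w is not touched, report the Rx as stopped right away */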
2656 (*cbfn)(rx->bna->bnad, rx);
2657 } else {
2658 rx->stop_cbfn = cbfn;
2659 rx->stop_cbarg = rx->bna->bnad;
2660
2661 rx->rx_flags &= ~BNA_RX_F_ENABLED;
2662
2663 bfa_fsm_send_event(rx, RX_E_STOP);
2664 }
2665}
2666
2667void
2668bna_rx_cleanup_complete(struct bna_rx *rx)
2669{
2670 bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE);
2671}
2672
2673enum bna_cb_status
2674bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
2675 enum bna_rxmode bitmask,
2676 void (*cbfn)(struct bnad *, struct bna_rx *))
2677{
2678 struct bna_rxf *rxf = &rx->rxf;
2679 int need_hw_config = 0;
2680
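 /* Error checks: promiscuous and default modes are mutually exclusive,
  * and each can be owned by at most one Rx in the system.
  */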
2683 if (is_promisc_enable(new_mode, bitmask)) {
2684
2685 if ((rx->bna->promisc_rid != BFI_INVALID_RID) &&
2686 (rx->bna->promisc_rid != rxf->rx->rid))
2687 goto err_return;
2688
2689
2690 if (rx->bna->default_mode_rid != BFI_INVALID_RID)
2691 goto err_return;
2692
2693
2694 if (is_default_enable(new_mode, bitmask))
2695 goto err_return;
2696 }
2697
2698 if (is_default_enable(new_mode, bitmask)) {
2699
2700 if ((rx->bna->default_mode_rid != BFI_INVALID_RID) &&
2701 (rx->bna->default_mode_rid != rxf->rx->rid)) {
2702 goto err_return;
2703 }
2704
2705
2706 if (rx->bna->promisc_rid != BFI_INVALID_RID)
2707 goto err_return;
2708 }
2709
2710
2711
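 /* Process the mode change commands */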
2712 if (is_promisc_enable(new_mode, bitmask)) {
2713 if (bna_rxf_promisc_enable(rxf))
2714 need_hw_config = 1;
2715 } else if (is_promisc_disable(new_mode, bitmask)) {
2716 if (bna_rxf_promisc_disable(rxf))
2717 need_hw_config = 1;
2718 }
2719
2720 if (is_allmulti_enable(new_mode, bitmask)) {
2721 if (bna_rxf_allmulti_enable(rxf))
2722 need_hw_config = 1;
2723 } else if (is_allmulti_disable(new_mode, bitmask)) {
2724 if (bna_rxf_allmulti_disable(rxf))
2725 need_hw_config = 1;
2726 }
2727
2728
2729
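 /* Trigger h/w reconfiguration only if something actually changed */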
2730 if (need_hw_config) {
2731 rxf->cam_fltr_cbfn = cbfn;
2732 rxf->cam_fltr_cbarg = rx->bna->bnad;
2733 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2734 } else if (cbfn)
2735 (*cbfn)(rx->bna->bnad, rx);
2736
2737 return BNA_CB_SUCCESS;
2738
2739err_return:
2740 return BNA_CB_FAIL;
2741}
2742
2743void
2744bna_rx_vlanfilter_enable(struct bna_rx *rx)
2745{
2746 struct bna_rxf *rxf = &rx->rxf;
2747
2748 if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
2749 rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
2750 rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
2751 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2752 }
2753}
2754
2755void
2756bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
2757{
2758 struct bna_rxp *rxp;
2759 struct list_head *qe;
2760
2761 list_for_each(qe, &rx->rxp_q) {
2762 rxp = (struct bna_rxp *)qe;
2763 rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
2764 bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo);
2765 }
2766}
2767
2768void
2769bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
2770{
2771 int i, j;
2772
2773 for (i = 0; i < BNA_LOAD_T_MAX; i++)
2774 for (j = 0; j < BNA_BIAS_T_MAX; j++)
2775 bna->rx_mod.dim_vector[i][j] = vector[i][j];
2776}
2777
2778void
2779bna_rx_dim_update(struct bna_ccb *ccb)
2780{
2781 struct bna *bna = ccb->cq->rx->bna;
2782 u32 load, bias;
2783 u32 pkt_rt, small_rt, large_rt;
2784 u8 coalescing_timeo;
2785
2786 if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
2787 (ccb->pkt_rate.large_pkt_cnt == 0))
2788 return;
2789
2790
2791
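 /* Map the measured packet rate (load) and the small/large packet mix
  * (bias) to a coalescing timeout from the DIM vector.
  */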
2792 small_rt = ccb->pkt_rate.small_pkt_cnt;
2793 large_rt = ccb->pkt_rate.large_pkt_cnt;
2794
2795 pkt_rt = small_rt + large_rt;
2796
2797 if (pkt_rt < BNA_PKT_RATE_10K)
2798 load = BNA_LOAD_T_LOW_4;
2799 else if (pkt_rt < BNA_PKT_RATE_20K)
2800 load = BNA_LOAD_T_LOW_3;
2801 else if (pkt_rt < BNA_PKT_RATE_30K)
2802 load = BNA_LOAD_T_LOW_2;
2803 else if (pkt_rt < BNA_PKT_RATE_40K)
2804 load = BNA_LOAD_T_LOW_1;
2805 else if (pkt_rt < BNA_PKT_RATE_50K)
2806 load = BNA_LOAD_T_HIGH_1;
2807 else if (pkt_rt < BNA_PKT_RATE_60K)
2808 load = BNA_LOAD_T_HIGH_2;
2809 else if (pkt_rt < BNA_PKT_RATE_80K)
2810 load = BNA_LOAD_T_HIGH_3;
2811 else
2812 load = BNA_LOAD_T_HIGH_4;
2813
2814 if (small_rt > (large_rt << 1))
2815 bias = 0;
2816 else
2817 bias = 1;
2818
2819 ccb->pkt_rate.small_pkt_cnt = 0;
2820 ccb->pkt_rate.large_pkt_cnt = 0;
2821
2822 coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
2823 ccb->rx_coalescing_timeo = coalescing_timeo;
2824
2825
2826 bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo);
2827}
2828
2829const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
2830 {12, 12},
2831 {6, 10},
2832 {5, 10},
2833 {4, 8},
2834 {3, 6},
2835 {3, 6},
2836 {2, 4},
2837 {1, 2},
2838};
2839
2840
2841
2842#define call_tx_stop_cbfn(tx) \
2843do { \
2844 if ((tx)->stop_cbfn) { \
2845 void (*cbfn)(void *, struct bna_tx *); \
2846 void *cbarg; \
2847 cbfn = (tx)->stop_cbfn; \
2848 cbarg = (tx)->stop_cbarg; \
2849 (tx)->stop_cbfn = NULL; \
2850 (tx)->stop_cbarg = NULL; \
2851 cbfn(cbarg, (tx)); \
2852 } \
2853} while (0)
2854
2855#define call_tx_prio_change_cbfn(tx) \
2856do { \
2857 if ((tx)->prio_change_cbfn) { \
2858 void (*cbfn)(struct bnad *, struct bna_tx *); \
2859 cbfn = (tx)->prio_change_cbfn; \
2860 (tx)->prio_change_cbfn = NULL; \
2861 cbfn((tx)->bna->bnad, (tx)); \
2862 } \
2863} while (0)
2864
2865static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx);
2866static void bna_bfi_tx_enet_start(struct bna_tx *tx);
2867static void bna_tx_enet_stop(struct bna_tx *tx);
2868
2869enum bna_tx_event {
2870 TX_E_START = 1,
2871 TX_E_STOP = 2,
2872 TX_E_FAIL = 3,
2873 TX_E_STARTED = 4,
2874 TX_E_STOPPED = 5,
2875 TX_E_PRIO_CHANGE = 6,
2876 TX_E_CLEANUP_DONE = 7,
2877 TX_E_BW_UPDATE = 8,
2878};
2879
2880bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx, enum bna_tx_event);
2881bfa_fsm_state_decl(bna_tx, start_wait, struct bna_tx, enum bna_tx_event);
2882bfa_fsm_state_decl(bna_tx, started, struct bna_tx, enum bna_tx_event);
2883bfa_fsm_state_decl(bna_tx, stop_wait, struct bna_tx, enum bna_tx_event);
2884bfa_fsm_state_decl(bna_tx, cleanup_wait, struct bna_tx,
2885 enum bna_tx_event);
2886bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
2887 enum bna_tx_event);
2888bfa_fsm_state_decl(bna_tx, prio_cleanup_wait, struct bna_tx,
2889 enum bna_tx_event);
2890bfa_fsm_state_decl(bna_tx, failed, struct bna_tx, enum bna_tx_event);
2891bfa_fsm_state_decl(bna_tx, quiesce_wait, struct bna_tx,
2892 enum bna_tx_event);
2893
2894static void
2895bna_tx_sm_stopped_entry(struct bna_tx *tx)
2896{
2897 call_tx_stop_cbfn(tx);
2898}
2899
2900static void
2901bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
2902{
2903 switch (event) {
2904 case TX_E_START:
2905 bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
2906 break;
2907
2908 case TX_E_STOP:
2909 call_tx_stop_cbfn(tx);
2910 break;
2911
2912 case TX_E_FAIL:
2913
2914 break;
2915
2916 case TX_E_PRIO_CHANGE:
2917 call_tx_prio_change_cbfn(tx);
2918 break;
2919
2920 case TX_E_BW_UPDATE:
2921
2922 break;
2923
2924 default:
2925 bfa_sm_fault(event);
2926 }
2927}
2928
2929static void
2930bna_tx_sm_start_wait_entry(struct bna_tx *tx)
2931{
2932 bna_bfi_tx_enet_start(tx);
2933}
2934
2935static void
2936bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event)
2937{
2938 switch (event) {
2939 case TX_E_STOP:
2940 tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
2941 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
2942 break;
2943
2944 case TX_E_FAIL:
2945 tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
2946 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
2947 break;
2948
2949 case TX_E_STARTED:
2950 if (tx->flags & (BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED)) {
2951 tx->flags &= ~(BNA_TX_F_PRIO_CHANGED |
2952 BNA_TX_F_BW_UPDATED);
2953 bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
2954 } else
2955 bfa_fsm_set_state(tx, bna_tx_sm_started);
2956 break;
2957
2958 case TX_E_PRIO_CHANGE:
2959 tx->flags |= BNA_TX_F_PRIO_CHANGED;
2960 break;
2961
2962 case TX_E_BW_UPDATE:
2963 tx->flags |= BNA_TX_F_BW_UPDATED;
2964 break;
2965
2966 default:
2967 bfa_sm_fault(event);
2968 }
2969}
2970
2971static void
2972bna_tx_sm_started_entry(struct bna_tx *tx)
2973{
2974 struct bna_txq *txq;
2975 struct list_head *qe;
2976 int is_regular = (tx->type == BNA_TX_T_REGULAR);
2977
2978 list_for_each(qe, &tx->txq_q) {
2979 txq = (struct bna_txq *)qe;
2980 txq->tcb->priority = txq->priority;
2981
2982 bna_ib_start(tx->bna, &txq->ib, is_regular);
2983 }
2984 tx->tx_resume_cbfn(tx->bna->bnad, tx);
2985}
2986
2987static void
2988bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
2989{
2990 switch (event) {
2991 case TX_E_STOP:
2992 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
2993 tx->tx_stall_cbfn(tx->bna->bnad, tx);
2994 bna_tx_enet_stop(tx);
2995 break;
2996
2997 case TX_E_FAIL:
2998 bfa_fsm_set_state(tx, bna_tx_sm_failed);
2999 tx->tx_stall_cbfn(tx->bna->bnad, tx);
3000 tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3001 break;
3002
3003 case TX_E_PRIO_CHANGE:
3004 case TX_E_BW_UPDATE:
3005 bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
3006 break;
3007
3008 default:
3009 bfa_sm_fault(event);
3010 }
3011}
3012
3013static void
3014bna_tx_sm_stop_wait_entry(struct bna_tx *tx)
3015{
3016}
3017
3018static void
3019bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
3020{
3021 switch (event) {
3022 case TX_E_FAIL:
3023 case TX_E_STOPPED:
3024 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3025 tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3026 break;
3027
3028 case TX_E_STARTED:
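 /* We got here via start_wait -> stop_wait on TX_E_STOP; the start
  * has now completed, so issue the h/w stop.
  */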
3033 bna_tx_enet_stop(tx);
3034 break;
3035
3036 case TX_E_PRIO_CHANGE:
3037 case TX_E_BW_UPDATE:
3038
3039 break;
3040
3041 default:
3042 bfa_sm_fault(event);
3043 }
3044}
3045
3046static void
3047bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx)
3048{
3049}
3050
3051static void
3052bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
3053{
3054 switch (event) {
3055 case TX_E_FAIL:
3056 case TX_E_PRIO_CHANGE:
3057 case TX_E_BW_UPDATE:
3058
3059 break;
3060
3061 case TX_E_CLEANUP_DONE:
3062 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3063 break;
3064
3065 default:
3066 bfa_sm_fault(event);
3067 }
3068}
3069
3070static void
3071bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
3072{
3073 tx->tx_stall_cbfn(tx->bna->bnad, tx);
3074 bna_tx_enet_stop(tx);
3075}
3076
3077static void
3078bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
3079{
3080 switch (event) {
3081 case TX_E_STOP:
3082 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
3083 break;
3084
3085 case TX_E_FAIL:
3086 bfa_fsm_set_state(tx, bna_tx_sm_failed);
3087 call_tx_prio_change_cbfn(tx);
3088 tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3089 break;
3090
3091 case TX_E_STOPPED:
3092 bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait);
3093 break;
3094
3095 case TX_E_PRIO_CHANGE:
3096 case TX_E_BW_UPDATE:
3097
3098 break;
3099
3100 default:
3101 bfa_sm_fault(event);
3102 }
3103}
3104
3105static void
3106bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx)
3107{
3108 call_tx_prio_change_cbfn(tx);
3109 tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3110}
3111
3112static void
3113bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
3114{
3115 switch (event) {
3116 case TX_E_STOP:
3117 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3118 break;
3119
3120 case TX_E_FAIL:
3121 bfa_fsm_set_state(tx, bna_tx_sm_failed);
3122 break;
3123
3124 case TX_E_PRIO_CHANGE:
3125 case TX_E_BW_UPDATE:
3126
3127 break;
3128
3129 case TX_E_CLEANUP_DONE:
3130 bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
3131 break;
3132
3133 default:
3134 bfa_sm_fault(event);
3135 }
3136}
3137
3138static void
3139bna_tx_sm_failed_entry(struct bna_tx *tx)
3140{
3141}
3142
3143static void
3144bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event)
3145{
3146 switch (event) {
3147 case TX_E_START:
3148 bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait);
3149 break;
3150
3151 case TX_E_STOP:
3152 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3153 break;
3154
3155 case TX_E_FAIL:
3156
3157 break;
3158
3159 case TX_E_CLEANUP_DONE:
3160 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3161 break;
3162
3163 default:
3164 bfa_sm_fault(event);
3165 }
3166}
3167
3168static void
3169bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx)
3170{
3171}
3172
3173static void
3174bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event)
3175{
3176 switch (event) {
3177 case TX_E_STOP:
3178 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3179 break;
3180
3181 case TX_E_FAIL:
3182 bfa_fsm_set_state(tx, bna_tx_sm_failed);
3183 break;
3184
3185 case TX_E_CLEANUP_DONE:
3186 bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
3187 break;
3188
3189 case TX_E_BW_UPDATE:
3190
3191 break;
3192
3193 default:
3194 bfa_sm_fault(event);
3195 }
3196}
3197
3198static void
3199bna_bfi_tx_enet_start(struct bna_tx *tx)
3200{
3201 struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req;
3202 struct bna_txq *txq = NULL;
3203 struct list_head *qe;
3204 int i;
3205
3206 bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
3207 BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid);
3208 cfg_req->mh.num_entries = htons(
3209 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req)));
3210
3211 cfg_req->num_queues = tx->num_txq;
3212 for (i = 0, qe = bfa_q_first(&tx->txq_q);
3213 i < tx->num_txq;
3214 i++, qe = bfa_q_next(qe)) {
3215 txq = (struct bna_txq *)qe;
3216
3217 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);
3218 cfg_req->q_cfg[i].q.priority = txq->priority;
3219
3220 cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
3221 txq->ib.ib_seg_host_addr.lsb;
3222 cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
3223 txq->ib.ib_seg_host_addr.msb;
3224 cfg_req->q_cfg[i].ib.intr.msix_index =
3225 htons((u16)txq->ib.intr_vector);
3226 }
3227
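 /* The IB config below is sent once for the whole Tx; txq still points
  * at the last queue visited by the loop above.
  */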
3228 cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED;
3229 cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
3230 cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
3231 cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED;
3232 cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX)
3233 ? BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
3234 cfg_req->ib_cfg.coalescing_timeout =
3235 htonl((u32)txq->ib.coalescing_timeo);
3236 cfg_req->ib_cfg.inter_pkt_timeout =
3237 htonl((u32)txq->ib.interpkt_timeo);
3238 cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count;
3239
3240 cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI;
3241 cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id);
3242 cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_DISABLED;
3243 cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED;
3244
3245 bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL,
3246 sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh);
3247 bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
3248}
3249
3250static void
3251bna_bfi_tx_enet_stop(struct bna_tx *tx)
3252{
3253 struct bfi_enet_req *req = &tx->bfi_enet_cmd.req;
3254
3255 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
3256 BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid);
3257 req->mh.num_entries = htons(
3258 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
3259 bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
3260 &req->mh);
3261 bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
3262}
3263
3264static void
3265bna_tx_enet_stop(struct bna_tx *tx)
3266{
3267 struct bna_txq *txq;
3268 struct list_head *qe;
3269
3270
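 /* Stop the IBs first, then ask the f/w to tear down the TxQ set */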
3271 list_for_each(qe, &tx->txq_q) {
3272 txq = (struct bna_txq *)qe;
3273 bna_ib_stop(tx->bna, &txq->ib);
3274 }
3275
3276 bna_bfi_tx_enet_stop(tx);
3277}
3278
3279static void
3280bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
3281 struct bna_mem_descr *qpt_mem,
3282 struct bna_mem_descr *swqpt_mem,
3283 struct bna_mem_descr *page_mem)
3284{
3285 u8 *kva;
3286 u64 dma;
3287 struct bna_dma_addr bna_dma;
3288 int i;
3289
3290 txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
3291 txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
3292 txq->qpt.kv_qpt_ptr = qpt_mem->kva;
3293 txq->qpt.page_count = page_count;
3294 txq->qpt.page_size = page_size;
3295
3296 txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
3297 txq->tcb->sw_q = page_mem->kva;
3298
3299 kva = page_mem->kva;
3300 BNA_GET_DMA_ADDR(&page_mem->dma, dma);
3301
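 /* Fill the s/w page-pointer table and the h/w QPT with per-page addresses */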
3302 for (i = 0; i < page_count; i++) {
3303 txq->tcb->sw_qpt[i] = kva;
3304 kva += PAGE_SIZE;
3305
3306 BNA_SET_DMA_ADDR(dma, &bna_dma);
3307 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
3308 bna_dma.lsb;
3309 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
3310 bna_dma.msb;
3311 dma += PAGE_SIZE;
3312 }
3313}
3314
3315static struct bna_tx *
3316bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3317{
3318 struct list_head *qe = NULL;
3319 struct bna_tx *tx = NULL;
3320
3321 if (list_empty(&tx_mod->tx_free_q))
3322 return NULL;
3323 if (type == BNA_TX_T_REGULAR) {
3324 bfa_q_deq(&tx_mod->tx_free_q, &qe);
3325 } else {
3326 bfa_q_deq_tail(&tx_mod->tx_free_q, &qe);
3327 }
3328 tx = (struct bna_tx *)qe;
3329 bfa_q_qe_init(&tx->qe);
3330 tx->type = type;
3331
3332 return tx;
3333}
3334
3335static void
3336bna_tx_free(struct bna_tx *tx)
3337{
3338 struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
3339 struct bna_txq *txq;
3340 struct list_head *prev_qe;
3341 struct list_head *qe;
3342
3343 while (!list_empty(&tx->txq_q)) {
3344 bfa_q_deq(&tx->txq_q, &txq);
3345 bfa_q_qe_init(&txq->qe);
3346 txq->tcb = NULL;
3347 txq->tx = NULL;
3348 list_add_tail(&txq->qe, &tx_mod->txq_free_q);
3349 }
3350
3351 list_for_each(qe, &tx_mod->tx_active_q) {
3352 if (qe == &tx->qe) {
3353 list_del(&tx->qe);
3354 bfa_q_qe_init(&tx->qe);
3355 break;
3356 }
3357 }
3358
3359 tx->bna = NULL;
3360 tx->priv = NULL;
3361
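 /* Reinsert into tx_free_q keeping it sorted by ascending rid */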
3362 prev_qe = NULL;
3363 list_for_each(qe, &tx_mod->tx_free_q) {
3364 if (((struct bna_tx *)qe)->rid < tx->rid)
3365 prev_qe = qe;
3366 else {
3367 break;
3368 }
3369 }
3370
3371 if (prev_qe == NULL) {
3372
3373 bfa_q_enq_head(&tx_mod->tx_free_q, &tx->qe);
3374 } else if (bfa_q_next(prev_qe) == &tx_mod->tx_free_q) {
3375
3376 list_add_tail(&tx->qe, &tx_mod->tx_free_q);
3377 } else {
3378
3379 bfa_q_next(&tx->qe) = bfa_q_next(prev_qe);
3380 bfa_q_prev(&tx->qe) = prev_qe;
3381 bfa_q_next(prev_qe) = &tx->qe;
3382 bfa_q_prev(bfa_q_next(&tx->qe)) = &tx->qe;
3383 }
3384}
3385
3386static void
3387bna_tx_start(struct bna_tx *tx)
3388{
3389 tx->flags |= BNA_TX_F_ENET_STARTED;
3390 if (tx->flags & BNA_TX_F_ENABLED)
3391 bfa_fsm_send_event(tx, TX_E_START);
3392}
3393
3394static void
3395bna_tx_stop(struct bna_tx *tx)
3396{
3397 tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
3398 tx->stop_cbarg = &tx->bna->tx_mod;
3399
3400 tx->flags &= ~BNA_TX_F_ENET_STARTED;
3401 bfa_fsm_send_event(tx, TX_E_STOP);
3402}
3403
3404static void
3405bna_tx_fail(struct bna_tx *tx)
3406{
3407 tx->flags &= ~BNA_TX_F_ENET_STARTED;
3408 bfa_fsm_send_event(tx, TX_E_FAIL);
3409}
3410
3411void
3412bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
3413{
3414 struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp;
3415 struct bna_txq *txq = NULL;
3416 struct list_head *qe;
3417 int i;
3418
3419 bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp,
3420 sizeof(struct bfi_enet_tx_cfg_rsp));
3421
3422 tx->hw_id = cfg_rsp->hw_id;
3423
3424 for (i = 0, qe = bfa_q_first(&tx->txq_q);
3425 i < tx->num_txq; i++, qe = bfa_q_next(qe)) {
3426 txq = (struct bna_txq *)qe;
3427
3428
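 /* Set up doorbell addresses and the h/w queue id from the f/w response */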
3429 txq->tcb->i_dbell->doorbell_addr =
3430 tx->bna->pcidev.pci_bar_kva
3431 + ntohl(cfg_rsp->q_handles[i].i_dbell);
3432 txq->tcb->q_dbell =
3433 tx->bna->pcidev.pci_bar_kva
3434 + ntohl(cfg_rsp->q_handles[i].q_dbell);
3435 txq->hw_id = cfg_rsp->q_handles[i].hw_qid;
3436
3437
3438 (*txq->tcb->hw_consumer_index) = 0;
3439 txq->tcb->producer_index = txq->tcb->consumer_index = 0;
3440 }
3441
3442 bfa_fsm_send_event(tx, TX_E_STARTED);
3443}
3444
3445void
3446bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
3447{
3448 bfa_fsm_send_event(tx, TX_E_STOPPED);
3449}
3450
3451void
3452bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod)
3453{
3454 struct bna_tx *tx;
3455 struct list_head *qe;
3456
3457 list_for_each(qe, &tx_mod->tx_active_q) {
3458 tx = (struct bna_tx *)qe;
3459 bfa_fsm_send_event(tx, TX_E_BW_UPDATE);
3460 }
3461}
3462
3463void
3464bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
3465{
3466 u32 q_size;
3467 u32 page_count;
3468 struct bna_mem_info *mem_info;
3469
3470 res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
3471 mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
3472 mem_info->mem_type = BNA_MEM_T_KVA;
3473 mem_info->len = sizeof(struct bna_tcb);
3474 mem_info->num = num_txq;
3475
3476 q_size = txq_depth * BFI_TXQ_WI_SIZE;
3477 q_size = ALIGN(q_size, PAGE_SIZE);
3478 page_count = q_size >> PAGE_SHIFT;
3479
3480 res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
3481 mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
3482 mem_info->mem_type = BNA_MEM_T_DMA;
3483 mem_info->len = page_count * sizeof(struct bna_dma_addr);
3484 mem_info->num = num_txq;
3485
3486 res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
3487 mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
3488 mem_info->mem_type = BNA_MEM_T_KVA;
3489 mem_info->len = page_count * sizeof(void *);
3490 mem_info->num = num_txq;
3491
3492 res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
3493 mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
3494 mem_info->mem_type = BNA_MEM_T_DMA;
3495 mem_info->len = PAGE_SIZE * page_count;
3496 mem_info->num = num_txq;
3497
3498 res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
3499 mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info;
3500 mem_info->mem_type = BNA_MEM_T_DMA;
3501 mem_info->len = BFI_IBIDX_SIZE;
3502 mem_info->num = num_txq;
3503
3504 res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
3505 res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
3506 BNA_INTR_T_MSIX;
3507 res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
3508}
3509
3510struct bna_tx *
3511bna_tx_create(struct bna *bna, struct bnad *bnad,
3512 struct bna_tx_config *tx_cfg,
3513 const struct bna_tx_event_cbfn *tx_cbfn,
3514 struct bna_res_info *res_info, void *priv)
3515{
3516 struct bna_intr_info *intr_info;
3517 struct bna_tx_mod *tx_mod = &bna->tx_mod;
3518 struct bna_tx *tx;
3519 struct bna_txq *txq;
3520 struct list_head *qe;
3521 int page_count;
3522 int i;
3523
3524 intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
3525 page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len) /
3526 PAGE_SIZE;
3527
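 /* Need either one shared MSI-X vector or one vector per TxQ */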
3532 if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
3533 return NULL;
3534
3535
3536
3537 tx = bna_tx_get(tx_mod, tx_cfg->tx_type);
3538 if (!tx)
3539 return NULL;
3540 tx->bna = bna;
3541 tx->priv = priv;
3542
3543
3544
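 /* Allocate TxQs for this Tx from the free list */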
3545 INIT_LIST_HEAD(&tx->txq_q);
3546 for (i = 0; i < tx_cfg->num_txq; i++) {
3547 if (list_empty(&tx_mod->txq_free_q))
3548 goto err_return;
3549
3550 bfa_q_deq(&tx_mod->txq_free_q, &txq);
3551 bfa_q_qe_init(&txq->qe);
3552 list_add_tail(&txq->qe, &tx->txq_q);
3553 txq->tx = tx;
3554 }
3555
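 /* Initialize the Tx object: register the driver callbacks and add it
  * to the active list.
  */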
3562 tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
3563 tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
3564
3565 tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
3566 tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
3567 tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;
3568
3569 list_add_tail(&tx->qe, &tx_mod->tx_active_q);
3570
3571 tx->num_txq = tx_cfg->num_txq;
3572
3573 tx->flags = 0;
3574 if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) {
3575 switch (tx->type) {
3576 case BNA_TX_T_REGULAR:
3577 if (!(tx->bna->tx_mod.flags &
3578 BNA_TX_MOD_F_ENET_LOOPBACK))
3579 tx->flags |= BNA_TX_F_ENET_STARTED;
3580 break;
3581 case BNA_TX_T_LOOPBACK:
3582 if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK)
3583 tx->flags |= BNA_TX_F_ENET_STARTED;
3584 break;
3585 }
3586 }
3587
3588
3589
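 /* Set up each TxQ: TCB, IB and queue page tables */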
3590 i = 0;
3591 list_for_each(qe, &tx->txq_q) {
3592 txq = (struct bna_txq *)qe;
3593 txq->tcb = (struct bna_tcb *)
3594 res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
3595 txq->tx_packets = 0;
3596 txq->tx_bytes = 0;
3597
3598
3599 txq->ib.ib_seg_host_addr.lsb =
3600 res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
3601 txq->ib.ib_seg_host_addr.msb =
3602 res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
3603 txq->ib.ib_seg_host_addr_kva =
3604 res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
3605 txq->ib.intr_type = intr_info->intr_type;
3606 txq->ib.intr_vector = (intr_info->num == 1) ?
3607 intr_info->idl[0].vector :
3608 intr_info->idl[i].vector;
3609 if (intr_info->intr_type == BNA_INTR_T_INTX)
3610 txq->ib.intr_vector = (1 << txq->ib.intr_vector);
3611 txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo;
3612 txq->ib.interpkt_timeo = BFI_TX_INTERPKT_TIMEO;
3613 txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT;
3614
3615
3616
3617 txq->tcb->q_depth = tx_cfg->txq_depth;
3618 txq->tcb->unmap_q = (void *)
3619 res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
3620 txq->tcb->hw_consumer_index =
3621 (u32 *)txq->ib.ib_seg_host_addr_kva;
3622 txq->tcb->i_dbell = &txq->ib.door_bell;
3623 txq->tcb->intr_type = txq->ib.intr_type;
3624 txq->tcb->intr_vector = txq->ib.intr_vector;
3625 txq->tcb->txq = txq;
3626 txq->tcb->bnad = bnad;
3627 txq->tcb->id = i;
3628
3629
3630 bna_txq_qpt_setup(txq, page_count, PAGE_SIZE,
3631 &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
3632 &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
3633 &res_info[BNA_TX_RES_MEM_T_PAGE].
3634 res_u.mem_info.mdl[i]);
3635
3636
3637 if (tx->tcb_setup_cbfn)
3638 (tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);
3639
3640 if (tx_cfg->num_txq == BFI_TX_MAX_PRIO)
3641 txq->priority = txq->tcb->id;
3642 else
3643 txq->priority = tx_mod->default_prio;
3644
3645 i++;
3646 }
3647
3648 tx->txf_vlan_id = 0;
3649
3650 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3651
3652 tx_mod->rid_mask |= (1 << tx->rid);
3653
3654 return tx;
3655
3656err_return:
3657 bna_tx_free(tx);
3658 return NULL;
3659}
3660
3661void
3662bna_tx_destroy(struct bna_tx *tx)
3663{
3664 struct bna_txq *txq;
3665 struct list_head *qe;
3666
3667 list_for_each(qe, &tx->txq_q) {
3668 txq = (struct bna_txq *)qe;
3669 if (tx->tcb_destroy_cbfn)
3670 (tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
3671 }
3672
3673 tx->bna->tx_mod.rid_mask &= ~(1 << tx->rid);
3674 bna_tx_free(tx);
3675}
3676
3677void
3678bna_tx_enable(struct bna_tx *tx)
3679{
3680 if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
3681 return;
3682
3683 tx->flags |= BNA_TX_F_ENABLED;
3684
3685 if (tx->flags & BNA_TX_F_ENET_STARTED)
3686 bfa_fsm_send_event(tx, TX_E_START);
3687}
3688
3689void
3690bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
3691 void (*cbfn)(void *, struct bna_tx *))
3692{
3693 if (type == BNA_SOFT_CLEANUP) {
3694 (*cbfn)(tx->bna->bnad, tx);
3695 return;
3696 }
3697
3698 tx->stop_cbfn = cbfn;
3699 tx->stop_cbarg = tx->bna->bnad;
3700
3701 tx->flags &= ~BNA_TX_F_ENABLED;
3702
3703 bfa_fsm_send_event(tx, TX_E_STOP);
3704}
3705
3706void
3707bna_tx_cleanup_complete(struct bna_tx *tx)
3708{
3709 bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE);
3710}
3711
3712static void
3713bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx)
3714{
3715 struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3716
3717 bfa_wc_down(&tx_mod->tx_stop_wc);
3718}
3719
3720static void
3721bna_tx_mod_cb_tx_stopped_all(void *arg)
3722{
3723 struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3724
3725 if (tx_mod->stop_cbfn)
3726 tx_mod->stop_cbfn(&tx_mod->bna->enet);
3727 tx_mod->stop_cbfn = NULL;
3728}
3729
3730void
3731bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
3732 struct bna_res_info *res_info)
3733{
3734 int i;
3735
3736 tx_mod->bna = bna;
3737 tx_mod->flags = 0;
3738
3739 tx_mod->tx = (struct bna_tx *)
3740 res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
3741 tx_mod->txq = (struct bna_txq *)
3742 res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;
3743
3744 INIT_LIST_HEAD(&tx_mod->tx_free_q);
3745 INIT_LIST_HEAD(&tx_mod->tx_active_q);
3746
3747 INIT_LIST_HEAD(&tx_mod->txq_free_q);
3748
3749 for (i = 0; i < bna->ioceth.attr.num_txq; i++) {
3750 tx_mod->tx[i].rid = i;
3751 bfa_q_qe_init(&tx_mod->tx[i].qe);
3752 list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
3753 bfa_q_qe_init(&tx_mod->txq[i].qe);
3754 list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
3755 }
3756
3757 tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL;
3758 tx_mod->default_prio = 0;
3759 tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED;
3760 tx_mod->iscsi_prio = -1;
3761}
3762
3763void
3764bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
3765{
3766 struct list_head *qe;
3767 int i;
3768
3769 i = 0;
3770 list_for_each(qe, &tx_mod->tx_free_q)
3771 i++;
3772
3773 i = 0;
3774 list_for_each(qe, &tx_mod->txq_free_q)
3775 i++;
3776
3777 tx_mod->bna = NULL;
3778}
3779
3780void
3781bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3782{
3783 struct bna_tx *tx;
3784 struct list_head *qe;
3785
3786 tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED;
3787 if (type == BNA_TX_T_LOOPBACK)
3788 tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK;
3789
3790 list_for_each(qe, &tx_mod->tx_active_q) {
3791 tx = (struct bna_tx *)qe;
3792 if (tx->type == type)
3793 bna_tx_start(tx);
3794 }
3795}
3796
3797void
3798bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3799{
3800 struct bna_tx *tx;
3801 struct list_head *qe;
3802
3803 tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
3804 tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
3805
3806 tx_mod->stop_cbfn = bna_enet_cb_tx_stopped;
3807
3808 bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod);
3809
3810 list_for_each(qe, &tx_mod->tx_active_q) {
3811 tx = (struct bna_tx *)qe;
3812 if (tx->type == type) {
3813 bfa_wc_up(&tx_mod->tx_stop_wc);
3814 bna_tx_stop(tx);
3815 }
3816 }
3817
3818 bfa_wc_wait(&tx_mod->tx_stop_wc);
3819}
3820
3821void
3822bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
3823{
3824 struct bna_tx *tx;
3825 struct list_head *qe;
3826
3827 tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
3828 tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
3829
3830 list_for_each(qe, &tx_mod->tx_active_q) {
3831 tx = (struct bna_tx *)qe;
3832 bna_tx_fail(tx);
3833 }
3834}
3835
3836void
3837bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
3838{
3839 struct bna_txq *txq;
3840 struct list_head *qe;
3841
3842 list_for_each(qe, &tx->txq_q) {
3843 txq = (struct bna_txq *)qe;
3844 bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo);
3845 }
3846}
3847