1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include "bna.h"
19
20static inline int
21ethport_can_be_up(struct bna_ethport *ethport)
22{
23 int ready = 0;
24 if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
25 ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
26 (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
27 (ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
28 else
29 ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
30 (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
31 !(ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
32 return ready;
33}
34
35#define ethport_is_up ethport_can_be_up
36
/* Events consumed by the ethport state machine. */
enum bna_ethport_event {
	ETHPORT_E_START			= 1,	/* from bna_ethport_start() */
	ETHPORT_E_STOP			= 2,	/* from bna_ethport_stop() */
	ETHPORT_E_FAIL			= 3,	/* from bna_ethport_fail() */
	ETHPORT_E_UP			= 4,	/* ethport_can_be_up() became true */
	ETHPORT_E_DOWN			= 5,	/* ethport lost its up condition */
	ETHPORT_E_FWRESP_UP_OK		= 6,	/* fw acked admin/loopback up */
	ETHPORT_E_FWRESP_DOWN		= 7,	/* fw acked admin/loopback down */
	ETHPORT_E_FWRESP_UP_FAIL	= 8,	/* fw rejected admin/loopback up */
};
47
/* Events consumed by the enet state machine. */
enum bna_enet_event {
	ENET_E_START		= 1,	/* from bna_enet_start()/enable() */
	ENET_E_STOP		= 2,	/* from bna_enet_stop()/disable() */
	ENET_E_FAIL		= 3,	/* from bna_enet_fail() */
	ENET_E_PAUSE_CFG	= 4,	/* from bna_enet_pause_config() */
	ENET_E_MTU_CFG		= 5,	/* from bna_enet_mtu_set() */
	ENET_E_FWRESP_PAUSE	= 6,	/* fw acked a pause-set request */
	ENET_E_CHLD_STOPPED	= 7,	/* all child objects finished stopping */
};
57
/* Events consumed by the ioceth state machine (handlers not in this chunk). */
enum bna_ioceth_event {
	IOCETH_E_ENABLE		= 1,
	IOCETH_E_DISABLE	= 2,
	IOCETH_E_IOC_RESET	= 3,
	IOCETH_E_IOC_FAILED	= 4,
	IOCETH_E_IOC_READY	= 5,
	IOCETH_E_ENET_ATTR_RESP	= 6,	/* see bna_bfi_attr_get_rsp() */
	IOCETH_E_ENET_STOPPED	= 7,	/* see bna_ioceth_cb_enet_stopped() */
	IOCETH_E_IOC_DISABLED	= 8,
};
68
/*
 * Copy one big-endian hardware stats sub-structure into host byte order.
 * Relies on count, i (int), stats_src, stats_dst (u64 *) and bna being
 * declared in the calling scope — see bna_bfi_stats_get_rsp().
 *
 * Fix: the original ended with "} while (0) \"; the trailing line
 * continuation silently spliced the next source line into the macro.
 */
#define bna_stats_copy(_name, _type)					\
do {									\
	count = sizeof(struct bfi_enet_stats_ ## _type) / sizeof(u64);	\
	stats_src = (u64 *)&bna->stats.hw_stats_kva->_name ## _stats;	\
	stats_dst = (u64 *)&bna->stats.hw_stats._name ## _stats;	\
	for (i = 0; i < count; i++)					\
		stats_dst[i] = be64_to_cpu(stats_src[i]);		\
} while (0)
77
78
79
80
81
82static void
83bna_bfi_ethport_enable_aen(struct bna_ethport *ethport,
84 struct bfi_msgq_mhdr *msghdr)
85{
86 ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;
87
88 if (ethport_can_be_up(ethport))
89 bfa_fsm_send_event(ethport, ETHPORT_E_UP);
90}
91
92static void
93bna_bfi_ethport_disable_aen(struct bna_ethport *ethport,
94 struct bfi_msgq_mhdr *msghdr)
95{
96 int ethport_up = ethport_is_up(ethport);
97
98 ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
99
100 if (ethport_up)
101 bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
102}
103
104static void
105bna_bfi_ethport_admin_rsp(struct bna_ethport *ethport,
106 struct bfi_msgq_mhdr *msghdr)
107{
108 struct bfi_enet_enable_req *admin_req =
109 ðport->bfi_enet_cmd.admin_req;
110 struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr;
111
112 switch (admin_req->enable) {
113 case BNA_STATUS_T_ENABLED:
114 if (rsp->error == BFI_ENET_CMD_OK)
115 bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
116 else {
117 ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
118 bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
119 }
120 break;
121
122 case BNA_STATUS_T_DISABLED:
123 bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
124 ethport->link_status = BNA_LINK_DOWN;
125 ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
126 break;
127 }
128}
129
130static void
131bna_bfi_ethport_lpbk_rsp(struct bna_ethport *ethport,
132 struct bfi_msgq_mhdr *msghdr)
133{
134 struct bfi_enet_diag_lb_req *diag_lb_req =
135 ðport->bfi_enet_cmd.lpbk_req;
136 struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr;
137
138 switch (diag_lb_req->enable) {
139 case BNA_STATUS_T_ENABLED:
140 if (rsp->error == BFI_ENET_CMD_OK)
141 bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
142 else {
143 ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
144 bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
145 }
146 break;
147
148 case BNA_STATUS_T_DISABLED:
149 bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
150 break;
151 }
152}
153
/* Firmware ack for a pause-config request; just kick the enet FSM. */
static void
bna_bfi_pause_set_rsp(struct bna_enet *enet, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(enet, ENET_E_FWRESP_PAUSE);
}
159
/* Firmware response carrying the enet attributes (resource limits). */
static void
bna_bfi_attr_get_rsp(struct bna_ioceth *ioceth,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_attr_rsp *rsp = (struct bfi_enet_attr_rsp *)msghdr;

	/* Cache the attributes only on the first response; later queries
	 * must not overwrite them.
	 */
	if (!ioceth->attr.fw_query_complete) {
		/* NOTE(review): num_txq and num_rxp are both taken from
		 * rsp->max_cfg — confirm a single shared limit is intended.
		 */
		ioceth->attr.num_txq = ntohl(rsp->max_cfg);
		ioceth->attr.num_rxp = ntohl(rsp->max_cfg);
		ioceth->attr.num_ucmac = ntohl(rsp->max_ucmac);
		ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
		ioceth->attr.max_rit_size = ntohl(rsp->rit_size);
		ioceth->attr.fw_query_complete = true;
	}

	bfa_fsm_send_event(ioceth, IOCETH_E_ENET_ATTR_RESP);
}
181
/*
 * Firmware response to a stats-get request: byte-swap the DMA'd hardware
 * stats into bna->stats.hw_stats and complete the pending get.
 */
static void
bna_bfi_stats_get_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;
	u64 *stats_src;
	u64 *stats_dst;
	u32 tx_enet_mask = ntohl(stats_req->tx_enet_mask);
	u32 rx_enet_mask = ntohl(stats_req->rx_enet_mask);
	int count;
	int i;

	/* Fixed-position stats blocks; bna_stats_copy() byte-swaps each
	 * u64.  NOTE(review): rlb is copied using the rad layout — this
	 * mirrors the structure sizes, but confirm it is intentional.
	 */
	bna_stats_copy(mac, mac);
	bna_stats_copy(bpc, bpc);
	bna_stats_copy(rad, rad);
	bna_stats_copy(rlb, rad);
	bna_stats_copy(fc_rx, fc_rx);
	bna_stats_copy(fc_tx, fc_tx);

	stats_src = (u64 *)&(bna->stats.hw_stats_kva->rxf_stats[0]);

	/* Demultiplex RXF stats: firmware packs stats back-to-back only
	 * for RXFs present in rx_enet_mask; all others are zeroed here.
	 */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.rxf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_rxf));
		if (rx_enet_mask & ((u32)(1 << i))) {
			int k;
			count = sizeof(struct bfi_enet_stats_rxf) /
				sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	/* Demultiplex TXF stats the same way; TXF data follows the RXF
	 * data, so stats_src simply continues from the loop above.
	 */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.txf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_txf));
		if (tx_enet_mask & ((u32)(1 << i))) {
			int k;
			count = sizeof(struct bfi_enet_stats_txf) /
				sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	/* Clear the busy flag before notifying so a new get can start. */
	bna->stats_mod.stats_get_busy = false;
	bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
}
235
/* Async event: physical link came up; record it and notify the driver. */
static void
bna_bfi_ethport_linkup_aen(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	ethport->link_status = BNA_LINK_UP;

	ethport->link_cbfn(ethport->bna->bnad, ethport->link_status);
}
245
/* Async event: physical link went down; record it and notify the driver. */
static void
bna_bfi_ethport_linkdown_aen(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	ethport->link_status = BNA_LINK_DOWN;

	ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
}
255
/* Handle an error interrupt: clear a halt condition (if raised) and run
 * the IOC error ISR.
 */
static void
bna_err_handler(struct bna *bna, u32 intr_status)
{
	if (BNA_IS_HALT_INTR(bna, intr_status))
		bna_halt_clear(bna);

	bfa_nw_ioc_error_isr(&bna->ioceth.ioc);
}
264
265void
266bna_mbox_handler(struct bna *bna, u32 intr_status)
267{
268 if (BNA_IS_ERR_INTR(bna, intr_status)) {
269 bna_err_handler(bna, intr_status);
270 return;
271 }
272 if (BNA_IS_MBOX_INTR(bna, intr_status))
273 bfa_nw_ioc_mbox_isr(&bna->ioceth.ioc);
274}
275
/*
 * Demultiplex firmware message-queue responses and async events to the
 * owning object (rx, tx, ethport, enet, ioceth or stats module).
 * bna_rx_from_rid()/bna_tx_from_rid() may yield NULL for a stale id,
 * hence the NULL checks before dispatch.
 */
static void
bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
{
	struct bna *bna = (struct bna *)arg;
	struct bna_tx *tx;
	struct bna_rx *rx;

	switch (msghdr->msg_id) {
	case BFI_ENET_I2H_RX_CFG_SET_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_start_rsp(rx, msghdr);
		break;

	case BFI_ENET_I2H_RX_CFG_CLR_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_stop_rsp(rx, msghdr);
		break;

	/* Generic RXF configuration acks share one handler. */
	case BFI_ENET_I2H_RIT_CFG_RSP:
	case BFI_ENET_I2H_RSS_CFG_RSP:
	case BFI_ENET_I2H_RSS_ENABLE_RSP:
	case BFI_ENET_I2H_RX_PROMISCUOUS_RSP:
	case BFI_ENET_I2H_RX_DEFAULT_RSP:
	case BFI_ENET_I2H_MAC_UCAST_CLR_RSP:
	case BFI_ENET_I2H_MAC_UCAST_ADD_RSP:
	case BFI_ENET_I2H_MAC_UCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_FILTER_RSP:
	case BFI_ENET_I2H_RX_VLAN_SET_RSP:
	case BFI_ENET_I2H_RX_VLAN_STRIP_ENABLE_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_cfg_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_MAC_UCAST_SET_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_ucast_set_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_MAC_MCAST_ADD_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_mcast_add_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_SET_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_start_rsp(tx, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_CLR_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_stop_rsp(tx, msghdr);
		break;

	case BFI_ENET_I2H_PORT_ADMIN_RSP:
		bna_bfi_ethport_admin_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_DIAG_LOOPBACK_RSP:
		bna_bfi_ethport_lpbk_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_SET_PAUSE_RSP:
		bna_bfi_pause_set_rsp(&bna->enet, msghdr);
		break;

	case BFI_ENET_I2H_GET_ATTR_RSP:
		bna_bfi_attr_get_rsp(&bna->ioceth, msghdr);
		break;

	case BFI_ENET_I2H_STATS_GET_RSP:
		bna_bfi_stats_get_rsp(bna, msghdr);
		break;

	case BFI_ENET_I2H_STATS_CLR_RSP:
		/* No action needed. */
		break;

	case BFI_ENET_I2H_LINK_UP_AEN:
		bna_bfi_ethport_linkup_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_LINK_DOWN_AEN:
		bna_bfi_ethport_linkdown_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_ENABLE_AEN:
		bna_bfi_ethport_enable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_DISABLE_AEN:
		bna_bfi_ethport_disable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_BW_UPDATE_AEN:
		bna_bfi_bw_update_aen(&bna->tx_mod);
		break;

	default:
		break;
	}
}
385
386
387
/* Invoke and clear the one-shot ethport stop callback, if armed.
 * Clearing before the call makes re-entry from the callback harmless.
 */
#define call_ethport_stop_cbfn(_ethport)				\
do {									\
	if ((_ethport)->stop_cbfn) {					\
		void (*cbfn)(struct bna_enet *);			\
		cbfn = (_ethport)->stop_cbfn;				\
		(_ethport)->stop_cbfn = NULL;				\
		cbfn(&(_ethport)->bna->enet);				\
	}								\
} while (0)

/* Invoke and clear the one-shot admin-up completion callback, if armed. */
#define call_ethport_adminup_cbfn(ethport, status)			\
do {									\
	if ((ethport)->adminup_cbfn) {					\
		void (*cbfn)(struct bnad *, enum bna_cb_status);	\
		cbfn = (ethport)->adminup_cbfn;				\
		(ethport)->adminup_cbfn = NULL;				\
		cbfn((ethport)->bna->bnad, status);			\
	}								\
} while (0)
407
/* Post a port admin-up request to firmware via the message queue. */
static void
bna_bfi_ethport_admin_up(struct bna_ethport *ethport)
{
	struct bfi_enet_enable_req *admin_up_req =
		&ethport->bfi_enet_cmd.admin_req;

	bfi_msgq_mhdr_set(admin_up_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
	admin_up_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	admin_up_req->enable = BNA_STATUS_T_ENABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &admin_up_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
424
/* Post a port admin-down request to firmware.  Up and down share the
 * BFI_ENET_H2I_PORT_ADMIN_UP_REQ message; only the enable field differs.
 */
static void
bna_bfi_ethport_admin_down(struct bna_ethport *ethport)
{
	struct bfi_enet_enable_req *admin_down_req =
		&ethport->bfi_enet_cmd.admin_req;

	bfi_msgq_mhdr_set(admin_down_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
	admin_down_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	admin_down_req->enable = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &admin_down_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
441
/* Post a diag loopback enable request to firmware. */
static void
bna_bfi_ethport_lpbk_up(struct bna_ethport *ethport)
{
	struct bfi_enet_diag_lb_req *lpbk_up_req =
		&ethport->bfi_enet_cmd.lpbk_req;

	bfi_msgq_mhdr_set(lpbk_up_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
	lpbk_up_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
	/* NOTE(review): LOOPBACK_INTERNAL maps to OPMODE_EXT and the other
	 * loopback type to OPMODE_CBL — looks inverted; confirm against
	 * the BFI opmode definitions before changing.
	 */
	lpbk_up_req->mode = (ethport->bna->enet.type ==
				BNA_ENET_T_LOOPBACK_INTERNAL) ?
				BFI_ENET_DIAG_LB_OPMODE_EXT :
				BFI_ENET_DIAG_LB_OPMODE_CBL;
	lpbk_up_req->enable = BNA_STATUS_T_ENABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_diag_lb_req), &lpbk_up_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
462
/* Post a diag loopback disable request to firmware. */
static void
bna_bfi_ethport_lpbk_down(struct bna_ethport *ethport)
{
	struct bfi_enet_diag_lb_req *lpbk_down_req =
		&ethport->bfi_enet_cmd.lpbk_req;

	bfi_msgq_mhdr_set(lpbk_down_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
	lpbk_down_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
	lpbk_down_req->enable = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_diag_lb_req), &lpbk_down_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
479
480static void
481bna_bfi_ethport_up(struct bna_ethport *ethport)
482{
483 if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
484 bna_bfi_ethport_admin_up(ethport);
485 else
486 bna_bfi_ethport_lpbk_up(ethport);
487}
488
489static void
490bna_bfi_ethport_down(struct bna_ethport *ethport)
491{
492 if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
493 bna_bfi_ethport_admin_down(ethport);
494 else
495 bna_bfi_ethport_lpbk_down(ethport);
496}
497
/* Ethport state machine states. */
bfa_fsm_state_decl(bna_ethport, stopped, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, last_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
510
/* [stopped] entry: complete any pending stop request. */
static void
bna_ethport_sm_stopped_entry(struct bna_ethport *ethport)
{
	call_ethport_stop_cbfn(ethport);
}

static void
bna_ethport_sm_stopped(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_START:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_STOP:
		/* Already stopped; just answer the caller. */
		call_ethport_stop_cbfn(ethport);
		break;

	case ETHPORT_E_FAIL:
		/* No-op while stopped. */
		break;

	case ETHPORT_E_DOWN:
		/* Can arrive from RX teardown after stop; ignore. */
		break;

	default:
		bfa_sm_fault(event);
	}
}
543
/* [down] entry: nothing to do. */
static void
bna_ethport_sm_down_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_down(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_UP:
		/* Ask firmware to bring the port up, then wait for its ack. */
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		bna_bfi_ethport_up(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
571
/* [up_resp_wait] entry: an up request is outstanding at firmware. */
static void
bna_ethport_sm_up_resp_wait_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_up_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		/* Must drain the outstanding response before stopping. */
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		/* Up condition lost while waiting; report interruption. */
		call_ethport_adminup_cbfn(ethport, BNA_CB_INTERRUPT);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		call_ethport_adminup_cbfn(ethport, BNA_CB_SUCCESS);
		bfa_fsm_set_state(ethport, bna_ethport_sm_up);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_FWRESP_DOWN:
		/* down_resp_wait -> up_resp_wait transition crossed this
		 * stale down ack; re-issue the up request.
		 */
		bna_bfi_ethport_up(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
615
/* [down_resp_wait] entry: a down request is (or will be) outstanding.
 * The down request itself is sent by the state that transitions here.
 */
static void
bna_ethport_sm_down_resp_wait_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_down_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_UP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* This is the ack of a previous up request crossing the
		 * pending down; issue the down now.
		 */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	default:
		bfa_sm_fault(event);
	}
}
657
/* [up] entry: nothing to do. */
static void
bna_ethport_sm_up_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_up(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		/* Bring the port down first; wait for the final fw ack. */
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
686
/* [last_resp_wait] entry: stopping, waiting for the final fw response. */
static void
bna_ethport_sm_last_resp_wait_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_last_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		/* This state is reached only with an up or down request
		 * outstanding; a DOWN here needs no extra action since the
		 * fw response below completes the stop anyway.
		 */
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* The up ack landed mid-stop; send the matching down and
		 * keep waiting for its ack.
		 */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
723
/* One-time ethport initialization; starts in the stopped state with
 * admin-up and port-enabled assumed until firmware says otherwise.
 */
static void
bna_ethport_init(struct bna_ethport *ethport, struct bna *bna)
{
	ethport->flags |= (BNA_ETHPORT_F_ADMIN_UP | BNA_ETHPORT_F_PORT_ENABLED);
	ethport->bna = bna;

	ethport->link_status = BNA_LINK_DOWN;
	ethport->link_cbfn = bnad_cb_ethport_link_status;

	ethport->rx_started_count = 0;

	ethport->stop_cbfn = NULL;
	ethport->adminup_cbfn = NULL;

	bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
}
740
741static void
742bna_ethport_uninit(struct bna_ethport *ethport)
743{
744 ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
745 ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
746
747 ethport->bna = NULL;
748}
749
/* Kick the ethport FSM out of the stopped state. */
static void
bna_ethport_start(struct bna_ethport *ethport)
{
	bfa_fsm_send_event(ethport, ETHPORT_E_START);
}
755
/* Ethport-stopped callback: release one reference on the enet's
 * child-stop wait counter.
 */
static void
bna_enet_cb_ethport_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}
761
/* Request an ethport stop; completion is signalled through stop_cbfn. */
static void
bna_ethport_stop(struct bna_ethport *ethport)
{
	ethport->stop_cbfn = bna_enet_cb_ethport_stopped;
	bfa_fsm_send_event(ethport, ETHPORT_E_STOP);
}
768
/* IOC failure path: reset port-enable state, force the link down and
 * fail the FSM.
 */
static void
bna_ethport_fail(struct bna_ethport *ethport)
{
	/* Reset the physical port enable to its default (enabled). */
	ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport->link_status != BNA_LINK_DOWN) {
		ethport->link_status = BNA_LINK_DOWN;
		ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
	}
	bfa_fsm_send_event(ethport, ETHPORT_E_FAIL);
}
781
782
783void
784bna_ethport_cb_rx_started(struct bna_ethport *ethport)
785{
786 ethport->rx_started_count++;
787
788 if (ethport->rx_started_count == 1) {
789 ethport->flags |= BNA_ETHPORT_F_RX_STARTED;
790
791 if (ethport_can_be_up(ethport))
792 bfa_fsm_send_event(ethport, ETHPORT_E_UP);
793 }
794}
795
796void
797bna_ethport_cb_rx_stopped(struct bna_ethport *ethport)
798{
799 int ethport_up = ethport_is_up(ethport);
800
801 ethport->rx_started_count--;
802
803 if (ethport->rx_started_count == 0) {
804 ethport->flags &= ~BNA_ETHPORT_F_RX_STARTED;
805
806 if (ethport_up)
807 bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
808 }
809}
810
811
812
/* Start all enet children (ethport, tx_mod, rx_mod) with the tx/rx type
 * derived from the enet type (regular vs loopback).
 */
#define bna_enet_chld_start(enet)					\
do {									\
	enum bna_tx_type tx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;			\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bna_ethport_start(&(enet)->bna->ethport);			\
	bna_tx_mod_start(&(enet)->bna->tx_mod, tx_type);		\
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);		\
} while (0)

/* Stop all enet children, tracking completion with a wait counter.
 * One bfa_wc_up() precedes each child stop so the counter cannot hit
 * zero before every stop has been issued; bfa_wc_wait() drops the
 * initial reference.
 */
#define bna_enet_chld_stop(enet)					\
do {									\
	enum bna_tx_type tx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;			\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_ethport_stop(&(enet)->bna->ethport);			\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_tx_mod_stop(&(enet)->bna->tx_mod, tx_type);			\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);			\
	bfa_wc_wait(&(enet)->chld_stop_wc);				\
} while (0)

/* Fail all enet children immediately (no wait counter involved). */
#define bna_enet_chld_fail(enet)					\
do {									\
	bna_ethport_fail(&(enet)->bna->ethport);			\
	bna_tx_mod_fail(&(enet)->bna->tx_mod);				\
	bna_rx_mod_fail(&(enet)->bna->rx_mod);				\
} while (0)

/* Restart only the RX module (used after an MTU reconfiguration). */
#define bna_enet_rx_start(enet)						\
do {									\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);		\
} while (0)

/* Stop only the RX module, waiting for completion via the wait counter. */
#define bna_enet_rx_stop(enet)						\
do {									\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);			\
	bfa_wc_wait(&(enet)->chld_stop_wc);				\
} while (0)
869
/* Invoke and clear the one-shot enet stop callback, if armed. */
#define call_enet_stop_cbfn(enet)					\
do {									\
	if ((enet)->stop_cbfn) {					\
		void (*cbfn)(void *);					\
		void *cbarg;						\
		cbfn = (enet)->stop_cbfn;				\
		cbarg = (enet)->stop_cbarg;				\
		(enet)->stop_cbfn = NULL;				\
		(enet)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)

/* Invoke and clear the one-shot pause-config completion callback. */
#define call_enet_pause_cbfn(enet)					\
do {									\
	if ((enet)->pause_cbfn) {					\
		void (*cbfn)(struct bnad *);				\
		cbfn = (enet)->pause_cbfn;				\
		(enet)->pause_cbfn = NULL;				\
		cbfn((enet)->bna->bnad);				\
	}								\
} while (0)

/* Invoke and clear the one-shot MTU-config completion callback. */
#define call_enet_mtu_cbfn(enet)					\
do {									\
	if ((enet)->mtu_cbfn) {						\
		void (*cbfn)(struct bnad *);				\
		cbfn = (enet)->mtu_cbfn;				\
		(enet)->mtu_cbfn = NULL;				\
		cbfn((enet)->bna->bnad);				\
	}								\
} while (0)
902
903static void bna_enet_cb_chld_stopped(void *arg);
904static void bna_bfi_pause_set(struct bna_enet *enet);
905
/* Enet state machine states. */
bfa_fsm_state_decl(bna_enet, stopped, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, pause_init_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, last_resp_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, started, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, cfg_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, cfg_stop_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, chld_stop_wait, struct bna_enet,
			enum bna_enet_event);
920
/* [stopped] entry: flush every pending completion callback. */
static void
bna_enet_sm_stopped_entry(struct bna_enet *enet)
{
	call_enet_pause_cbfn(enet);
	call_enet_mtu_cbfn(enet);
	call_enet_stop_cbfn(enet);
}

static void
bna_enet_sm_stopped(struct bna_enet *enet, enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_START:
		bfa_fsm_set_state(enet, bna_enet_sm_pause_init_wait);
		break;

	case ENET_E_STOP:
		call_enet_stop_cbfn(enet);
		break;

	case ENET_E_FAIL:
		/* No-op while stopped. */
		break;

	case ENET_E_PAUSE_CFG:
		/* Nothing to program; just complete the request. */
		call_enet_pause_cbfn(enet);
		break;

	case ENET_E_MTU_CFG:
		call_enet_mtu_cbfn(enet);
		break;

	case ENET_E_CHLD_STOPPED:
		/* Child-stop completion can trail a failure into the
		 * stopped state; ignore it.
		 */
		break;

	default:
		bfa_sm_fault(event);
	}
}
965
/* [pause_init_wait] entry: program the initial pause configuration. */
static void
bna_enet_sm_pause_init_wait_entry(struct bna_enet *enet)
{
	bna_bfi_pause_set(enet);
}

static void
bna_enet_sm_pause_init_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_last_resp_wait);
		break;

	case ENET_E_FAIL:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	case ENET_E_PAUSE_CFG:
		/* Remember that a newer config must be re-sent. */
		enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
		break;

	case ENET_E_MTU_CFG:
		/* MTU takes effect when the children start; nothing now. */
		break;

	case ENET_E_FWRESP_PAUSE:
		if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
			/* Re-send the updated pause config. */
			enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
			bna_bfi_pause_set(enet);
		} else {
			bfa_fsm_set_state(enet, bna_enet_sm_started);
			bna_enet_chld_start(enet);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}
1009
/* [last_resp_wait] entry: stopping with a pause response outstanding;
 * pending pause changes become irrelevant.
 */
static void
bna_enet_sm_last_resp_wait_entry(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
}

static void
bna_enet_sm_last_resp_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
	case ENET_E_FWRESP_PAUSE:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1030
/* [started] entry: complete any pause/MTU requests that were queued
 * while a previous configuration was in flight.
 */
static void
bna_enet_sm_started_entry(struct bna_enet *enet)
{
	call_enet_pause_cbfn(enet);
	call_enet_mtu_cbfn(enet);
}

static void
bna_enet_sm_started(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
		break;

	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_PAUSE_CFG:
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
		bna_bfi_pause_set(enet);
		break;

	case ENET_E_MTU_CFG:
		/* MTU change requires an RX restart. */
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
		bna_enet_rx_stop(enet);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1070
/* [cfg_wait] entry: a pause set or RX restart is in flight. */
static void
bna_enet_sm_cfg_wait_entry(struct bna_enet *enet)
{
}

static void
bna_enet_sm_cfg_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_stop_wait);
		break;

	case ENET_E_FAIL:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_PAUSE_CFG:
		/* Queue the change until the current response arrives. */
		enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
		break;

	case ENET_E_MTU_CFG:
		enet->flags |= BNA_ENET_F_MTU_CHANGED;
		break;

	case ENET_E_CHLD_STOPPED:
		bna_enet_rx_start(enet);
		/* fall through — after the RX restart, process any queued
		 * config change exactly as on a pause response.
		 */
	case ENET_E_FWRESP_PAUSE:
		if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
			enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
			bna_bfi_pause_set(enet);
		} else if (enet->flags & BNA_ENET_F_MTU_CHANGED) {
			enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
			bna_enet_rx_stop(enet);
		} else {
			bfa_fsm_set_state(enet, bna_enet_sm_started);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}
1121
/* [cfg_stop_wait] entry: stop requested while a config was in flight;
 * pending config changes are abandoned.
 */
static void
bna_enet_sm_cfg_stop_wait_entry(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
	enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
}

static void
bna_enet_sm_cfg_stop_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_FWRESP_PAUSE:
	case ENET_E_CHLD_STOPPED:
		/* The in-flight operation completed; now stop the children. */
		bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1148
/* [chld_stop_wait] entry: stop all children and wait for completion. */
static void
bna_enet_sm_chld_stop_wait_entry(struct bna_enet *enet)
{
	bna_enet_chld_stop(enet);
}

static void
bna_enet_sm_chld_stop_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_CHLD_STOPPED:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1173
/* Post the current pause configuration to firmware. */
static void
bna_bfi_pause_set(struct bna_enet *enet)
{
	struct bfi_enet_set_pause_req *pause_req = &enet->pause_req;

	bfi_msgq_mhdr_set(pause_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_SET_PAUSE_REQ, 0, 0);
	pause_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_set_pause_req)));
	pause_req->tx_pause = enet->pause_config.tx_pause;
	pause_req->rx_pause = enet->pause_config.rx_pause;

	bfa_msgq_cmd_set(&enet->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_set_pause_req), &pause_req->mh);
	bfa_msgq_cmd_post(&enet->bna->msgq, &enet->msgq_cmd);
}
1190
/* Wait-counter completion: all enet children have stopped. */
static void
bna_enet_cb_chld_stopped(void *arg)
{
	struct bna_enet *enet = (struct bna_enet *)arg;

	bfa_fsm_send_event(enet, ENET_E_CHLD_STOPPED);
}
1198
/* One-time enet initialization; starts in the stopped state. */
static void
bna_enet_init(struct bna_enet *enet, struct bna *bna)
{
	enet->bna = bna;
	enet->flags = 0;
	enet->mtu = 0;
	enet->type = BNA_ENET_T_REGULAR;

	enet->stop_cbfn = NULL;
	enet->stop_cbarg = NULL;

	enet->pause_cbfn = NULL;

	enet->mtu_cbfn = NULL;

	bfa_fsm_set_state(enet, bna_enet_sm_stopped);
}
1216
1217static void
1218bna_enet_uninit(struct bna_enet *enet)
1219{
1220 enet->flags = 0;
1221
1222 enet->bna = NULL;
1223}
1224
/* IOC is ready; start the enet only if the user has enabled it. */
static void
bna_enet_start(struct bna_enet *enet)
{
	enet->flags |= BNA_ENET_F_IOCETH_READY;
	if (enet->flags & BNA_ENET_F_ENABLED)
		bfa_fsm_send_event(enet, ENET_E_START);
}
1232
/* Enet-stopped completion used during ioceth shutdown. */
static void
bna_ioceth_cb_enet_stopped(void *arg)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	bfa_fsm_send_event(ioceth, IOCETH_E_ENET_STOPPED);
}
1240
/* Stop the enet on behalf of the ioceth; completion goes to the ioceth FSM. */
static void
bna_enet_stop(struct bna_enet *enet)
{
	enet->stop_cbfn = bna_ioceth_cb_enet_stopped;
	enet->stop_cbarg = &enet->bna->ioceth;

	enet->flags &= ~BNA_ENET_F_IOCETH_READY;
	bfa_fsm_send_event(enet, ENET_E_STOP);
}
1250
/* IOC failure path: mark the IOC as not ready and fail the FSM. */
static void
bna_enet_fail(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_IOCETH_READY;
	bfa_fsm_send_event(enet, ENET_E_FAIL);
}
1257
/* TX module stopped: release one child-stop wait reference. */
void
bna_enet_cb_tx_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}
1263
/* RX module stopped: release one child-stop wait reference. */
void
bna_enet_cb_rx_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}
1269
/* Return the currently configured MTU. */
int
bna_enet_mtu_get(struct bna_enet *enet)
{
	return enet->mtu;
}
1275
/* User enable: only honoured from the stopped state; start immediately
 * if the IOC is already ready, otherwise bna_enet_start() will do it.
 */
void
bna_enet_enable(struct bna_enet *enet)
{
	if (enet->fsm != (bfa_sm_t)bna_enet_sm_stopped)
		return;

	enet->flags |= BNA_ENET_F_ENABLED;

	if (enet->flags & BNA_ENET_F_IOCETH_READY)
		bfa_fsm_send_event(enet, ENET_E_START);
}
1287
1288void
1289bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
1290 void (*cbfn)(void *))
1291{
1292 if (type == BNA_SOFT_CLEANUP) {
1293 (*cbfn)(enet->bna->bnad);
1294 return;
1295 }
1296
1297 enet->stop_cbfn = cbfn;
1298 enet->stop_cbarg = enet->bna->bnad;
1299
1300 enet->flags &= ~BNA_ENET_F_ENABLED;
1301
1302 bfa_fsm_send_event(enet, ENET_E_STOP);
1303}
1304
1305void
1306bna_enet_pause_config(struct bna_enet *enet,
1307 struct bna_pause_config *pause_config,
1308 void (*cbfn)(struct bnad *))
1309{
1310 enet->pause_config = *pause_config;
1311
1312 enet->pause_cbfn = cbfn;
1313
1314 bfa_fsm_send_event(enet, ENET_E_PAUSE_CFG);
1315}
1316
1317void
1318bna_enet_mtu_set(struct bna_enet *enet, int mtu,
1319 void (*cbfn)(struct bnad *))
1320{
1321 enet->mtu = mtu;
1322
1323 enet->mtu_cbfn = cbfn;
1324
1325 bfa_fsm_send_event(enet, ENET_E_MTU_CFG);
1326}
1327
/* Fetch the permanent (factory) MAC address from the IOC into *mac. */
void
bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac)
{
	*mac = bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc);
}
1333
1334
1335
/* Enable mailbox interrupts for the ioceth.
 * NOTE(review): intr_status is read but never used — presumably the read
 * itself acknowledges/flushes pending status before enabling; confirm
 * against bna_intr_status_get()'s definition.
 */
#define enable_mbox_intr(_ioceth)					\
do {									\
	u32 intr_status;						\
	bna_intr_status_get((_ioceth)->bna, intr_status);		\
	bnad_cb_mbox_intr_enable((_ioceth)->bna->bnad);			\
	bna_mbox_intr_enable((_ioceth)->bna);				\
} while (0)

/* Disable mailbox interrupts; mirror image of enable_mbox_intr(). */
#define disable_mbox_intr(_ioceth)				\
do {								\
	bna_mbox_intr_disable((_ioceth)->bna);			\
	bnad_cb_mbox_intr_disable((_ioceth)->bna->bnad);	\
} while (0)

/* Invoke and clear the pending ioceth stop-completion callback, if any.
 * The callback fields are cleared before the call so re-entry is safe.
 */
#define call_ioceth_stop_cbfn(_ioceth)					\
do {									\
	if ((_ioceth)->stop_cbfn) {					\
		void (*cbfn)(struct bnad *);				\
		struct bnad *cbarg;					\
		cbfn = (_ioceth)->stop_cbfn;				\
		cbarg = (_ioceth)->stop_cbarg;				\
		(_ioceth)->stop_cbfn = NULL;				\
		(_ioceth)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)

/* Stats module lifecycle helpers: nothing to free on uninit; start/stop
 * toggle ioc_ready; fail additionally clears any in-flight get/clear. */
#define bna_stats_mod_uninit(_stats_mod)				\
do {									\
} while (0)

#define bna_stats_mod_start(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = true;					\
} while (0)

#define bna_stats_mod_stop(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = false;				\
} while (0)

#define bna_stats_mod_fail(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = false;				\
	(_stats_mod)->stats_get_busy = false;				\
	(_stats_mod)->stats_clr_busy = false;				\
} while (0)
1383
static void bna_bfi_attr_get(struct bna_ioceth *ioceth);

/* ioceth state machine: one _entry action and one event handler are
 * declared per state via bfa_fsm_state_decl().
 */
bfa_fsm_state_decl(bna_ioceth, stopped, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ioc_ready_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, enet_attr_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ready, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, last_resp_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, enet_stop_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ioc_disable_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, failed, struct bna_ioceth,
			enum bna_ioceth_event);
1402
/* Entering stopped: complete any pending bna_ioceth_disable() callback. */
static void
bna_ioceth_sm_stopped_entry(struct bna_ioceth *ioceth)
{
	call_ioceth_stop_cbfn(ioceth);
}
1408
/* stopped state: waiting for an enable request; also tolerates stray
 * IOC reset/fail notifications.
 */
static void
bna_ioceth_sm_stopped(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_ENABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
		bfa_nw_ioc_enable(&ioceth->ioc);
		break;

	case IOCETH_E_DISABLE:
		/* Already stopped; re-enter to fire the stop callback. */
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1436
static void
bna_ioceth_sm_ioc_ready_wait_entry(struct bna_ioceth *ioceth)
{
	/*
	 * Intentionally empty: bfa_nw_ioc_enable() is issued by the
	 * *previous* state (see bna_ioceth_sm_stopped), and the
	 * failed -> ioc_ready_wait transition re-enters here after an
	 * IOC reset without re-enabling.
	 */
}
1445
/* ioc_ready_wait state: IOC enable in progress; wait for it to come up. */
static void
bna_ioceth_sm_ioc_ready_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	case IOCETH_E_IOC_READY:
		/* IOC is up; next step is fetching enet attributes. */
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_attr_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1473
/* Entering enet_attr_wait: issue the GET_ATTR request to firmware. */
static void
bna_ioceth_sm_enet_attr_wait_entry(struct bna_ioceth *ioceth)
{
	bna_bfi_attr_get(ioceth);
}
1479
/* enet_attr_wait state: GET_ATTR outstanding; a disable request must
 * wait for the last firmware response before the IOC can be disabled.
 */
static void
bna_ioceth_sm_enet_attr_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_last_resp_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	case IOCETH_E_ENET_ATTR_RESP:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1502
/* Entering ready: start the enet and stats modules, then tell the
 * driver layer the ioceth is fully operational.
 */
static void
bna_ioceth_sm_ready_entry(struct bna_ioceth *ioceth)
{
	bna_enet_start(&ioceth->bna->enet);
	bna_stats_mod_start(&ioceth->bna->stats_mod);
	bnad_cb_ioceth_ready(ioceth->bna->bnad);
}
1510
/* ready state: normal operation until a disable request or IOC failure. */
static void
bna_ioceth_sm_ready(struct bna_ioceth *ioceth, enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_stop_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		/* Propagate the failure down to the enet and stats
		 * modules before transitioning.
		 */
		disable_mbox_intr(ioceth);
		bna_enet_fail(&ioceth->bna->enet);
		bna_stats_mod_fail(&ioceth->bna->stats_mod);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1530
/* Entering last_resp_wait: nothing to do — just waiting for the
 * outstanding GET_ATTR response (or an IOC failure).
 */
static void
bna_ioceth_sm_last_resp_wait_entry(struct bna_ioceth *ioceth)
{
}
1535
/* last_resp_wait state: disable was requested while GET_ATTR was in
 * flight; once the response (or an IOC failure) arrives, disable the IOC.
 */
static void
bna_ioceth_sm_last_resp_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_FAILED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		disable_mbox_intr(ioceth);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_ENET_ATTR_RESP:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1556
/* Entering enet_stop_wait: stop the stats module and ask the enet to
 * stop; IOCETH_E_ENET_STOPPED arrives when the enet completes.
 */
static void
bna_ioceth_sm_enet_stop_wait_entry(struct bna_ioceth *ioceth)
{
	bna_stats_mod_stop(&ioceth->bna->stats_mod);
	bna_enet_stop(&ioceth->bna->enet);
}
1563
/* enet_stop_wait state: waiting for the enet to stop before disabling
 * the IOC; an IOC failure short-circuits the wait.
 */
static void
bna_ioceth_sm_enet_stop_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_FAILED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		disable_mbox_intr(ioceth);
		bna_enet_fail(&ioceth->bna->enet);
		bna_stats_mod_fail(&ioceth->bna->stats_mod);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_ENET_STOPPED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1586
/* Entering ioc_disable_wait: bfa_nw_ioc_disable() was already issued by
 * the previous state; just wait for IOCETH_E_IOC_DISABLED.
 */
static void
bna_ioceth_sm_ioc_disable_wait_entry(struct bna_ioceth *ioceth)
{
}
1591
/* ioc_disable_wait state: final wait for the IOC to report disabled. */
static void
bna_ioceth_sm_ioc_disable_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_DISABLED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
		break;

	case IOCETH_E_ENET_STOPPED:
		/* Harmless late completion from a stopping enet —
		 * presumably reachable when the enet was failed/stopped
		 * on the way here; nothing left to do.
		 */
		break;

	default:
		bfa_sm_fault(event);
	}
}
1611
/* Entering failed: notify the driver layer of the IOC failure. */
static void
bna_ioceth_sm_failed_entry(struct bna_ioceth *ioceth)
{
	bnad_cb_ioceth_failed(ioceth->bna->bnad);
}
1617
/* failed state: wait for either a disable request or an IOC reset that
 * lets us retry the bring-up; repeated failure reports are ignored.
 */
static void
bna_ioceth_sm_failed(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_IOC_RESET:
		/* IOC recovered; re-enter the ready-wait path (the IOC
		 * enable is still in effect — see ioc_ready_wait_entry).
		 */
		enable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		break;

	default:
		bfa_sm_fault(event);
	}
}
1640
/* Build and post the BFI_ENET_H2I_GET_ATTR_REQ message to firmware.
 * The response is delivered as IOCETH_E_ENET_ATTR_RESP to the FSM.
 */
static void
bna_bfi_attr_get(struct bna_ioceth *ioceth)
{
	struct bfi_enet_attr_req *attr_req = &ioceth->attr_req;

	bfi_msgq_mhdr_set(attr_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_GET_ATTR_REQ, 0, 0);
	attr_req->mh.num_entries = htons(
	bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_attr_req)));
	bfa_msgq_cmd_set(&ioceth->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_attr_req), &attr_req->mh);
	bfa_msgq_cmd_post(&ioceth->bna->msgq, &ioceth->msgq_cmd);
}
1654
1655
1656
1657static void
1658bna_cb_ioceth_enable(void *arg, enum bfa_status error)
1659{
1660 struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1661
1662 if (error)
1663 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
1664 else
1665 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_READY);
1666}
1667
1668static void
1669bna_cb_ioceth_disable(void *arg)
1670{
1671 struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1672
1673 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_DISABLED);
1674}
1675
1676static void
1677bna_cb_ioceth_hbfail(void *arg)
1678{
1679 struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1680
1681 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
1682}
1683
1684static void
1685bna_cb_ioceth_reset(void *arg)
1686{
1687 struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1688
1689 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_RESET);
1690}
1691
1692static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
1693 bna_cb_ioceth_enable,
1694 bna_cb_ioceth_disable,
1695 bna_cb_ioceth_hbfail,
1696 bna_cb_ioceth_reset
1697};
1698
1699static void bna_attr_init(struct bna_ioceth *ioceth)
1700{
1701 ioceth->attr.num_txq = BFI_ENET_DEF_TXQ;
1702 ioceth->attr.num_rxp = BFI_ENET_DEF_RXP;
1703 ioceth->attr.num_ucmac = BFI_ENET_DEF_UCAM;
1704 ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
1705 ioceth->attr.max_rit_size = BFI_ENET_DEF_RITSZ;
1706 ioceth->attr.fw_query_complete = false;
1707}
1708
/* Initialize the ioceth: attach the IOC, then carve the pre-allocated
 * DMA/KVA resources up among IOC, CEE, flash and msgq in a fixed order.
 * The kva/dma cursors advance through the BNA_RES_MEM_T_COM block, so
 * the claim order here must match the size layout in bna_res_req().
 */
static void
bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
		struct bna_res_info *res_info)
{
	u64 dma;
	u8 *kva;

	ioceth->bna = bna;

	/* Attach IOC and claim its attribute memory. */
	bfa_nw_ioc_attach(&ioceth->ioc, ioceth, &bna_ioceth_cbfn);
	bfa_nw_ioc_pci_init(&ioceth->ioc, &bna->pcidev, BFI_PCIFN_CLASS_ETH);

	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva;
	bfa_nw_ioc_mem_claim(&ioceth->ioc, kva, dma);

	kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
	bfa_nw_ioc_debug_memclaim(&ioceth->ioc, kva);

	/* Carve the common DMA block: CEE, then flash, then msgq. */
	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
	bfa_nw_cee_attach(&bna->cee, &ioceth->ioc, bna);
	bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
	kva += bfa_nw_cee_meminfo();
	dma += bfa_nw_cee_meminfo();

	bfa_nw_flash_attach(&bna->flash, &ioceth->ioc, bna);
	bfa_nw_flash_memclaim(&bna->flash, kva, dma);
	kva += bfa_nw_flash_meminfo();
	dma += bfa_nw_flash_meminfo();

	bfa_msgq_attach(&bna->msgq, &ioceth->ioc);
	bfa_msgq_memclaim(&bna->msgq, kva, dma);
	bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna);
	kva += bfa_msgq_meminfo();
	dma += bfa_msgq_meminfo();

	ioceth->stop_cbfn = NULL;
	ioceth->stop_cbarg = NULL;

	bna_attr_init(ioceth);

	bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
}
1764
/* Detach the IOC and drop the back-pointer; inverse of bna_ioceth_init(). */
static void
bna_ioceth_uninit(struct bna_ioceth *ioceth)
{
	bfa_nw_ioc_detach(&ioceth->ioc);

	ioceth->bna = NULL;
}
1772
/* Enable the ioceth. If it is already ready, just re-report readiness;
 * if it is stopped, kick off the enable sequence. Any other state means
 * a transition is already in flight and the request is ignored.
 */
void
bna_ioceth_enable(struct bna_ioceth *ioceth)
{
	if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_ready) {
		bnad_cb_ioceth_ready(ioceth->bna->bnad);
		return;
	}

	if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_stopped)
		bfa_fsm_send_event(ioceth, IOCETH_E_ENABLE);
}
1784
/* Disable the ioceth. BNA_SOFT_CLEANUP acknowledges immediately without
 * touching hardware; otherwise the completion callback is latched and
 * the FSM drives the full teardown.
 */
void
bna_ioceth_disable(struct bna_ioceth *ioceth, enum bna_cleanup_type type)
{
	if (type == BNA_SOFT_CLEANUP) {
		bnad_cb_ioceth_disabled(ioceth->bna->bnad);
		return;
	}

	ioceth->stop_cbfn = bnad_cb_ioceth_disabled;
	ioceth->stop_cbarg = ioceth->bna->bnad;

	bfa_fsm_send_event(ioceth, IOCETH_E_DISABLE);
}
1798
1799static void
1800bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
1801 struct bna_res_info *res_info)
1802{
1803 int i;
1804
1805 ucam_mod->ucmac = (struct bna_mac *)
1806 res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
1807
1808 INIT_LIST_HEAD(&ucam_mod->free_q);
1809 for (i = 0; i < bna->ioceth.attr.num_ucmac; i++) {
1810 bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
1811 list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
1812 }
1813
1814 ucam_mod->bna = bna;
1815}
1816
/* Tear down the unicast CAM module.
 * NOTE(review): the free-list walk only counts entries and the count is
 * never used — presumably a leftover leak-audit hook; confirm before
 * removing.
 */
static void
bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
{
	struct list_head *qe;
	int i = 0;

	list_for_each(qe, &ucam_mod->free_q)
		i++;

	ucam_mod->bna = NULL;
}
1828
1829static void
1830bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
1831 struct bna_res_info *res_info)
1832{
1833 int i;
1834
1835 mcam_mod->mcmac = (struct bna_mac *)
1836 res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
1837
1838 INIT_LIST_HEAD(&mcam_mod->free_q);
1839 for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
1840 bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
1841 list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
1842 }
1843
1844 mcam_mod->mchandle = (struct bna_mcam_handle *)
1845 res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mdl[0].kva;
1846
1847 INIT_LIST_HEAD(&mcam_mod->free_handle_q);
1848 for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
1849 bfa_q_qe_init(&mcam_mod->mchandle[i].qe);
1850 list_add_tail(&mcam_mod->mchandle[i].qe,
1851 &mcam_mod->free_handle_q);
1852 }
1853
1854 mcam_mod->bna = bna;
1855}
1856
/* Tear down the multicast CAM module.
 * NOTE(review): both list walks only count entries and the counts are
 * never used — presumably leftover leak-audit hooks; confirm before
 * removing.
 */
static void
bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
{
	struct list_head *qe;
	int i;

	i = 0;
	list_for_each(qe, &mcam_mod->free_q) i++;

	i = 0;
	list_for_each(qe, &mcam_mod->free_handle_q) i++;

	mcam_mod->bna = NULL;
}
1871
/* Build and post a BFI_ENET_H2I_STATS_GET_REQ asking firmware to DMA all
 * statistics for the active Tx/Rx functions into the pre-allocated
 * hw_stats buffer. stats_get_busy stays set until the response arrives.
 */
static void
bna_bfi_stats_get(struct bna *bna)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;

	bna->stats_mod.stats_get_busy = true;

	bfi_msgq_mhdr_set(stats_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_STATS_GET_REQ, 0, 0);
	stats_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_stats_req)));
	stats_req->stats_mask = htons(BFI_ENET_STATS_ALL);
	/* Restrict the request to the currently active Tx/Rx functions. */
	stats_req->tx_enet_mask = htonl(bna->tx_mod.rid_mask);
	stats_req->rx_enet_mask = htonl(bna->rx_mod.rid_mask);
	stats_req->host_buffer.a32.addr_hi = bna->stats.hw_stats_dma.msb;
	stats_req->host_buffer.a32.addr_lo = bna->stats.hw_stats_dma.lsb;

	bfa_msgq_cmd_set(&bna->stats_mod.stats_get_cmd, NULL, NULL,
		sizeof(struct bfi_enet_stats_req), &stats_req->mh);
	bfa_msgq_cmd_post(&bna->msgq, &bna->stats_mod.stats_get_cmd);
}
1893
/* Describe the fixed (attribute-independent) memory the BNA needs, for
 * the driver layer to allocate before bna_init()/bna_ioceth_init().
 */
void
bna_res_req(struct bna_res_info *res_info)
{
	/* DMA block shared by CEE, flash and msgq (carved up in
	 * bna_ioceth_init(), in that order).
	 */
	res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
				(bfa_nw_cee_meminfo() +
				bfa_nw_flash_meminfo() +
				bfa_msgq_meminfo()), PAGE_SIZE);

	/* DMA memory for the IOC attribute area. */
	res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
				ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);

	/* Kernel memory for the firmware trace buffer. */
	res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = BNA_DBG_FWTRC_LEN;

	/* DMA buffer firmware writes hardware statistics into. */
	res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
				ALIGN(sizeof(struct bfi_enet_stats),
					PAGE_SIZE);
}
1927
/* Describe the per-module memory needs, sized from the (possibly
 * firmware-updated) attribute block. Must run after the attribute query
 * so the counts reflect what the device actually supports.
 */
void
bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
{
	struct bna_attr *attr = &bna->ioceth.attr;

	/* Tx object array. */
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
		attr->num_txq * sizeof(struct bna_tx);

	/* Tx queue array. */
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
		attr->num_txq * sizeof(struct bna_txq);

	/* Rx object array. */
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
		attr->num_rxp * sizeof(struct bna_rx);

	/* Rx path array. */
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
		attr->num_rxp * sizeof(struct bna_rxp);

	/* Rx queue array: two queues (e.g. large/small) per Rx path. */
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
		(attr->num_rxp * 2) * sizeof(struct bna_rxq);

	/* Unicast CAM entry array. */
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
		attr->num_ucmac * sizeof(struct bna_mac);

	/* Multicast CAM entry array. */
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
		attr->num_mcmac * sizeof(struct bna_mac);

	/* Multicast handle array: one per multicast CAM entry. */
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.len =
		attr->num_mcmac * sizeof(struct bna_mcam_handle);
}
1997
/* First-stage BNA initialization: record driver/PCI context, wire up the
 * firmware statistics DMA buffer, map registers, and initialize the
 * ioceth, enet and ethport objects. Per-module setup that depends on the
 * firmware attribute query happens later in bna_mod_init().
 */
void
bna_init(struct bna *bna, struct bnad *bnad,
		struct bfa_pcidev *pcidev, struct bna_res_info *res_info)
{
	bna->bnad = bnad;
	bna->pcidev = *pcidev;

	/* Statistics buffer: kernel mapping plus the DMA address handed
	 * to firmware in bna_bfi_stats_get().
	 */
	bna->stats.hw_stats_kva = (struct bfi_enet_stats *)
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
	bna->stats.hw_stats_dma.msb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
	bna->stats.hw_stats_dma.lsb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;

	bna_reg_addr_init(bna, &bna->pcidev);

	/* Core objects. */
	bna_ioceth_init(&bna->ioceth, bna, res_info);

	bna_enet_init(&bna->enet, bna);
	bna_ethport_init(&bna->ethport, bna);
}
2020
/* Second-stage initialization: set up the Tx/Rx and CAM modules using
 * the attribute-sized resources from bna_mod_res_req(), and mark the
 * modules initialized so bna_uninit() knows to tear them down.
 */
void
bna_mod_init(struct bna *bna, struct bna_res_info *res_info)
{
	bna_tx_mod_init(&bna->tx_mod, bna, res_info);

	bna_rx_mod_init(&bna->rx_mod, bna, res_info);

	bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);

	bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);

	/* No Rx function owns default/promiscuous mode yet. */
	bna->default_mode_rid = BFI_INVALID_RID;
	bna->promisc_rid = BFI_INVALID_RID;

	bna->mod_flags |= BNA_MOD_F_INIT_DONE;
}
2037
/* Tear everything down in reverse order of bna_mod_init()/bna_init().
 * The module teardown is skipped if bna_mod_init() never ran.
 */
void
bna_uninit(struct bna *bna)
{
	if (bna->mod_flags & BNA_MOD_F_INIT_DONE) {
		bna_mcam_mod_uninit(&bna->mcam_mod);
		bna_ucam_mod_uninit(&bna->ucam_mod);
		bna_rx_mod_uninit(&bna->rx_mod);
		bna_tx_mod_uninit(&bna->tx_mod);
		bna->mod_flags &= ~BNA_MOD_F_INIT_DONE;
	}

	bna_stats_mod_uninit(&bna->stats_mod);
	bna_ethport_uninit(&bna->ethport);
	bna_enet_uninit(&bna->enet);

	/* ioceth goes last — everything above depends on it. */
	bna_ioceth_uninit(&bna->ioceth);

	bna->bnad = NULL;
}
2057
2058int
2059bna_num_txq_set(struct bna *bna, int num_txq)
2060{
2061 if (bna->ioceth.attr.fw_query_complete &&
2062 (num_txq <= bna->ioceth.attr.num_txq)) {
2063 bna->ioceth.attr.num_txq = num_txq;
2064 return BNA_CB_SUCCESS;
2065 }
2066
2067 return BNA_CB_FAIL;
2068}
2069
2070int
2071bna_num_rxp_set(struct bna *bna, int num_rxp)
2072{
2073 if (bna->ioceth.attr.fw_query_complete &&
2074 (num_rxp <= bna->ioceth.attr.num_rxp)) {
2075 bna->ioceth.attr.num_rxp = num_rxp;
2076 return BNA_CB_SUCCESS;
2077 }
2078
2079 return BNA_CB_FAIL;
2080}
2081
2082struct bna_mac *
2083bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod)
2084{
2085 struct list_head *qe;
2086
2087 if (list_empty(&ucam_mod->free_q))
2088 return NULL;
2089
2090 bfa_q_deq(&ucam_mod->free_q, &qe);
2091
2092 return (struct bna_mac *)qe;
2093}
2094
/* Return a unicast MAC entry to the free list. */
void
bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, struct bna_mac *mac)
{
	list_add_tail(&mac->qe, &ucam_mod->free_q);
}
2100
2101struct bna_mac *
2102bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod)
2103{
2104 struct list_head *qe;
2105
2106 if (list_empty(&mcam_mod->free_q))
2107 return NULL;
2108
2109 bfa_q_deq(&mcam_mod->free_q, &qe);
2110
2111 return (struct bna_mac *)qe;
2112}
2113
/* Return a multicast MAC entry to the free list. */
void
bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, struct bna_mac *mac)
{
	list_add_tail(&mac->qe, &mcam_mod->free_q);
}
2119
2120struct bna_mcam_handle *
2121bna_mcam_mod_handle_get(struct bna_mcam_mod *mcam_mod)
2122{
2123 struct list_head *qe;
2124
2125 if (list_empty(&mcam_mod->free_handle_q))
2126 return NULL;
2127
2128 bfa_q_deq(&mcam_mod->free_handle_q, &qe);
2129
2130 return (struct bna_mcam_handle *)qe;
2131}
2132
/* Return a multicast handle to the free list. */
void
bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
			struct bna_mcam_handle *handle)
{
	list_add_tail(&handle->qe, &mcam_mod->free_handle_q);
}
2139
/* Kick off an asynchronous hardware statistics fetch. Fails immediately
 * via the driver callback if the IOC is down (BNA_CB_FAIL) or a fetch is
 * already in flight (BNA_CB_BUSY); otherwise posts the firmware request.
 */
void
bna_hw_stats_get(struct bna *bna)
{
	if (!bna->stats_mod.ioc_ready) {
		bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
		return;
	}
	if (bna->stats_mod.stats_get_busy) {
		bnad_cb_stats_get(bna->bnad, BNA_CB_BUSY, &bna->stats);
		return;
	}

	bna_bfi_stats_get(bna);
}
2154