#include "hinic_compat.h"
#include "hinic_csr.h"
#include "hinic_pmd_hwdev.h"
#include "hinic_pmd_hwif.h"
#include "hinic_pmd_mgmt.h"
#include "hinic_pmd_mbox.h"

#define BUF_OUT_DEFAULT_SIZE 1

#define MAX_PF_MGMT_BUF_SIZE 2048UL

#define MGMT_MSG_SIZE_MIN 20
#define MGMT_MSG_SIZE_STEP 16
#define MGMT_MSG_RSVD_FOR_DEV 8

#define MGMT_MSG_TIMEOUT 5000 /* milliseconds */

#define SYNC_MSG_ID_MASK 0x1FF
#define ASYNC_MSG_ID_MASK 0x1FF
#define ASYNC_MSG_FLAG 0x200

#define MSG_NO_RESP 0xFFFF

#define MAX_MSG_SZ 2016

#define MSG_SZ_IS_VALID(in_size) ((in_size) <= MAX_MSG_SZ)

#define SYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->sync_msg_id)

#define SYNC_MSG_ID_INC(pf_to_mgmt) (SYNC_MSG_ID(pf_to_mgmt) = \
        (SYNC_MSG_ID(pf_to_mgmt) + 1) & SYNC_MSG_ID_MASK)

#define ASYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->async_msg_id)

#define ASYNC_MSG_ID_INC(pf_to_mgmt) (ASYNC_MSG_ID(pf_to_mgmt) = \
        ((ASYNC_MSG_ID(pf_to_mgmt) + 1) & ASYNC_MSG_ID_MASK) \
        | ASYNC_MSG_FLAG)

#define HINIC_SEQ_ID_MAX_VAL 42
#define HINIC_MSG_SEG_LEN 48

#define GET_CURR_AEQ_ELEM(eq) GET_AEQ_ELEM((eq), (eq)->cons_idx)

#define EQ_ELEM_DESC_TYPE_SHIFT 0
#define EQ_ELEM_DESC_SRC_SHIFT 7
#define EQ_ELEM_DESC_SIZE_SHIFT 8
#define EQ_ELEM_DESC_WRAPPED_SHIFT 31

#define EQ_ELEM_DESC_TYPE_MASK 0x7FU
#define EQ_ELEM_DESC_SRC_MASK 0x1U
#define EQ_ELEM_DESC_SIZE_MASK 0xFFU
#define EQ_ELEM_DESC_WRAPPED_MASK 0x1U

#define EQ_MSIX_RESEND_TIMER_CLEAR 1

#define EQ_ELEM_DESC_GET(val, member) \
        (((val) >> EQ_ELEM_DESC_##member##_SHIFT) & \
        EQ_ELEM_DESC_##member##_MASK)

#define HINIC_MGMT_CHANNEL_STATUS_SHIFT 0x0
#define HINIC_MGMT_CHANNEL_STATUS_MASK 0x1

#define HINIC_GET_MGMT_CHANNEL_STATUS(val, member) \
        (((val) >> HINIC_##member##_SHIFT) & HINIC_##member##_MASK)

#define HINIC_MSG_TO_MGMT_MAX_LEN 2016
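/**
 * mgmt_msg_len - calculate the total management message length
 * @msg_data_len: length of the message data
 * Return: total length including the device reserved bytes and the 64-bit
 *         header, rounded up to MGMT_MSG_SIZE_STEP
 */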
static u16 mgmt_msg_len(u16 msg_data_len)
{
        /* sizeof(u64) accounts for the message header */
        u16 msg_size = (u16)(MGMT_MSG_RSVD_FOR_DEV + sizeof(u64) +
                             msg_data_len);

        if (msg_size > MGMT_MSG_SIZE_MIN)
                msg_size = MGMT_MSG_SIZE_MIN +
                           ALIGN((msg_size - MGMT_MSG_SIZE_MIN),
                                 MGMT_MSG_SIZE_STEP);
        else
                msg_size = MGMT_MSG_SIZE_MIN;

        return msg_size;
}
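/**
 * prepare_header - fill in the 64-bit header of a management message
 * @pf_to_mgmt: PF to MGMT channel
 * @header: header to be filled
 * @msg_len: length of the message data
 * @mod: module in the chip that will handle the message
 * @ack_type: whether the message requires an acknowledgement
 * @direction: direct send or response
 * @cmd: command id
 * @msg_id: message id
 */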
static void prepare_header(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
                           u64 *header, int msg_len, enum hinic_mod_type mod,
                           enum hinic_msg_ack_type ack_type,
                           enum hinic_msg_direction_type direction,
                           u8 cmd, u32 msg_id)
{
        struct hinic_hwif *hwif = pf_to_mgmt->hwdev->hwif;

        *header = HINIC_MSG_HEADER_SET(msg_len, MSG_LEN) |
                HINIC_MSG_HEADER_SET(mod, MODULE) |
                HINIC_MSG_HEADER_SET(msg_len, SEG_LEN) |
                HINIC_MSG_HEADER_SET(ack_type, NO_ACK) |
                HINIC_MSG_HEADER_SET(0, ASYNC_MGMT_TO_PF) |
                HINIC_MSG_HEADER_SET(0, SEQID) |
                HINIC_MSG_HEADER_SET(LAST_SEGMENT, LAST) |
                HINIC_MSG_HEADER_SET(direction, DIRECTION) |
                HINIC_MSG_HEADER_SET(cmd, CMD) |
                HINIC_MSG_HEADER_SET(HINIC_PCI_INTF_IDX(hwif), PCI_INTF_IDX) |
                HINIC_MSG_HEADER_SET(hwif->attr.port_to_port_idx, P2P_IDX) |
                HINIC_MSG_HEADER_SET(msg_id, MSG_ID);
}
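/**
 * prepare_mgmt_cmd - build the command buffer sent to the management CPU:
 * the device reserved bytes, then the 64-bit header, then the message data
 * @mgmt_cmd: destination command buffer
 * @header: message header
 * @msg: message data
 * @msg_len: length of the message data
 */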
static void prepare_mgmt_cmd(u8 *mgmt_cmd, u64 *header, void *msg,
                             int msg_len)
{
        u32 cmd_buf_max = MAX_PF_MGMT_BUF_SIZE;

        memset(mgmt_cmd, 0, MGMT_MSG_RSVD_FOR_DEV);

        mgmt_cmd += MGMT_MSG_RSVD_FOR_DEV;
        cmd_buf_max -= MGMT_MSG_RSVD_FOR_DEV;
        memcpy(mgmt_cmd, header, sizeof(*header));

        mgmt_cmd += sizeof(*header);
        cmd_buf_max -= sizeof(*header);
        memcpy(mgmt_cmd, msg, msg_len);
}
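/**
 * alloc_recv_msg - allocate the message and output buffers of a receive
 *                  message context
 * @recv_msg: receive message context
 * Return: 0 on success, negative error value otherwise
 */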
static int alloc_recv_msg(struct hinic_recv_msg *recv_msg)
{
        int err;

        recv_msg->msg = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
        if (!recv_msg->msg) {
                PMD_DRV_LOG(ERR, "Allocate recv msg buf failed");
                return -ENOMEM;
        }

        recv_msg->buf_out = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
        if (!recv_msg->buf_out) {
                PMD_DRV_LOG(ERR, "Allocate recv msg output buf failed");
                err = -ENOMEM;
                goto alloc_buf_out_err;
        }

        return 0;

alloc_buf_out_err:
        kfree(recv_msg->msg);
        return err;
}
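/**
 * free_recv_msg - free the buffers allocated by alloc_recv_msg
 * @recv_msg: receive message context
 */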
static void free_recv_msg(struct hinic_recv_msg *recv_msg)
{
        kfree(recv_msg->buf_out);
        kfree(recv_msg->msg);
}
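/**
 * alloc_msg_buf - allocate all message buffers of the PF to MGMT channel
 * @pf_to_mgmt: PF to MGMT channel
 * Return: 0 on success, negative error value otherwise
 */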
static int alloc_msg_buf(struct hinic_msg_pf_to_mgmt *pf_to_mgmt)
{
        int err;

        err = alloc_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
        if (err) {
                PMD_DRV_LOG(ERR, "Allocate recv msg failed");
                return err;
        }

        err = alloc_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
        if (err) {
                PMD_DRV_LOG(ERR, "Allocate resp recv msg failed");
                goto alloc_msg_for_resp_err;
        }

        pf_to_mgmt->async_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
        if (!pf_to_mgmt->async_msg_buf) {
                PMD_DRV_LOG(ERR, "Allocate async msg buf failed");
                err = -ENOMEM;
                goto async_msg_buf_err;
        }

        pf_to_mgmt->sync_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
        if (!pf_to_mgmt->sync_msg_buf) {
                PMD_DRV_LOG(ERR, "Allocate sync msg buf failed");
                err = -ENOMEM;
                goto sync_msg_buf_err;
        }

        return 0;

sync_msg_buf_err:
        kfree(pf_to_mgmt->async_msg_buf);

async_msg_buf_err:
        free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);

alloc_msg_for_resp_err:
        free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);

        return err;
}
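/**
 * free_msg_buf - free all message buffers of the PF to MGMT channel
 * @pf_to_mgmt: PF to MGMT channel
 */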
static void free_msg_buf(struct hinic_msg_pf_to_mgmt *pf_to_mgmt)
{
        kfree(pf_to_mgmt->sync_msg_buf);
        kfree(pf_to_mgmt->async_msg_buf);

        free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
        free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
}
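/**
 * hinic_get_mgmt_channel_status - read the management channel status bit
 * @hwdev: the pointer to the private hardware device object
 * Return: non-zero when the channel is disabled (for example while the
 *         firmware is being hot-activated), 0 when it can be used;
 *         always 0 for a VF
 */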
static int hinic_get_mgmt_channel_status(void *hwdev)
{
        struct hinic_hwif *hwif = ((struct hinic_hwdev *)hwdev)->hwif;
        u32 val;

        if (hinic_func_type((struct hinic_hwdev *)hwdev) == TYPE_VF)
                return false;

        val = hinic_hwif_read_reg(hwif, HINIC_ICPL_RESERVD_ADDR);

        return HINIC_GET_MGMT_CHANNEL_STATUS(val, MGMT_CHANNEL_STATUS);
}
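/**
 * send_msg_to_mgmt_async - send a message to the management CPU over the
 *                          asynchronous API command chain, without waiting
 *                          for a completion
 * @pf_to_mgmt: PF to MGMT channel
 * @mod: module in the chip that will handle the message
 * @cmd: command id
 * @msg: message data
 * @msg_len: length of the message data
 * @direction: direct send or response
 * @resp_msg_id: message id being responded to (response direction only)
 * Return: 0 on success, negative error value otherwise
 */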
static int send_msg_to_mgmt_async(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
                                  enum hinic_mod_type mod, u8 cmd,
                                  void *msg, u16 msg_len,
                                  enum hinic_msg_direction_type direction,
                                  u16 resp_msg_id)
{
        void *mgmt_cmd = pf_to_mgmt->async_msg_buf;
        struct hinic_api_cmd_chain *chain;
        u64 header;
        u16 cmd_size = mgmt_msg_len(msg_len);

        if (direction == HINIC_MSG_RESPONSE)
                prepare_header(pf_to_mgmt, &header, msg_len, mod,
                               HINIC_MSG_ACK, direction, cmd, resp_msg_id);
        else
                prepare_header(pf_to_mgmt, &header, msg_len, mod,
                               HINIC_MSG_ACK, direction, cmd,
                               ASYNC_MSG_ID(pf_to_mgmt));

        prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len);

        chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU];

        return hinic_api_cmd_write(chain, HINIC_NODE_ID_MGMT_HOST, mgmt_cmd,
                                   cmd_size);
}
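/**
 * send_msg_to_mgmt_sync - send a message to the management CPU over the
 *                         synchronous API command chain
 * @pf_to_mgmt: PF to MGMT channel
 * @mod: module in the chip that will handle the message
 * @cmd: command id
 * @msg: message data
 * @msg_len: length of the message data
 * @ack_type: whether the message requires an acknowledgement
 * @direction: direct send or response
 * @resp_msg_id: message id being responded to (response direction only)
 * Return: 0 on success, negative error value otherwise
 */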
static int send_msg_to_mgmt_sync(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
                                 enum hinic_mod_type mod, u8 cmd,
                                 void *msg, u16 msg_len,
                                 enum hinic_msg_ack_type ack_type,
                                 enum hinic_msg_direction_type direction,
                                 __rte_unused u16 resp_msg_id)
{
        void *mgmt_cmd = pf_to_mgmt->sync_msg_buf;
        struct hinic_api_cmd_chain *chain;
        u64 header;
        u16 cmd_size = mgmt_msg_len(msg_len);

        /* fail fast when the management channel is flagged busy,
         * e.g. while the firmware is being hot-activated
         */
        if (hinic_get_mgmt_channel_status(pf_to_mgmt->hwdev)) {
                if (mod == HINIC_MOD_COMM || mod == HINIC_MOD_L2NIC)
                        return HINIC_DEV_BUSY_ACTIVE_FW;
                else
                        return -EBUSY;
        }

        if (direction == HINIC_MSG_RESPONSE)
                prepare_header(pf_to_mgmt, &header, msg_len, mod, ack_type,
                               direction, cmd, resp_msg_id);
        else
                prepare_header(pf_to_mgmt, &header, msg_len, mod, ack_type,
                               direction, cmd, SYNC_MSG_ID(pf_to_mgmt));

        prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len);

        chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_PMD_WRITE_TO_MGMT];

        return hinic_api_cmd_write(chain, HINIC_NODE_ID_MGMT_HOST,
                                   mgmt_cmd, cmd_size);
}
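/**
 * hinic_pf_to_mgmt_init - allocate and initialize the PF to MGMT channel
 * @hwdev: the pointer to the private hardware device object
 * Return: 0 on success, negative error value otherwise
 */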
static int hinic_pf_to_mgmt_init(struct hinic_hwdev *hwdev)
{
        struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
        int err;

        pf_to_mgmt = kzalloc(sizeof(*pf_to_mgmt), GFP_KERNEL);
        if (!pf_to_mgmt) {
                PMD_DRV_LOG(ERR, "Allocate pf to mgmt mem failed");
                return -ENOMEM;
        }

        hwdev->pf_to_mgmt = pf_to_mgmt;
        pf_to_mgmt->hwdev = hwdev;

        err = hinic_mutex_init(&pf_to_mgmt->sync_msg_mutex, NULL);
        if (err)
                goto mutex_init_err;

        err = alloc_msg_buf(pf_to_mgmt);
        if (err) {
                PMD_DRV_LOG(ERR, "Allocate msg buffers failed");
                goto alloc_msg_buf_err;
        }

        err = hinic_api_cmd_init(hwdev, pf_to_mgmt->cmd_chain);
        if (err) {
                PMD_DRV_LOG(ERR, "Init the api cmd chains failed");
                goto api_cmd_init_err;
        }

        return 0;

api_cmd_init_err:
        free_msg_buf(pf_to_mgmt);

alloc_msg_buf_err:
        hinic_mutex_destroy(&pf_to_mgmt->sync_msg_mutex);

mutex_init_err:
        kfree(pf_to_mgmt);

        return err;
}
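/**
 * hinic_pf_to_mgmt_free - free the PF to MGMT channel
 * @hwdev: the pointer to the private hardware device object
 */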
static void hinic_pf_to_mgmt_free(struct hinic_hwdev *hwdev)
{
        struct hinic_msg_pf_to_mgmt *pf_to_mgmt = hwdev->pf_to_mgmt;

        hinic_api_cmd_free(pf_to_mgmt->cmd_chain);
        free_msg_buf(pf_to_mgmt);
        hinic_mutex_destroy(&pf_to_mgmt->sync_msg_mutex);
        kfree(pf_to_mgmt);
}
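/**
 * hinic_pf_to_mgmt_sync - send a message to the management CPU and poll the
 *                         response AEQ until the matching reply arrives
 * @hwdev: the pointer to the private hardware device object
 * @mod: module in the chip that will handle the message
 * @cmd: command id
 * @buf_in: input message
 * @in_size: input message size
 * @buf_out: response buffer, may be NULL if no payload is expected
 * @out_size: in: size of buf_out, out: size of the received response
 * @timeout: poll timeout in milliseconds, 0 means MGMT_MSG_TIMEOUT
 * Return: 0 on success, negative error value otherwise
 */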
static int
hinic_pf_to_mgmt_sync(struct hinic_hwdev *hwdev,
                      enum hinic_mod_type mod, u8 cmd, void *buf_in,
                      u16 in_size, void *buf_out, u16 *out_size, u32 timeout)
{
        struct hinic_msg_pf_to_mgmt *pf_to_mgmt = hwdev->pf_to_mgmt;
        struct hinic_recv_msg *recv_msg;
        u32 timeo;
        int err, i;

        err = hinic_mutex_lock(&pf_to_mgmt->sync_msg_mutex);
        if (err)
                return err;

        SYNC_MSG_ID_INC(pf_to_mgmt);
        recv_msg = &pf_to_mgmt->recv_resp_msg_from_mgmt;

        err = send_msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
                                    HINIC_MSG_ACK, HINIC_MSG_DIRECT_SEND,
                                    MSG_NO_RESP);
        if (err) {
                PMD_DRV_LOG(ERR, "Send msg to mgmt failed");
                goto unlock_sync_msg;
        }

        timeo = msecs_to_jiffies(timeout ? timeout : MGMT_MSG_TIMEOUT);
        for (i = 0; i < pf_to_mgmt->rx_aeq->poll_retry_nr; i++) {
                err = hinic_aeq_poll_msg(pf_to_mgmt->rx_aeq, timeo, NULL);
                if (err) {
                        PMD_DRV_LOG(ERR, "Poll mgmt rsp timeout, mod=%d cmd=%d msg_id=%u rc=%d",
                                    mod, cmd, pf_to_mgmt->sync_msg_id, err);
                        err = -ETIMEDOUT;
                        hinic_dump_aeq_info(hwdev);
                        goto unlock_sync_msg;
                } else {
                        if (mod == recv_msg->mod && cmd == recv_msg->cmd &&
                            recv_msg->msg_id == pf_to_mgmt->sync_msg_id) {
                                /* the expected response has been polled */
                                break;
                        }
                        PMD_DRV_LOG(ERR, "AEQ[%d] poll(mod=%d, cmd=%d, msg_id=%u) received an "
                                    "unexpected(mod=%d, cmd=%d, msg_id=%u) response",
                                    pf_to_mgmt->rx_aeq->q_id, mod, cmd,
                                    pf_to_mgmt->sync_msg_id, recv_msg->mod,
                                    recv_msg->cmd, recv_msg->msg_id);
                }
        }

        if (i == pf_to_mgmt->rx_aeq->poll_retry_nr) {
                PMD_DRV_LOG(ERR, "Got %d unexpected mgmt rsp from AEQ[%d], poll mgmt rsp failed",
                            i, pf_to_mgmt->rx_aeq->q_id);
                err = -EBADMSG;
                goto unlock_sync_msg;
        }

        rte_smp_rmb();
        if (recv_msg->msg_len && buf_out && out_size) {
                if (recv_msg->msg_len <= *out_size) {
                        memcpy(buf_out, recv_msg->msg,
                               recv_msg->msg_len);
                        *out_size = recv_msg->msg_len;
                } else {
                        PMD_DRV_LOG(ERR, "Mgmt rsp msg len: %u exceeds the output buffer size",
                                    recv_msg->msg_len);
                        err = -ERANGE;
                }
        }

unlock_sync_msg:
        if (err && out_size)
                *out_size = 0;
        (void)hinic_mutex_unlock(&pf_to_mgmt->sync_msg_mutex);
        return err;
}
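/**
 * hinic_msg_to_mgmt_sync - send a synchronous message to the management CPU;
 *                          a VF forwards it through the mailbox to its PF,
 *                          a PF sends it directly over the API command chain
 * @hwdev: the pointer to the private hardware device object
 * @mod: module in the chip that will handle the message
 * @cmd: command id
 * @buf_in: input message
 * @in_size: input message size
 * @buf_out: response buffer
 * @out_size: in: size of buf_out, out: size of the received response
 * @timeout: timeout in milliseconds, 0 means the default timeout
 * Return: 0 on success, negative error value otherwise
 */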
int hinic_msg_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd,
                           void *buf_in, u16 in_size,
                           void *buf_out, u16 *out_size, u32 timeout)
{
        int rc = HINIC_ERROR;

        if (!hwdev || in_size > HINIC_MSG_TO_MGMT_MAX_LEN)
                return -EINVAL;

        if (hinic_func_type(hwdev) == TYPE_VF) {
                rc = hinic_mbox_to_pf(hwdev, mod, cmd, buf_in, in_size,
                                      buf_out, out_size, timeout);
        } else {
                rc = hinic_pf_to_mgmt_sync(hwdev, mod, cmd, buf_in, in_size,
                                           buf_out, out_size, timeout);
        }

        return rc;
}
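/**
 * hinic_msg_to_mgmt_no_ack - send a message to the management CPU without
 *                            requesting an acknowledgement
 * @hwdev: the pointer to the private hardware device object
 * @mod: module in the chip that will handle the message
 * @cmd: command id
 * @buf_in: input message
 * @in_size: input message size
 * Return: 0 on success, negative error value otherwise
 */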
int hinic_msg_to_mgmt_no_ack(void *hwdev, enum hinic_mod_type mod, u8 cmd,
                             void *buf_in, u16 in_size)
{
        struct hinic_msg_pf_to_mgmt *pf_to_mgmt =
                        ((struct hinic_hwdev *)hwdev)->pf_to_mgmt;
        int err = -EINVAL;

        if (!MSG_SZ_IS_VALID(in_size)) {
                PMD_DRV_LOG(ERR, "Mgmt msg buffer size is invalid");
                return err;
        }

        err = hinic_mutex_lock(&pf_to_mgmt->sync_msg_mutex);
        if (err)
                return err;

        err = send_msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
                                    HINIC_MSG_NO_ACK, HINIC_MSG_DIRECT_SEND,
                                    MSG_NO_RESP);

        (void)hinic_mutex_unlock(&pf_to_mgmt->sync_msg_mutex);

        return err;
}
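/**
 * check_mgmt_seq_id_and_seg_len - validate a received message segment and
 *                                 track the reassembly state
 * @recv_msg: receive message context being reassembled
 * @seq_id: sequence id of this segment
 * @seg_len: length of this segment
 * @msg_id: message id of this segment
 * Return: true if the segment continues the current message, false otherwise
 */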
static bool check_mgmt_seq_id_and_seg_len(struct hinic_recv_msg *recv_msg,
                                          u8 seq_id, u8 seg_len, u16 msg_id)
{
        if (seq_id > HINIC_SEQ_ID_MAX_VAL || seg_len > HINIC_MSG_SEG_LEN)
                return false;

        if (seq_id == 0) {
                recv_msg->seq_id = seq_id;
                recv_msg->msg_id = msg_id;
        } else {
                if ((seq_id != recv_msg->seq_id + 1) ||
                    msg_id != recv_msg->msg_id) {
                        recv_msg->seq_id = 0;
                        return false;
                }
                recv_msg->seq_id = seq_id;
        }

        return true;
}
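/**
 * hinic_mgmt_recv_msg_handler - dispatch a message received from the
 *                               management CPU to the per-module handler
 * @pf_to_mgmt: PF to MGMT channel
 * @recv_msg: fully reassembled received message
 * @param: customized parameter passed to the event handlers
 */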
static void hinic_mgmt_recv_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
                                        struct hinic_recv_msg *recv_msg,
                                        void *param)
{
        void *buf_out = recv_msg->buf_out;
        u16 out_size = 0;

        switch (recv_msg->mod) {
        case HINIC_MOD_COMM:
                hinic_comm_async_event_handle(pf_to_mgmt->hwdev,
                                              recv_msg->cmd, recv_msg->msg,
                                              recv_msg->msg_len,
                                              buf_out, &out_size);
                break;
        case HINIC_MOD_L2NIC:
                hinic_l2nic_async_event_handle(pf_to_mgmt->hwdev, param,
                                               recv_msg->cmd, recv_msg->msg,
                                               recv_msg->msg_len,
                                               buf_out, &out_size);
                break;
        case HINIC_MOD_HILINK:
                hinic_hilink_async_event_handle(pf_to_mgmt->hwdev,
                                                recv_msg->cmd, recv_msg->msg,
                                                recv_msg->msg_len,
                                                buf_out, &out_size);
                break;
        default:
                PMD_DRV_LOG(ERR, "No handler, mod: %d", recv_msg->mod);
                break;
        }

        if (!recv_msg->async_mgmt_to_pf) {
                if (!out_size)
                        out_size = BUF_OUT_DEFAULT_SIZE;

                /* the management CPU expects a reply for this message,
                 * send it back over the asynchronous channel
                 */
                (void)send_msg_to_mgmt_async(pf_to_mgmt, recv_msg->mod,
                                             recv_msg->cmd, buf_out, out_size,
                                             HINIC_MSG_RESPONSE,
                                             recv_msg->msg_id);
        }
}
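/**
 * recv_mgmt_msg_handler - reassemble message segments from the management
 *                         CPU and handle the message once it is complete
 * @pf_to_mgmt: PF to MGMT channel
 * @header: pointer to the AEQ element data (header plus segment body)
 * @recv_msg: receive message context used for reassembly
 * @param: customized parameter passed to the event handlers
 * Return: HINIC_OK when a complete response message has been received,
 *         HINIC_ERROR otherwise (more segments expected, check failed, or
 *         the message was a request/event rather than a response)
 */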
static int recv_mgmt_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
                                 u8 *header, struct hinic_recv_msg *recv_msg,
                                 void *param)
{
        u64 msg_header = *((u64 *)header);
        void *msg_body = header + sizeof(msg_header);
        u8 *dest_msg;
        u8 seq_id, seq_len;
        u32 msg_buf_max = MAX_PF_MGMT_BUF_SIZE;
        u8 front_id;
        u16 msg_id;

        seq_id = HINIC_MSG_HEADER_GET(msg_header, SEQID);
        seq_len = HINIC_MSG_HEADER_GET(msg_header, SEG_LEN);
        front_id = recv_msg->seq_id;
        msg_id = HINIC_MSG_HEADER_GET(msg_header, MSG_ID);

        if (!check_mgmt_seq_id_and_seg_len(recv_msg, seq_id, seq_len, msg_id)) {
                PMD_DRV_LOG(ERR,
                            "Mgmt msg sequence and segment check failed, "
                            "func id: 0x%x, front id: 0x%x, current id: 0x%x, seg len: 0x%x "
                            "front msg_id: %d, cur msg_id: %d",
                            hinic_global_func_id(pf_to_mgmt->hwdev),
                            front_id, seq_id, seq_len, recv_msg->msg_id, msg_id);
                return HINIC_ERROR;
        }

        dest_msg = (u8 *)recv_msg->msg + seq_id * HINIC_MSG_SEG_LEN;
        msg_buf_max -= seq_id * HINIC_MSG_SEG_LEN;
        memcpy(dest_msg, msg_body, seq_len);

        /* not the last segment yet, wait for the next AEQ element */
        if (!HINIC_MSG_HEADER_GET(msg_header, LAST))
                return HINIC_ERROR;

        recv_msg->cmd = HINIC_MSG_HEADER_GET(msg_header, CMD);
        recv_msg->mod = HINIC_MSG_HEADER_GET(msg_header, MODULE);
        recv_msg->async_mgmt_to_pf = HINIC_MSG_HEADER_GET(msg_header,
                                                          ASYNC_MGMT_TO_PF);
        recv_msg->msg_len = HINIC_MSG_HEADER_GET(msg_header, MSG_LEN);
        recv_msg->msg_id = HINIC_MSG_HEADER_GET(msg_header, MSG_ID);

        if (HINIC_MSG_HEADER_GET(msg_header, DIRECTION) == HINIC_MSG_RESPONSE)
                return HINIC_OK;

        hinic_mgmt_recv_msg_handler(pf_to_mgmt, recv_msg, param);

        /* a request/event from the mgmt CPU, not the response being polled */
        return HINIC_ERROR;
}
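/**
 * hinic_mgmt_msg_aeqe_handler - handle a management CPU message AEQ element,
 *                               choosing the receive context by direction
 * @hwdev: the pointer to the private hardware device object
 * @header: AEQ element data
 * @size: AEQ element data size (unused)
 * @param: customized parameter passed to the event handlers
 * Return: HINIC_OK when the polled response is complete, HINIC_ERROR otherwise
 */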
static int hinic_mgmt_msg_aeqe_handler(void *hwdev, u8 *header,
                                       __rte_unused u8 size, void *param)
{
        struct hinic_msg_pf_to_mgmt *pf_to_mgmt =
                        ((struct hinic_hwdev *)hwdev)->pf_to_mgmt;
        struct hinic_recv_msg *recv_msg;

        recv_msg = (HINIC_MSG_HEADER_GET(*(u64 *)header, DIRECTION) ==
                    HINIC_MSG_DIRECT_SEND) ?
                   &pf_to_mgmt->recv_msg_from_mgmt :
                   &pf_to_mgmt->recv_resp_msg_from_mgmt;

        return recv_mgmt_msg_handler(pf_to_mgmt, header, recv_msg, param);
}
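/**
 * hinic_handle_aeqe - dispatch an AEQ element to the handler for its
 *                     event type
 * @handle: the pointer to the private hardware device object
 * @event: AEQ event type
 * @data: AEQ element data
 * @size: AEQ element data size
 * @param: customized parameter passed to the event handlers
 * Return: result of the event handler, HINIC_ERROR for unknown events
 */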
static int hinic_handle_aeqe(void *handle, enum hinic_aeq_type event,
                             u8 *data, u8 size, void *param)
{
        int rc = 0;

        switch (event) {
        case HINIC_MSG_FROM_MGMT_CPU:
                rc = hinic_mgmt_msg_aeqe_handler(handle, data, size, param);
                break;
        case HINIC_MBX_FROM_FUNC:
                rc = hinic_mbox_func_aeqe_handler(handle, data, size, param);
                break;
        default:
                PMD_DRV_LOG(ERR, "Unknown event type: 0x%x, size: %d",
                            event, size);
                rc = HINIC_ERROR;
                break;
        }

        return rc;
}
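/**
 * hinic_aeq_poll_msg - poll an AEQ and handle the elements found
 * @eq: the AEQ to poll
 * @timeout: time to wait for each element in milliseconds; 0 means scan the
 *           queue once without waiting
 * @param: customized parameter passed to the event handlers
 * Return: 0 on success, negative error value when no element arrives within
 *         the timeout or handling fails
 */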
int hinic_aeq_poll_msg(struct hinic_eq *eq, u32 timeout, void *param)
{
        struct hinic_aeq_elem *aeqe_pos;
        enum hinic_aeq_type event;
        u32 aeqe_desc = 0;
        u16 i;
        u8 size;
        int done = HINIC_ERROR;
        int err = -EFAULT;
        unsigned long end;

        for (i = 0; ((timeout == 0) && (i < eq->eq_len)) ||
             ((timeout > 0) && (done != HINIC_OK) && (i < eq->eq_len)); i++) {
                err = -EIO;
                end = jiffies + msecs_to_jiffies(timeout);
                do {
                        aeqe_pos = GET_CURR_AEQ_ELEM(eq);
                        rte_rmb();

                        /* the descriptor is written by HW in big endian */
                        aeqe_desc = be32_to_cpu(aeqe_pos->desc);

                        /* HW updates the wrapped bit when it posts a new
                         * element, so a mismatch with eq->wrapped means a
                         * new event is available
                         */
                        if (EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED)
                            != eq->wrapped) {
                                err = 0;
                                break;
                        }

                        if (timeout != 0)
                                usleep(1000);
                } while (time_before(jiffies, end));

                /* no new element arrived within the timeout */
                if (err != HINIC_OK)
                        break;

                event = EQ_ELEM_DESC_GET(aeqe_desc, TYPE);
                if (EQ_ELEM_DESC_GET(aeqe_desc, SRC)) {
                        PMD_DRV_LOG(ERR, "AEQ sw event not support %d", event);
                        return -ENODEV;

                } else {
                        size = EQ_ELEM_DESC_GET(aeqe_desc, SIZE);
                        done = hinic_handle_aeqe(eq->hwdev, event,
                                                 aeqe_pos->aeqe_data,
                                                 size, param);
                }

                eq->cons_idx++;
                if (eq->cons_idx == eq->eq_len) {
                        eq->cons_idx = 0;
                        eq->wrapped = !eq->wrapped;
                }
        }

        /* report the updated consumer index to hardware */
        eq_update_ci(eq);

        return err;
}
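/**
 * hinic_comm_pf_to_mgmt_init - initialize the PF to MGMT channel and bind it
 *                              to the management response AEQ (PF only)
 * @hwdev: the pointer to the private hardware device object
 * Return: 0 on success (including for VFs, which do not use this channel),
 *         negative error value otherwise
 */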
int hinic_comm_pf_to_mgmt_init(struct hinic_hwdev *hwdev)
{
        int rc;

        /* VFs do not use the PF-to-mgmt channel; they reach the management
         * CPU through the mailbox to their PF
         */
        if (hinic_func_type(hwdev) == TYPE_VF)
                return 0;

        rc = hinic_pf_to_mgmt_init(hwdev);
        if (rc)
                return rc;

        hwdev->pf_to_mgmt->rx_aeq = &hwdev->aeqs->aeq[HINIC_MGMT_RSP_AEQN];

        return 0;
}
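/**
 * hinic_comm_pf_to_mgmt_free - free the PF to MGMT channel (PF only)
 * @hwdev: the pointer to the private hardware device object
 */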
void hinic_comm_pf_to_mgmt_free(struct hinic_hwdev *hwdev)
{
        /* VFs never allocate the PF-to-mgmt channel, nothing to free */
        if (hinic_func_type(hwdev) == TYPE_VF)
                return;

        hinic_pf_to_mgmt_free(hwdev);
}
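/**
 * hinic_dev_handle_aeq_event - clear the MSI-X resend bit of AEQ0, then poll
 *                              and handle all pending AEQ elements
 * @hwdev: the pointer to the private hardware device object
 * @param: customized parameter passed to the event handlers
 */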
void hinic_dev_handle_aeq_event(struct hinic_hwdev *hwdev, void *param)
{
        struct hinic_eq *aeq = &hwdev->aeqs->aeq[0];

        /* clear the MSI-X interrupt resend bit for this AEQ */
        hinic_misx_intr_clear_resend_bit(hwdev, aeq->eq_irq.msix_entry_idx,
                                         EQ_MSIX_RESEND_TIMER_CLEAR);
        (void)hinic_aeq_poll_msg(aeq, 0, param);
}