1
2
3
4
5#include "hinic_compat.h"
6#include "hinic_csr.h"
7#include "hinic_pmd_hwdev.h"
8#include "hinic_pmd_hwif.h"
9#include "hinic_pmd_mgmt.h"
10#include "hinic_pmd_mbox.h"
11
/* Length reported back to mgmt when a handler produced no response data */
#define BUF_OUT_DEFAULT_SIZE 1

/* Size of every mgmt message buffer allocated by this module */
#define MAX_PF_MGMT_BUF_SIZE 2048UL

/* Smallest on-wire mgmt command, and the granularity it grows by */
#define MGMT_MSG_SIZE_MIN 20
#define MGMT_MSG_SIZE_STEP 16
/* Bytes reserved for the device in front of the 8-byte message header */
#define MGMT_MSG_RSVD_FOR_DEV 8

/* Default wait for a mgmt response, in milliseconds */
#define MGMT_MSG_TIMEOUT 5000

/* Message ids occupy bits 0-8; async ids additionally carry flag bit 9 */
#define SYNC_MSG_ID_MASK 0x1FF
#define ASYNC_MSG_ID_MASK 0x1FF
#define ASYNC_MSG_FLAG 0x200

/* Sentinel resp_msg_id used when no response is expected */
#define MSG_NO_RESP 0xFFFF

/* Largest payload a caller may pass down */
#define MAX_MSG_SZ 2016

#define MSG_SZ_IS_VALID(in_size) ((in_size) <= MAX_MSG_SZ)

/* Current sync message id of the PF-to-mgmt channel */
#define SYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->sync_msg_id)

/* Advance the sync id, wrapping within SYNC_MSG_ID_MASK */
#define SYNC_MSG_ID_INC(pf_to_mgmt) (SYNC_MSG_ID(pf_to_mgmt) = \
	(SYNC_MSG_ID(pf_to_mgmt) + 1) & SYNC_MSG_ID_MASK)

/* Current async message id of the PF-to-mgmt channel */
#define ASYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->async_msg_id)

/* Advance the async id, always keeping the ASYNC_MSG_FLAG bit set */
#define ASYNC_MSG_ID_INC(pf_to_mgmt) (ASYNC_MSG_ID(pf_to_mgmt) = \
	((ASYNC_MSG_ID(pf_to_mgmt) + 1) & ASYNC_MSG_ID_MASK) \
	| ASYNC_MSG_FLAG)

/* Highest valid segment sequence id and max payload bytes per segment */
#define HINIC_SEQ_ID_MAX_VAL 42
#define HINIC_MSG_SEG_LEN 48

/* AEQ element at the queue's current consumer index */
#define GET_CURR_AEQ_ELEM(eq) GET_AEQ_ELEM((eq), (eq)->cons_idx)

/* Bit layout of the 32-bit AEQ element descriptor */
#define EQ_ELEM_DESC_TYPE_SHIFT 0
#define EQ_ELEM_DESC_SRC_SHIFT 7
#define EQ_ELEM_DESC_SIZE_SHIFT 8
#define EQ_ELEM_DESC_WRAPPED_SHIFT 31

#define EQ_ELEM_DESC_TYPE_MASK 0x7FU
#define EQ_ELEM_DESC_SRC_MASK 0x1U
#define EQ_ELEM_DESC_SIZE_MASK 0xFFU
#define EQ_ELEM_DESC_WRAPPED_MASK 0x1U

/* Value used to clear the MSI-X resend timer */
#define EQ_MSIX_RESEND_TIMER_CLEAR 1

/* Extract a named field from an AEQ element descriptor */
#define EQ_ELEM_DESC_GET(val, member) \
	(((val) >> EQ_ELEM_DESC_##member##_SHIFT) & \
	EQ_ELEM_DESC_##member##_MASK)

/* Mgmt channel status bit read from HINIC_ICPL_RESERVD_ADDR */
#define HINIC_MGMT_CHANNEL_STATUS_SHIFT 0x0
#define HINIC_MGMT_CHANNEL_STATUS_MASK 0x1

#define HINIC_GET_MGMT_CHANNEL_STATUS(val, member) \
	(((val) >> HINIC_##member##_SHIFT) & HINIC_##member##_MASK)

/* Upper bound on in_size accepted by hinic_msg_to_mgmt_sync */
#define HINIC_MSG_TO_MGMT_MAX_LEN 2016
71
72
73
74
75
76
77static u16 mgmt_msg_len(u16 msg_data_len)
78{
79
80 u16 msg_size = (u16)(MGMT_MSG_RSVD_FOR_DEV + sizeof(u64) +
81 msg_data_len);
82
83 if (msg_size > MGMT_MSG_SIZE_MIN)
84 msg_size = MGMT_MSG_SIZE_MIN +
85 ALIGN((msg_size - MGMT_MSG_SIZE_MIN),
86 MGMT_MSG_SIZE_STEP);
87 else
88 msg_size = MGMT_MSG_SIZE_MIN;
89
90 return msg_size;
91}
92
93
94
95
96
97
98
99
100
101
102
103
104static void prepare_header(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
105 u64 *header, int msg_len, enum hinic_mod_type mod,
106 enum hinic_msg_ack_type ack_type,
107 enum hinic_msg_direction_type direction,
108 u8 cmd, u32 msg_id)
109{
110 struct hinic_hwif *hwif = pf_to_mgmt->hwdev->hwif;
111
112 *header = HINIC_MSG_HEADER_SET(msg_len, MSG_LEN) |
113 HINIC_MSG_HEADER_SET(mod, MODULE) |
114 HINIC_MSG_HEADER_SET(msg_len, SEG_LEN) |
115 HINIC_MSG_HEADER_SET(ack_type, NO_ACK) |
116 HINIC_MSG_HEADER_SET(0, ASYNC_MGMT_TO_PF) |
117 HINIC_MSG_HEADER_SET(0, SEQID) |
118 HINIC_MSG_HEADER_SET(LAST_SEGMENT, LAST) |
119 HINIC_MSG_HEADER_SET(direction, DIRECTION) |
120 HINIC_MSG_HEADER_SET(cmd, CMD) |
121 HINIC_MSG_HEADER_SET(HINIC_PCI_INTF_IDX(hwif), PCI_INTF_IDX) |
122 HINIC_MSG_HEADER_SET(hwif->attr.port_to_port_idx, P2P_IDX) |
123 HINIC_MSG_HEADER_SET(msg_id, MSG_ID);
124}
125
126
127
128
129
130
131
132
133static void prepare_mgmt_cmd(u8 *mgmt_cmd, u64 *header, void *msg,
134 int msg_len)
135{
136 memset(mgmt_cmd, 0, MGMT_MSG_RSVD_FOR_DEV);
137
138 mgmt_cmd += MGMT_MSG_RSVD_FOR_DEV;
139 memcpy(mgmt_cmd, header, sizeof(*header));
140
141 mgmt_cmd += sizeof(*header);
142 memcpy(mgmt_cmd, msg, msg_len);
143}
144
145
146
147
148
149
150static int alloc_recv_msg(struct hinic_recv_msg *recv_msg)
151{
152 int err;
153
154 recv_msg->msg = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
155 if (!recv_msg->msg) {
156 PMD_DRV_LOG(ERR, "Allocate recv msg buf failed");
157 return -ENOMEM;
158 }
159
160 recv_msg->buf_out = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
161 if (!recv_msg->buf_out) {
162 PMD_DRV_LOG(ERR, "Allocate recv msg output buf failed");
163 err = -ENOMEM;
164 goto alloc_buf_out_err;
165 }
166
167 return 0;
168
169alloc_buf_out_err:
170 kfree(recv_msg->msg);
171 return err;
172}
173
174
175
176
177
178static void free_recv_msg(struct hinic_recv_msg *recv_msg)
179{
180 kfree(recv_msg->buf_out);
181 kfree(recv_msg->msg);
182}
183
184
185
186
187
188
189static int alloc_msg_buf(struct hinic_msg_pf_to_mgmt *pf_to_mgmt)
190{
191 int err;
192
193 err = alloc_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
194 if (err) {
195 PMD_DRV_LOG(ERR, "Allocate recv msg failed");
196 return err;
197 }
198
199 err = alloc_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
200 if (err) {
201 PMD_DRV_LOG(ERR, "Allocate resp recv msg failed");
202 goto alloc_msg_for_resp_err;
203 }
204
205 pf_to_mgmt->async_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
206 if (!pf_to_mgmt->async_msg_buf) {
207 PMD_DRV_LOG(ERR, "Allocate async msg buf failed");
208 err = -ENOMEM;
209 goto async_msg_buf_err;
210 }
211
212 pf_to_mgmt->sync_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
213 if (!pf_to_mgmt->sync_msg_buf) {
214 PMD_DRV_LOG(ERR, "Allocate sync msg buf failed");
215 err = -ENOMEM;
216 goto sync_msg_buf_err;
217 }
218
219 return 0;
220
221sync_msg_buf_err:
222 kfree(pf_to_mgmt->async_msg_buf);
223
224async_msg_buf_err:
225 free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
226
227alloc_msg_for_resp_err:
228 free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
229
230 return err;
231}
232
233
234
235
236
237
238static void free_msg_buf(struct hinic_msg_pf_to_mgmt *pf_to_mgmt)
239{
240 kfree(pf_to_mgmt->sync_msg_buf);
241 kfree(pf_to_mgmt->async_msg_buf);
242
243 free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
244 free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
245}
246
247static int hinic_get_mgmt_channel_status(void *hwdev)
248{
249 struct hinic_hwif *hwif = ((struct hinic_hwdev *)hwdev)->hwif;
250 u32 val;
251
252 if (hinic_func_type((struct hinic_hwdev *)hwdev) == TYPE_VF)
253 return false;
254
255 val = hinic_hwif_read_reg(hwif, HINIC_ICPL_RESERVD_ADDR);
256
257 return HINIC_GET_MGMT_CHANNEL_STATUS(val, MGMT_CHANNEL_STATUS);
258}
259
260
261
262
263
264
265
266
267
268
269
270
271static int send_msg_to_mgmt_async(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
272 enum hinic_mod_type mod, u8 cmd,
273 void *msg, u16 msg_len,
274 enum hinic_msg_direction_type direction,
275 u16 resp_msg_id)
276{
277 void *mgmt_cmd = pf_to_mgmt->async_msg_buf;
278 struct hinic_api_cmd_chain *chain;
279 u64 header;
280 u16 cmd_size = mgmt_msg_len(msg_len);
281
282 if (direction == HINIC_MSG_RESPONSE)
283 prepare_header(pf_to_mgmt, &header, msg_len, mod, HINIC_MSG_ACK,
284 direction, cmd, resp_msg_id);
285 else
286 prepare_header(pf_to_mgmt, &header, msg_len, mod, HINIC_MSG_ACK,
287 direction, cmd, ASYNC_MSG_ID(pf_to_mgmt));
288
289 prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len);
290
291 chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_WRITE_ASYNC_TO_MGMT_CPU];
292
293 return hinic_api_cmd_write(chain, HINIC_NODE_ID_MGMT_HOST, mgmt_cmd,
294 cmd_size);
295}
296
297
298
299
300
301
302
303
304
305
306
307
308
309static int send_msg_to_mgmt_sync(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
310 enum hinic_mod_type mod, u8 cmd,
311 void *msg, u16 msg_len,
312 enum hinic_msg_ack_type ack_type,
313 enum hinic_msg_direction_type direction,
314 __rte_unused u16 resp_msg_id)
315{
316 void *mgmt_cmd = pf_to_mgmt->sync_msg_buf;
317 struct hinic_api_cmd_chain *chain;
318 u64 header;
319 u16 cmd_size = mgmt_msg_len(msg_len);
320
321
322 if (hinic_get_mgmt_channel_status(pf_to_mgmt->hwdev)) {
323 if (mod == HINIC_MOD_COMM || mod == HINIC_MOD_L2NIC)
324 return HINIC_DEV_BUSY_ACTIVE_FW;
325 else
326 return -EBUSY;
327 }
328
329 if (direction == HINIC_MSG_RESPONSE)
330 prepare_header(pf_to_mgmt, &header, msg_len, mod, ack_type,
331 direction, cmd, resp_msg_id);
332 else
333 prepare_header(pf_to_mgmt, &header, msg_len, mod, ack_type,
334 direction, cmd, SYNC_MSG_ID(pf_to_mgmt));
335
336 prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len);
337
338 chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_PMD_WRITE_TO_MGMT];
339
340 return hinic_api_cmd_write(chain, HINIC_NODE_ID_MGMT_HOST,
341 mgmt_cmd, cmd_size);
342}
343
344
345
346
347
348
349static int hinic_pf_to_mgmt_init(struct hinic_hwdev *hwdev)
350{
351 struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
352 int err;
353
354 pf_to_mgmt = kzalloc(sizeof(*pf_to_mgmt), GFP_KERNEL);
355 if (!pf_to_mgmt) {
356 PMD_DRV_LOG(ERR, "Allocate pf to mgmt mem failed");
357 return -ENOMEM;
358 }
359
360 hwdev->pf_to_mgmt = pf_to_mgmt;
361 pf_to_mgmt->hwdev = hwdev;
362
363 err = hinic_mutex_init(&pf_to_mgmt->sync_msg_mutex, NULL);
364 if (err)
365 goto mutex_init_err;
366
367 err = alloc_msg_buf(pf_to_mgmt);
368 if (err) {
369 PMD_DRV_LOG(ERR, "Allocate msg buffers failed");
370 goto alloc_msg_buf_err;
371 }
372
373 err = hinic_api_cmd_init(hwdev, pf_to_mgmt->cmd_chain);
374 if (err) {
375 PMD_DRV_LOG(ERR, "Init the api cmd chains failed");
376 goto api_cmd_init_err;
377 }
378
379 return 0;
380
381api_cmd_init_err:
382 free_msg_buf(pf_to_mgmt);
383
384alloc_msg_buf_err:
385 hinic_mutex_destroy(&pf_to_mgmt->sync_msg_mutex);
386
387mutex_init_err:
388 kfree(pf_to_mgmt);
389
390 return err;
391}
392
393
394
395
396
397static void hinic_pf_to_mgmt_free(struct hinic_hwdev *hwdev)
398{
399 struct hinic_msg_pf_to_mgmt *pf_to_mgmt = hwdev->pf_to_mgmt;
400
401 hinic_api_cmd_free(pf_to_mgmt->cmd_chain);
402 free_msg_buf(pf_to_mgmt);
403 hinic_mutex_destroy(&pf_to_mgmt->sync_msg_mutex);
404 kfree(pf_to_mgmt);
405}
406
/**
 * Send one synchronous request to the mgmt CPU and wait for the matching
 * response on the response AEQ.
 * @hwdev: device owning the PF-to-mgmt channel
 * @mod/@cmd: destination module and command on the mgmt CPU
 * @buf_in/@in_size: request payload
 * @buf_out/@out_size: optional response buffer; *out_size is in/out
 *	(capacity on entry, actual length on success; forced to 0 on error)
 * @timeout: per-poll wait in milliseconds; 0 selects MGMT_MSG_TIMEOUT
 *
 * Return: 0 on success; -ETIMEDOUT when the AEQ poll expires, -EBADMSG
 * when every retry returned a mismatched response, -ERANGE when the
 * response does not fit in *out_size, or the send error.
 */
static int
hinic_pf_to_mgmt_sync(struct hinic_hwdev *hwdev,
		      enum hinic_mod_type mod, u8 cmd, void *buf_in, u16 in_size,
		      void *buf_out, u16 *out_size, u32 timeout)
{
	struct hinic_msg_pf_to_mgmt *pf_to_mgmt = hwdev->pf_to_mgmt;
	struct hinic_recv_msg *recv_msg;
	u32 timeo;
	int err, i;

	/* serialize sync requests; only one outstanding at a time */
	err = hinic_mutex_lock(&pf_to_mgmt->sync_msg_mutex);
	if (err)
		return err;

	/* fresh id so the response can be matched to this request */
	SYNC_MSG_ID_INC(pf_to_mgmt);
	recv_msg = &pf_to_mgmt->recv_resp_msg_from_mgmt;

	err = send_msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
				    HINIC_MSG_ACK, HINIC_MSG_DIRECT_SEND,
				    MSG_NO_RESP);
	if (err) {
		PMD_DRV_LOG(ERR, "Send msg to mgmt failed");
		goto unlock_sync_msg;
	}

	timeo = msecs_to_jiffies(timeout ? timeout : MGMT_MSG_TIMEOUT);
	/* poll up to poll_retry_nr times, skipping mismatched responses */
	for (i = 0; i < pf_to_mgmt->rx_aeq->poll_retry_nr; i++) {
		err = hinic_aeq_poll_msg(pf_to_mgmt->rx_aeq, timeo, NULL);
		if (err) {
			PMD_DRV_LOG(ERR, "Poll mgmt rsp timeout, mod=%d cmd=%d msg_id=%u rc=%d",
				    mod, cmd, pf_to_mgmt->sync_msg_id, err);
			err = -ETIMEDOUT;
			hinic_dump_aeq_info(hwdev);
			goto unlock_sync_msg;
		} else {
			/* accept only the response for this mod/cmd/msg_id */
			if (mod == recv_msg->mod && cmd == recv_msg->cmd &&
			    recv_msg->msg_id == pf_to_mgmt->sync_msg_id) {
				/* the expected response - stop polling */
				break;
			}
			PMD_DRV_LOG(ERR, "AEQ[%d] poll(mod=%d, cmd=%d, msg_id=%u) an "
				    "unexpected(mod=%d, cmd=%d, msg_id=%u) response",
				    pf_to_mgmt->rx_aeq->q_id, mod, cmd,
				    pf_to_mgmt->sync_msg_id, recv_msg->mod,
				    recv_msg->cmd, recv_msg->msg_id);
		}
	}

	/* every retry consumed by mismatched responses */
	if (i == pf_to_mgmt->rx_aeq->poll_retry_nr) {
		PMD_DRV_LOG(ERR, "Get %d unexpected mgmt rsp from AEQ[%d], poll mgmt rsp failed",
			    i, pf_to_mgmt->rx_aeq->q_id);
		err = -EBADMSG;
		goto unlock_sync_msg;
	}

	/* ensure the response body is read after the match check above */
	rte_smp_rmb();
	if (recv_msg->msg_len && buf_out && out_size) {
		if (recv_msg->msg_len <= *out_size) {
			memcpy(buf_out, recv_msg->msg,
			       recv_msg->msg_len);
			*out_size = recv_msg->msg_len;
		} else {
			PMD_DRV_LOG(ERR, "Mgmt rsp's msg len: %u overflow.",
				    recv_msg->msg_len);
			err = -ERANGE;
		}
	}

unlock_sync_msg:
	/* on any failure report a zero-length response to the caller */
	if (err && out_size)
		*out_size = 0;
	(void)hinic_mutex_unlock(&pf_to_mgmt->sync_msg_mutex);
	return err;
}
481
482int hinic_msg_to_mgmt_sync(void *hwdev, enum hinic_mod_type mod, u8 cmd,
483 void *buf_in, u16 in_size,
484 void *buf_out, u16 *out_size, u32 timeout)
485{
486 int rc = HINIC_ERROR;
487
488 if (!hwdev || in_size > HINIC_MSG_TO_MGMT_MAX_LEN)
489 return -EINVAL;
490
491 if (hinic_func_type(hwdev) == TYPE_VF) {
492 rc = hinic_mbox_to_pf(hwdev, mod, cmd, buf_in, in_size,
493 buf_out, out_size, timeout);
494 } else {
495 rc = hinic_pf_to_mgmt_sync(hwdev, mod, cmd, buf_in, in_size,
496 buf_out, out_size, timeout);
497 }
498
499 return rc;
500}
501
502int hinic_msg_to_mgmt_no_ack(void *hwdev, enum hinic_mod_type mod, u8 cmd,
503 void *buf_in, u16 in_size)
504{
505 struct hinic_msg_pf_to_mgmt *pf_to_mgmt =
506 ((struct hinic_hwdev *)hwdev)->pf_to_mgmt;
507 int err = -EINVAL;
508
509 if (!MSG_SZ_IS_VALID(in_size)) {
510 PMD_DRV_LOG(ERR, "Mgmt msg buffer size is invalid");
511 return err;
512 }
513
514 err = hinic_mutex_lock(&pf_to_mgmt->sync_msg_mutex);
515 if (err)
516 return err;
517
518 err = send_msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
519 HINIC_MSG_NO_ACK, HINIC_MSG_DIRECT_SEND,
520 MSG_NO_RESP);
521
522 (void)hinic_mutex_unlock(&pf_to_mgmt->sync_msg_mutex);
523
524 return err;
525}
526
527static bool check_mgmt_seq_id_and_seg_len(struct hinic_recv_msg *recv_msg,
528 u8 seq_id, u8 seg_len, u16 msg_id)
529{
530 if (seq_id > HINIC_SEQ_ID_MAX_VAL || seg_len > HINIC_MSG_SEG_LEN)
531 return false;
532
533 if (seq_id == 0) {
534 recv_msg->seq_id = seq_id;
535 recv_msg->msg_id = msg_id;
536 } else {
537 if ((seq_id != recv_msg->seq_id + 1) ||
538 msg_id != recv_msg->msg_id) {
539 recv_msg->seq_id = 0;
540 return false;
541 }
542 recv_msg->seq_id = seq_id;
543 }
544
545 return true;
546}
547
548
549
550
551
552
553
554static void hinic_mgmt_recv_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
555 struct hinic_recv_msg *recv_msg,
556 void *param)
557{
558 void *buf_out = recv_msg->buf_out;
559 u16 out_size = 0;
560
561 switch (recv_msg->mod) {
562 case HINIC_MOD_COMM:
563 hinic_comm_async_event_handle(pf_to_mgmt->hwdev,
564 recv_msg->cmd, recv_msg->msg,
565 recv_msg->msg_len,
566 buf_out, &out_size);
567 break;
568 case HINIC_MOD_L2NIC:
569 hinic_l2nic_async_event_handle(pf_to_mgmt->hwdev, param,
570 recv_msg->cmd, recv_msg->msg,
571 recv_msg->msg_len,
572 buf_out, &out_size);
573 break;
574 case HINIC_MOD_HILINK:
575 hinic_hilink_async_event_handle(pf_to_mgmt->hwdev,
576 recv_msg->cmd, recv_msg->msg,
577 recv_msg->msg_len,
578 buf_out, &out_size);
579 break;
580 default:
581 PMD_DRV_LOG(ERR, "No handler, mod: %d", recv_msg->mod);
582 break;
583 }
584
585 if (!recv_msg->async_mgmt_to_pf) {
586 if (!out_size)
587 out_size = BUF_OUT_DEFAULT_SIZE;
588
589
590 (void)send_msg_to_mgmt_async(pf_to_mgmt, recv_msg->mod,
591 recv_msg->cmd, buf_out, out_size,
592 HINIC_MSG_RESPONSE,
593 recv_msg->msg_id);
594 }
595}
596
597
598
599
600
601
602
603
604
605
/**
 * Reassemble one AEQ element into @recv_msg and dispatch completed
 * messages. @header points at the raw element: a 64-bit message header
 * followed by the segment body.
 *
 * Return value drives the caller's poll loop (hinic_aeq_poll_msg):
 * HINIC_OK only when a complete RESPONSE message arrived; any other
 * return keeps the poller going (more segments expected, a dispatched
 * event, or a sequence error).
 */
static int recv_mgmt_msg_handler(struct hinic_msg_pf_to_mgmt *pf_to_mgmt,
				 u8 *header, struct hinic_recv_msg *recv_msg,
				 void *param)
{
	u64 msg_header = *((u64 *)header);
	void *msg_body = header + sizeof(msg_header);
	u8 *dest_msg;
	u8 seq_id, seq_len;
	u8 front_id;
	u16 msg_id;

	seq_id = HINIC_MSG_HEADER_GET(msg_header, SEQID);
	seq_len = HINIC_MSG_HEADER_GET(msg_header, SEG_LEN);
	/* snapshot the previous id for the log; the check updates state */
	front_id = recv_msg->seq_id;
	msg_id = HINIC_MSG_HEADER_GET(msg_header, MSG_ID);

	if (!check_mgmt_seq_id_and_seg_len(recv_msg, seq_id, seq_len, msg_id)) {
		PMD_DRV_LOG(ERR,
			"Mgmt msg sequence and segment check failed, "
			"func id: 0x%x, front id: 0x%x, current id: 0x%x, seg len: 0x%x "
			"front msg_id: %d, cur msg_id: %d",
			hinic_global_func_id(pf_to_mgmt->hwdev),
			front_id, seq_id, seq_len, recv_msg->msg_id, msg_id);
		return HINIC_ERROR;
	}

	/* copy this segment into its slot of the reassembly buffer */
	dest_msg = (u8 *)recv_msg->msg + seq_id * HINIC_MSG_SEG_LEN;
	memcpy(dest_msg, msg_body, seq_len);

	/* not the last segment: keep polling for the rest */
	if (!HINIC_MSG_HEADER_GET(msg_header, LAST))
		return HINIC_ERROR;

	/* message complete: capture its identity from the header */
	recv_msg->cmd = HINIC_MSG_HEADER_GET(msg_header, CMD);
	recv_msg->mod = HINIC_MSG_HEADER_GET(msg_header, MODULE);
	recv_msg->async_mgmt_to_pf = HINIC_MSG_HEADER_GET(msg_header,
							  ASYNC_MGMT_TO_PF);
	recv_msg->msg_len = HINIC_MSG_HEADER_GET(msg_header, MSG_LEN);
	recv_msg->msg_id = HINIC_MSG_HEADER_GET(msg_header, MSG_ID);

	/* a response completes a sync request - signal the poller */
	if (HINIC_MSG_HEADER_GET(msg_header, DIRECTION) == HINIC_MSG_RESPONSE)
		return HINIC_OK;

	/* a direct send from mgmt is an event: handle it, keep polling */
	hinic_mgmt_recv_msg_handler(pf_to_mgmt, recv_msg, param);

	return HINIC_ERROR;
}
652
653
654
655
656
657
658
659
660
661
662static int hinic_mgmt_msg_aeqe_handler(void *hwdev, u8 *header,
663 __rte_unused u8 size, void *param)
664{
665 struct hinic_msg_pf_to_mgmt *pf_to_mgmt =
666 ((struct hinic_hwdev *)hwdev)->pf_to_mgmt;
667 struct hinic_recv_msg *recv_msg;
668
669 recv_msg = (HINIC_MSG_HEADER_GET(*(u64 *)header, DIRECTION) ==
670 HINIC_MSG_DIRECT_SEND) ?
671 &pf_to_mgmt->recv_msg_from_mgmt :
672 &pf_to_mgmt->recv_resp_msg_from_mgmt;
673
674 return recv_mgmt_msg_handler(pf_to_mgmt, header, recv_msg, param);
675}
676
677static int hinic_handle_aeqe(void *handle, enum hinic_aeq_type event,
678 u8 *data, u8 size, void *param)
679{
680 int rc = 0;
681
682 switch (event) {
683 case HINIC_MSG_FROM_MGMT_CPU:
684 rc = hinic_mgmt_msg_aeqe_handler(handle, data, size, param);
685 break;
686 case HINIC_MBX_FROM_FUNC:
687 rc = hinic_mbox_func_aeqe_handler(handle, data, size, param);
688 break;
689 default:
690 PMD_DRV_LOG(ERR, "Unknown event type: 0x%x, size: %d",
691 event, size);
692 rc = HINIC_ERROR;
693 break;
694 }
695
696 return rc;
697}
698
699
700
701
702
703
704
705
706
707
/**
 * Poll an AEQ and handle its elements.
 * @timeout == 0: sweep up to eq_len elements once, without waiting.
 * @timeout  > 0: for each element wait up to @timeout ms for the queue's
 * wrapped flag to flip, and stop early once a handler returns HINIC_OK
 * (used by the sync path to wait for a mgmt response).
 *
 * Return: 0 when at least the last examined element was valid,
 * -EIO on timeout waiting for an element, -ENODEV on an unsupported
 * software-sourced event.
 */
int hinic_aeq_poll_msg(struct hinic_eq *eq, u32 timeout, void *param)
{
	struct hinic_aeq_elem *aeqe_pos;
	enum hinic_aeq_type event;
	u32 aeqe_desc = 0;
	u16 i;
	u8 size;
	int done = HINIC_ERROR;
	int err = -EFAULT;
	unsigned long end;

	for (i = 0; ((timeout == 0) && (i < eq->eq_len)) ||
	     ((timeout > 0) && (done != HINIC_OK) && (i < eq->eq_len)); i++) {
		err = -EIO;
		end = jiffies + msecs_to_jiffies(timeout);
		do {
			aeqe_pos = GET_CURR_AEQ_ELEM(eq);
			/* read the descriptor only after fetching the slot */
			rte_rmb();

			aeqe_desc = be32_to_cpu(aeqe_pos->desc);

			/* a wrapped bit differing from ours means the
			 * hardware has published a new element here
			 */
			if (EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED)
			    != eq->wrapped) {
				err = 0;
				break;
			}

			if (timeout != 0)
				usleep(1000);
		} while (time_before(jiffies, end));

		/* no new element arrived in time */
		if (err != HINIC_OK)
			break;

		event = EQ_ELEM_DESC_GET(aeqe_desc, TYPE);
		if (EQ_ELEM_DESC_GET(aeqe_desc, SRC)) {
			PMD_DRV_LOG(ERR, "AEQ sw event not support %d", event);
			return -ENODEV;

		} else {
			size = EQ_ELEM_DESC_GET(aeqe_desc, SIZE);
			done = hinic_handle_aeqe(eq->hwdev, event,
						 aeqe_pos->aeqe_data,
						 size, param);
		}

		/* consume the element; toggle wrapped on queue wrap */
		eq->cons_idx++;
		if (eq->cons_idx == eq->eq_len) {
			eq->cons_idx = 0;
			eq->wrapped = !eq->wrapped;
		}
	}

	/* publish the new consumer index to hardware */
	eq_update_ci(eq);

	return err;
}
769
770int hinic_comm_pf_to_mgmt_init(struct hinic_hwdev *hwdev)
771{
772 int rc;
773
774
775 if (hinic_func_type(hwdev) == TYPE_VF)
776 return 0;
777
778 rc = hinic_pf_to_mgmt_init(hwdev);
779 if (rc)
780 return rc;
781
782 hwdev->pf_to_mgmt->rx_aeq = &hwdev->aeqs->aeq[HINIC_MGMT_RSP_AEQN];
783
784 return 0;
785}
786
787void hinic_comm_pf_to_mgmt_free(struct hinic_hwdev *hwdev)
788{
789
790 if (hinic_func_type(hwdev) == TYPE_VF)
791 return;
792
793 hinic_pf_to_mgmt_free(hwdev);
794}
795
796void hinic_dev_handle_aeq_event(struct hinic_hwdev *hwdev, void *param)
797{
798 struct hinic_eq *aeq = &hwdev->aeqs->aeq[0];
799
800
801 hinic_misx_intr_clear_resend_bit(hwdev, aeq->eq_irq.msix_entry_idx,
802 EQ_MSIX_RESEND_TIMER_CLEAR);
803 (void)hinic_aeq_poll_msg(aeq, 0, param);
804}
805