/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#include <ethdev_driver.h>
#include <rte_io.h>

#include "hns3_ethdev.h"
#include "hns3_regs.h"
#include "hns3_logs.h"
#include "hns3_intr.h"
#include "hns3_rxtx.h"

#define HNS3_CMD_CODE_OFFSET		2

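/*
 * Map the error codes carried in PF mailbox responses (Linux errno
 * magnitudes) to local negative errno values.
 */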
static const struct errno_respcode_map err_code_map[] = {
	{0, 0},
	{1, -EPERM},
	{2, -ENOENT},
	{5, -EIO},
	{11, -EAGAIN},
	{12, -ENOMEM},
	{16, -EBUSY},
	{22, -EINVAL},
	{28, -ENOSPC},
	{95, -EOPNOTSUPP},
};

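/*
 * Convert a response code reported by the PF into a negative errno;
 * codes missing from err_code_map are reported as -EIO.
 */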
static int
hns3_resp_to_errno(uint16_t resp_code)
{
	uint32_t i, num;

	num = sizeof(err_code_map) / sizeof(struct errno_respcode_map);
	for (i = 0; i < num; i++) {
		if (err_code_map[i].resp_code == resp_code)
			return err_code_map[i].err_no;
	}

	return -EIO;
}

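/*
 * Log a mailbox response timeout; with the original matching scheme the
 * head/tail/lost counters are included to aid debugging.
 */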
static void
hns3_mbx_proc_timeout(struct hns3_hw *hw, uint16_t code, uint16_t subcode)
{
	if (hw->mbx_resp.matching_scheme ==
	    HNS3_MBX_RESP_MATCHING_SCHEME_OF_ORIGINAL) {
		hw->mbx_resp.lost++;
		hns3_err(hw,
			 "VF could not get mbx(%u,%u) head(%u) tail(%u) "
			 "lost(%u) from PF",
			 code, subcode, hw->mbx_resp.head, hw->mbx_resp.tail,
			 hw->mbx_resp.lost);
		return;
	}

	hns3_err(hw, "VF could not get mbx(%u,%u) from PF", code, subcode);
}

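/*
 * Wait for the PF's response to the request identified by (code, subcode),
 * polling the mailbox every HNS3_WAIT_RESP_US microseconds for at most
 * HNS3_MAX_RETRY_US microseconds. On success, up to resp_len bytes of
 * response payload are copied to resp_data.
 */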
static int
hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
		  uint8_t *resp_data, uint16_t resp_len)
{
#define HNS3_MAX_RETRY_US	500000
#define HNS3_WAIT_RESP_US	100
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_mbx_resp_status *mbx_resp;
	uint32_t wait_time = 0;
	bool received;

	if (resp_len > HNS3_MBX_MAX_RESP_DATA_SIZE) {
		hns3_err(hw, "VF mbx response len(=%u) exceeds maximum(=%d)",
			 resp_len, HNS3_MBX_MAX_RESP_DATA_SIZE);
		return -EINVAL;
	}

	while (wait_time < HNS3_MAX_RETRY_US) {
		if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
			hns3_err(hw, "Don't wait for mbx response because of "
				 "disable_cmd");
			return -EBUSY;
		}

		if (is_reset_pending(hns)) {
			hw->mbx_resp.req_msg_data = 0;
			hns3_err(hw, "Don't wait for mbx response because of "
				 "reset pending");
			return -EIO;
		}

		hns3_dev_handle_mbx_msg(hw);
		rte_delay_us(HNS3_WAIT_RESP_US);

		if (hw->mbx_resp.matching_scheme ==
		    HNS3_MBX_RESP_MATCHING_SCHEME_OF_ORIGINAL)
			received = (hw->mbx_resp.head ==
				    hw->mbx_resp.tail + hw->mbx_resp.lost);
		else
			received = hw->mbx_resp.received_match_resp;
		if (received)
			break;

		wait_time += HNS3_WAIT_RESP_US;
	}
	hw->mbx_resp.req_msg_data = 0;
	if (wait_time >= HNS3_MAX_RETRY_US) {
		hns3_mbx_proc_timeout(hw, code, subcode);
		return -ETIME;
	}
	rte_io_rmb();
	mbx_resp = &hw->mbx_resp;

	if (mbx_resp->resp_status)
		return mbx_resp->resp_status;

	if (resp_data)
		memcpy(resp_data, &mbx_resp->additional_info[0], resp_len);

	return 0;
}

static void
hns3_mbx_prepare_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode)
{
	/*
	 * Init both matching scheme fields because we may not know the exact
	 * scheme which will be used when in the initial phase.
	 *
	 * It is also OK to init both matching scheme fields even though we
	 * already know the exact scheme which is used.
	 */
	hw->mbx_resp.req_msg_data = (uint32_t)code << 16 | subcode;
	hw->mbx_resp.head++;

	/* Update match_id and ensure the value of match_id is not zero */
	hw->mbx_resp.match_id++;
	if (hw->mbx_resp.match_id == 0)
		hw->mbx_resp.match_id = 1;
	hw->mbx_resp.received_match_resp = false;

	hw->mbx_resp.resp_status = 0;
	memset(hw->mbx_resp.additional_info, 0, HNS3_MBX_MAX_RESP_DATA_SIZE);
}

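/*
 * Send a mailbox message to the PF. When need_resp is true, the call is
 * synchronous: it holds the mbx_resp lock, waits for the PF's reply and
 * copies up to resp_len bytes of reply data into resp_data.
 */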
int
hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
		  const uint8_t *msg_data, uint8_t msg_len, bool need_resp,
		  uint8_t *resp_data, uint16_t resp_len)
{
	struct hns3_mbx_vf_to_pf_cmd *req;
	struct hns3_cmd_desc desc;
	bool is_ring_vector_msg;
	int offset;
	int ret;

	req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data;

	/* first two bytes are reserved for code & subcode */
	if (msg_len > (HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET)) {
		hns3_err(hw,
			 "VF send mbx msg fail, msg len %u exceeds max payload len %d",
			 msg_len, HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET);
		return -EINVAL;
	}

	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);
	req->msg[0] = code;
	is_ring_vector_msg = (code == HNS3_MBX_MAP_RING_TO_VECTOR) ||
			     (code == HNS3_MBX_UNMAP_RING_TO_VECTOR) ||
			     (code == HNS3_MBX_GET_RING_VECTOR_MAP);
	if (!is_ring_vector_msg)
		req->msg[1] = subcode;
	if (msg_data) {
		offset = is_ring_vector_msg ? 1 : HNS3_CMD_CODE_OFFSET;
		memcpy(&req->msg[offset], msg_data, msg_len);
	}

	/* synchronous send */
	if (need_resp) {
		req->mbx_need_resp |= HNS3_MBX_NEED_RESP_BIT;
		rte_spinlock_lock(&hw->mbx_resp.lock);
		hns3_mbx_prepare_resp(hw, code, subcode);
		req->match_id = hw->mbx_resp.match_id;
		ret = hns3_cmd_send(hw, &desc, 1);
		if (ret) {
			hw->mbx_resp.head--;
			rte_spinlock_unlock(&hw->mbx_resp.lock);
			hns3_err(hw, "VF failed(=%d) to send mbx message to PF",
				 ret);
			return ret;
		}

		ret = hns3_get_mbx_resp(hw, code, subcode, resp_data, resp_len);
		rte_spinlock_unlock(&hw->mbx_resp.lock);
	} else {
		/* asynchronous send */
		ret = hns3_cmd_send(hw, &desc, 1);
		if (ret) {
			hns3_err(hw, "VF failed(=%d) to send mbx message to PF",
				 ret);
			return ret;
		}
	}

	return ret;
}

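/* Return true when the control receive queue (CRQ) has no new descriptors. */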
static bool
hns3_cmd_crq_empty(struct hns3_hw *hw)
{
	uint32_t tail = hns3_read_dev(hw, HNS3_CMDQ_RX_TAIL_REG);

	return tail == hw->cmq.crq.next_to_use;
}

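/*
 * Handle a link status change message pushed by the PF: update the VF's
 * link status, speed and duplex, and record whether the PF supports
 * pushing link status change (LSC) events.
 */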
static void
hns3vf_handle_link_change_event(struct hns3_hw *hw,
				struct hns3_mbx_pf_to_vf_cmd *req)
{
	uint8_t link_status, link_duplex;
	uint16_t *msg_q = req->msg;
	uint8_t support_push_lsc;
	uint32_t link_speed;

	memcpy(&link_speed, &msg_q[2], sizeof(link_speed));
	link_status = rte_le_to_cpu_16(msg_q[1]);
	link_duplex = (uint8_t)rte_le_to_cpu_16(msg_q[4]);
	hns3vf_update_link_status(hw, link_status, link_speed,
				  link_duplex);
	support_push_lsc = (*(uint8_t *)&msg_q[5]) & 1u;
	hns3vf_update_push_lsc_cap(hw, support_push_lsc);
}

static void
hns3_handle_asserting_reset(struct hns3_hw *hw,
			    struct hns3_mbx_pf_to_vf_cmd *req)
{
	enum hns3_reset_level reset_level;
	uint16_t *msg_q = req->msg;

	/*
	 * PF has asserted reset hence VF should go in pending
	 * state and poll for the hardware reset status till it
	 * has been completely reset. After this stack should
	 * eventually be re-initialized.
	 */
	reset_level = rte_le_to_cpu_16(msg_q[1]);
	hns3_atomic_set_bit(reset_level, &hw->reset.pending);

	hns3_warn(hw, "PF inform reset level %d", reset_level);
	hw->reset.stats.request_cnt++;
	hns3_schedule_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
}

/*
 * Case 1: a response arrives after the timeout: req_msg_data was cleared
 *         and no longer equals resp_msg, so decrement 'lost'.
 * Case 2: the response to a previous request arrives while a new request
 *         is pending: resp_msg differs from req_msg_data, so decrement
 *         'lost' and continue waiting for the matching response.
 */
static void
hns3_update_resp_position(struct hns3_hw *hw, uint32_t resp_msg)
{
	struct hns3_mbx_resp_status *resp = &hw->mbx_resp;
	uint32_t tail = resp->tail + 1;

	if (tail > resp->head)
		tail = resp->head;
	if (resp->req_msg_data != resp_msg) {
		if (resp->lost)
			resp->lost--;
		hns3_warn(hw, "Received a mismatched response req_msg(%x) "
			  "resp_msg(%x) head(%u) tail(%u) lost(%u)",
			  resp->req_msg_data, resp_msg, resp->head, tail,
			  resp->lost);
	} else if (tail + resp->lost > resp->head) {
		resp->lost--;
		hns3_warn(hw, "Received a new response again resp_msg(%x) "
			  "head(%u) tail(%u) lost(%u)", resp_msg,
			  resp->head, tail, resp->lost);
	}
	rte_io_wmb();
	resp->tail = tail;
}

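/*
 * Dispatch a HNS3_MBX_PF_VF_RESP message to whichever response matching
 * scheme is in effect: match_id based when the PF echoes a non-zero
 * match_id, otherwise the original head/tail based scheme.
 */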
static void
hns3_handle_mbx_response(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req)
{
	struct hns3_mbx_resp_status *resp = &hw->mbx_resp;
	uint32_t msg_data;

	if (req->match_id != 0) {
		/*
		 * If match_id is not zero, it means PF supports copying the
		 * request's match_id to its response. So VF could use the
		 * match_id to match the request.
		 */
		if (resp->matching_scheme !=
		    HNS3_MBX_RESP_MATCHING_SCHEME_OF_MATCH_ID) {
			resp->matching_scheme =
				HNS3_MBX_RESP_MATCHING_SCHEME_OF_MATCH_ID;
			hns3_info(hw, "detect mailbox support match id!");
		}
		if (req->match_id == resp->match_id) {
			resp->resp_status = hns3_resp_to_errno(req->msg[3]);
			memcpy(resp->additional_info, &req->msg[4],
			       HNS3_MBX_MAX_RESP_DATA_SIZE);
			rte_io_wmb();
			resp->received_match_resp = true;
		}
		return;
	}

	/*
	 * If the below instructions can be executed, it means PF does not
	 * support copying the request's match_id to its response. So VF
	 * follows the original matching scheme to process.
	 */
	resp->resp_status = hns3_resp_to_errno(req->msg[3]);
	memcpy(resp->additional_info, &req->msg[4],
	       HNS3_MBX_MAX_RESP_DATA_SIZE);
	msg_data = (uint32_t)req->msg[1] << 16 | req->msg[2];
	hns3_update_resp_position(hw, msg_data);
}

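/* Log the cause the PF firmware reported for a link failure. */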
static void
hns3_link_fail_parse(struct hns3_hw *hw, uint8_t link_fail_code)
{
	switch (link_fail_code) {
	case HNS3_MBX_LF_NORMAL:
		break;
	case HNS3_MBX_LF_REF_CLOCK_LOST:
		hns3_warn(hw, "Reference clock lost!");
		break;
	case HNS3_MBX_LF_XSFP_TX_DISABLE:
		hns3_warn(hw, "SFP tx is disabled!");
		break;
	case HNS3_MBX_LF_XSFP_ABSENT:
		hns3_warn(hw, "SFP is absent!");
		break;
	default:
		hns3_warn(hw, "Unknown fail code:%u!", link_fail_code);
		break;
	}
}

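/*
 * Handle a link change message pushed by the firmware on a PF device: log
 * the failure cause when the link is down, then refresh the link status
 * and deliver the LSC event.
 */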
static void
hns3pf_handle_link_change_event(struct hns3_hw *hw,
				struct hns3_mbx_vf_to_pf_cmd *req)
{
#define LINK_STATUS_OFFSET	1
#define LINK_FAIL_CODE_OFFSET	2

	if (!req->msg[LINK_STATUS_OFFSET])
		hns3_link_fail_parse(hw, req->msg[LINK_FAIL_CODE_OFFSET]);

	hns3_update_linkstatus_and_event(hw, true);
}

static void
hns3_update_port_base_vlan_info(struct hns3_hw *hw,
				struct hns3_mbx_pf_to_vf_cmd *req)
{
#define PVID_STATE_OFFSET	1
	uint16_t new_pvid_state = req->msg[PVID_STATE_OFFSET] ?
		HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE;
	/*
	 * Currently, hardware doesn't support more than two layers of VLAN
	 * offload based on the hns3 network engine, which would cause packet
	 * loss or wrong packets for these types of packets. If the hns3 PF
	 * kernel ethdev driver sets the PVID for a VF device after the
	 * initialization of that VF device, the PF driver notifies the VF
	 * driver to update the PVID configuration state. The VF driver then
	 * updates the PVID state so that dynamic modification of the PVID
	 * configuration is supported when the PVID state changes.
	 */
	if (hw->port_base_vlan_cfg.state != new_pvid_state) {
		hw->port_base_vlan_cfg.state = new_pvid_state;
		hns3_update_all_queues_pvid_proc_en(hw);
	}
}

static void
hns3_handle_promisc_info(struct hns3_hw *hw, uint16_t promisc_en)
{
	if (!promisc_en) {
		/*
		 * When promisc/allmulti mode is closed by the hns3 PF kernel
		 * ethdev driver because the VF is untrusted, update the VF's
		 * related status accordingly.
		 */
		hns3_warn(hw, "Promisc mode will be closed by host for being "
			      "untrusted.");
		hw->data->promiscuous = 0;
		hw->data->all_multicast = 0;
	}
}

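/*
 * Scan the CRQ for pending HNS3_MBX_PF_VF_RESP messages without moving
 * next_to_use or acknowledging descriptors; used by threads other than the
 * primary process's interrupt thread, which only need response messages.
 */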
static void
hns3_handle_mbx_msg_out_intr(struct hns3_hw *hw)
{
	struct hns3_cmq_ring *crq = &hw->cmq.crq;
	struct hns3_mbx_pf_to_vf_cmd *req;
	struct hns3_cmd_desc *desc;
	uint32_t tail, next_to_use;
	uint8_t opcode;
	uint16_t flag;

	tail = hns3_read_dev(hw, HNS3_CMDQ_RX_TAIL_REG);
	next_to_use = crq->next_to_use;
	while (next_to_use != tail) {
		desc = &crq->desc[next_to_use];
		req = (struct hns3_mbx_pf_to_vf_cmd *)desc->data;
		opcode = req->msg[0] & 0xff;

		flag = rte_le_to_cpu_16(crq->desc[next_to_use].flag);
		if (!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))
			goto scan_next;

		if (crq->desc[next_to_use].opcode == 0)
			goto scan_next;

		if (opcode == HNS3_MBX_PF_VF_RESP) {
			hns3_handle_mbx_response(hw, req);

			/*
			 * Clear opcode to inform the interrupt thread not to
			 * process this message again.
			 */
			crq->desc[next_to_use].opcode = 0;
		}

scan_next:
		next_to_use = (next_to_use + 1) % hw->cmq.crq.desc_num;
	}
}

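/*
 * Main mailbox processing loop: consume all pending CRQ messages, dispatch
 * them by opcode and acknowledge the hardware by advancing the CRQ head
 * register. Outside the primary process's interrupt thread, only response
 * messages are scanned and the queue state is left untouched.
 */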
void
hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
{
	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
	struct hns3_cmq_ring *crq = &hw->cmq.crq;
	struct hns3_mbx_pf_to_vf_cmd *req;
	struct hns3_cmd_desc *desc;
	bool handle_out;
	uint8_t opcode;
	uint16_t flag;

	rte_spinlock_lock(&hw->cmq.crq.lock);

	handle_out = (rte_eal_process_type() != RTE_PROC_PRIMARY ||
		      !rte_thread_is_intr()) && hns->is_vf;
	if (handle_out) {
		/*
		 * Currently, any thread in the primary and secondary processes
		 * could send a mailbox sync request, so each of them has to
		 * process the CRQ response messages (HNS3_MBX_PF_VF_RESP) in
		 * its own thread context.
		 * But other messages (e.g. link status change, PVID change)
		 * must only be processed in the interrupt thread of the
		 * primary process, otherwise events such as LSC could be
		 * reported from the wrong context. So threads other than the
		 * primary process's interrupt thread only scan for responses
		 * here and leave the queue state (next_to_use and the head
		 * register) untouched.
		 */
		hns3_handle_mbx_msg_out_intr(hw);
		rte_spinlock_unlock(&hw->cmq.crq.lock);
		return;
	}

	while (!hns3_cmd_crq_empty(hw)) {
		if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
			rte_spinlock_unlock(&hw->cmq.crq.lock);
			return;
		}

		desc = &crq->desc[crq->next_to_use];
		req = (struct hns3_mbx_pf_to_vf_cmd *)desc->data;
		opcode = req->msg[0] & 0xff;

		flag = rte_le_to_cpu_16(crq->desc[crq->next_to_use].flag);
		if (unlikely(!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))) {
			hns3_warn(hw,
				  "dropped invalid mailbox message, code = %u",
				  opcode);

			/* dropping/not processing this invalid message */
			crq->desc[crq->next_to_use].flag = 0;
			hns3_mbx_ring_ptr_move_crq(crq);
			continue;
		}

		handle_out = hns->is_vf && desc->opcode == 0;
		if (handle_out) {
			/* Message already processed by another thread. */
			crq->desc[crq->next_to_use].flag = 0;
			hns3_mbx_ring_ptr_move_crq(crq);
			continue;
		}

		switch (opcode) {
		case HNS3_MBX_PF_VF_RESP:
			hns3_handle_mbx_response(hw, req);
			break;
		case HNS3_MBX_LINK_STAT_CHANGE:
			hns3vf_handle_link_change_event(hw, req);
			break;
		case HNS3_MBX_ASSERTING_RESET:
			hns3_handle_asserting_reset(hw, req);
			break;
		case HNS3_MBX_PUSH_LINK_STATUS:
			/*
			 * This message is reported by the firmware and is
			 * reported in 'struct hns3_mbx_vf_to_pf_cmd' format.
			 * Therefore, we should cast the req variable to
			 * 'struct hns3_mbx_vf_to_pf_cmd' and then process it.
			 */
			hns3pf_handle_link_change_event(hw,
				(struct hns3_mbx_vf_to_pf_cmd *)req);
			break;
		case HNS3_MBX_PUSH_VLAN_INFO:
			/*
			 * When the PVID configuration status of VF device is
			 * changed by the hns3 PF kernel driver, VF driver will
			 * receive this mailbox message from PF driver.
			 */
			hns3_update_port_base_vlan_info(hw, req);
			break;
		case HNS3_MBX_PUSH_PROMISC_INFO:
			/*
			 * When the trust status of VF device is changed by the
			 * hns3 PF kernel driver, VF driver will receive this
			 * mailbox message from PF driver.
			 */
			hns3_handle_promisc_info(hw, req->msg[1]);
			break;
		default:
			hns3_err(hw, "received unsupported(%u) mbx msg",
				 opcode);
			break;
		}

		crq->desc[crq->next_to_use].flag = 0;
		hns3_mbx_ring_ptr_move_crq(crq);
	}

	/* Write back the CRQ head pointer; the IMP firmware needs it. */
	hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, crq->next_to_use);

	rte_spinlock_unlock(&hw->cmq.crq.lock);
}