dpdk/drivers/net/hns3/hns3_mbx.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2021 HiSilicon Limited.
 */

#include <ethdev_driver.h>
#include <rte_io.h>

#include "hns3_ethdev.h"
#include "hns3_regs.h"
#include "hns3_logs.h"
#include "hns3_intr.h"
#include "hns3_rxtx.h"

#define HNS3_CMD_CODE_OFFSET            2

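/*
 * The PF reports command results as positive Linux errno numbers in the
 * mailbox response (e.g. 1 = EPERM, 22 = EINVAL). Map them to the negative
 * errno values used by DPDK; unknown codes fall back to -EIO.
 */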
static const struct errno_respcode_map err_code_map[] = {
        {0, 0},
        {1, -EPERM},
        {2, -ENOENT},
        {5, -EIO},
        {11, -EAGAIN},
        {12, -ENOMEM},
        {16, -EBUSY},
        {22, -EINVAL},
        {28, -ENOSPC},
        {95, -EOPNOTSUPP},
};

static int
hns3_resp_to_errno(uint16_t resp_code)
{
        uint32_t i, num;

        num = sizeof(err_code_map) / sizeof(struct errno_respcode_map);
        for (i = 0; i < num; i++) {
                if (err_code_map[i].resp_code == resp_code)
                        return err_code_map[i].err_no;
        }

        return -EIO;
}

static void
hns3_mbx_proc_timeout(struct hns3_hw *hw, uint16_t code, uint16_t subcode)
{
        if (hw->mbx_resp.matching_scheme ==
            HNS3_MBX_RESP_MATCHING_SCHEME_OF_ORIGINAL) {
                hw->mbx_resp.lost++;
                hns3_err(hw,
                         "VF could not get mbx(%u,%u) head(%u) tail(%u) "
                         "lost(%u) from PF",
                         code, subcode, hw->mbx_resp.head, hw->mbx_resp.tail,
                         hw->mbx_resp.lost);
                return;
        }

        hns3_err(hw, "VF could not get mbx(%u,%u) from PF", code, subcode);
}

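/*
 * Wait for a mailbox response from the PF: poll (and process) the CRQ in
 * HNS3_WAIT_RESP_US steps for at most HNS3_MAX_RETRY_US, bailing out early
 * if the command interface is disabled or a reset is pending. A response is
 * matched either by the head/tail/lost counters (original scheme) or by the
 * received_match_resp flag (match_id scheme).
 */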
static int
hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
                  uint8_t *resp_data, uint16_t resp_len)
{
#define HNS3_MAX_RETRY_US       500000
#define HNS3_WAIT_RESP_US       100
        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
        struct hns3_mbx_resp_status *mbx_resp;
        uint32_t wait_time = 0;
        bool received;

        if (resp_len > HNS3_MBX_MAX_RESP_DATA_SIZE) {
                hns3_err(hw, "VF mbx response len(=%u) exceeds maximum(=%d)",
                         resp_len, HNS3_MBX_MAX_RESP_DATA_SIZE);
                return -EINVAL;
        }

        while (wait_time < HNS3_MAX_RETRY_US) {
                if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
                        hns3_err(hw, "Don't wait for mbx response because of "
                                 "disable_cmd");
                        return -EBUSY;
                }

                if (is_reset_pending(hns)) {
                        hw->mbx_resp.req_msg_data = 0;
                        hns3_err(hw, "Don't wait for mbx response because of "
                                 "reset pending");
                        return -EIO;
                }

                hns3_dev_handle_mbx_msg(hw);
                rte_delay_us(HNS3_WAIT_RESP_US);

                if (hw->mbx_resp.matching_scheme ==
                    HNS3_MBX_RESP_MATCHING_SCHEME_OF_ORIGINAL)
                        received = (hw->mbx_resp.head ==
                                    hw->mbx_resp.tail + hw->mbx_resp.lost);
                else
                        received = hw->mbx_resp.received_match_resp;
                if (received)
                        break;

                wait_time += HNS3_WAIT_RESP_US;
        }
        hw->mbx_resp.req_msg_data = 0;
        if (wait_time >= HNS3_MAX_RETRY_US) {
                hns3_mbx_proc_timeout(hw, code, subcode);
                return -ETIME;
        }
        rte_io_rmb();
        mbx_resp = &hw->mbx_resp;

        if (mbx_resp->resp_status)
                return mbx_resp->resp_status;

        if (resp_data)
                memcpy(resp_data, &mbx_resp->additional_info[0], resp_len);

        return 0;
}

static void
hns3_mbx_prepare_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode)
{
        /*
         * Init both matching scheme fields because we may not know the exact
         * scheme that will be used during the initial phase.
         *
         * It is also OK to init both matching scheme fields even after the
         * exact scheme in use is known.
         */
        hw->mbx_resp.req_msg_data = (uint32_t)code << 16 | subcode;
        hw->mbx_resp.head++;

        /* Update match_id and ensure the value of match_id is not zero */
        hw->mbx_resp.match_id++;
        if (hw->mbx_resp.match_id == 0)
                hw->mbx_resp.match_id = 1;
        hw->mbx_resp.received_match_resp = false;

        hw->mbx_resp.resp_status = 0;
        memset(hw->mbx_resp.additional_info, 0, HNS3_MBX_MAX_RESP_DATA_SIZE);
}

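/*
 * Send a mailbox message from VF to PF. For ring-vector messages the
 * subcode byte is part of the payload, so msg_data is copied starting at
 * offset 1 instead of HNS3_CMD_CODE_OFFSET. When need_resp is true, the
 * send is synchronous: mbx_resp.lock serializes request/response pairs and
 * the call blocks in hns3_get_mbx_resp() until the PF answers or the wait
 * times out.
 */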
int
hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode,
                  const uint8_t *msg_data, uint8_t msg_len, bool need_resp,
                  uint8_t *resp_data, uint16_t resp_len)
{
        struct hns3_mbx_vf_to_pf_cmd *req;
        struct hns3_cmd_desc desc;
        bool is_ring_vector_msg;
        int offset;
        int ret;

        req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data;

        /* first two bytes are reserved for code & subcode */
        if (msg_len > (HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET)) {
                hns3_err(hw,
                         "VF send mbx msg fail, msg len %u exceeds max payload len %d",
                         msg_len, HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET);
                return -EINVAL;
        }

        hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);
        req->msg[0] = code;
        is_ring_vector_msg = (code == HNS3_MBX_MAP_RING_TO_VECTOR) ||
                             (code == HNS3_MBX_UNMAP_RING_TO_VECTOR) ||
                             (code == HNS3_MBX_GET_RING_VECTOR_MAP);
        if (!is_ring_vector_msg)
                req->msg[1] = subcode;
        if (msg_data) {
                offset = is_ring_vector_msg ? 1 : HNS3_CMD_CODE_OFFSET;
                memcpy(&req->msg[offset], msg_data, msg_len);
        }

        /* synchronous send */
        if (need_resp) {
                req->mbx_need_resp |= HNS3_MBX_NEED_RESP_BIT;
                rte_spinlock_lock(&hw->mbx_resp.lock);
                hns3_mbx_prepare_resp(hw, code, subcode);
                req->match_id = hw->mbx_resp.match_id;
                ret = hns3_cmd_send(hw, &desc, 1);
                if (ret) {
                        hw->mbx_resp.head--;
                        rte_spinlock_unlock(&hw->mbx_resp.lock);
                        hns3_err(hw, "VF failed(=%d) to send mbx message to PF",
                                 ret);
                        return ret;
                }

                ret = hns3_get_mbx_resp(hw, code, subcode, resp_data, resp_len);
                rte_spinlock_unlock(&hw->mbx_resp.lock);
        } else {
                /* asynchronous send */
                ret = hns3_cmd_send(hw, &desc, 1);
                if (ret) {
                        hns3_err(hw, "VF failed(=%d) to send mbx message to PF",
                                 ret);
                        return ret;
                }
        }

        return ret;
}

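/*
 * The CRQ is empty when the hardware tail pointer equals the next
 * descriptor the driver would consume.
 */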
static bool
hns3_cmd_crq_empty(struct hns3_hw *hw)
{
        uint32_t tail = hns3_read_dev(hw, HNS3_CMDQ_RX_TAIL_REG);

        return tail == hw->cmq.crq.next_to_use;
}

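/*
 * Layout of the link change message (16-bit words of req->msg):
 * msg[1] carries the link status, msg[2..3] the 32-bit link speed and
 * msg[4] the duplex mode; bit 0 of the first byte of msg[5] tells whether
 * the PF can push link status change (lsc) events to the VF.
 */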
static void
hns3vf_handle_link_change_event(struct hns3_hw *hw,
                                struct hns3_mbx_pf_to_vf_cmd *req)
{
        uint8_t link_status, link_duplex;
        uint16_t *msg_q = req->msg;
        uint8_t support_push_lsc;
        uint32_t link_speed;

        memcpy(&link_speed, &msg_q[2], sizeof(link_speed));
        link_status = rte_le_to_cpu_16(msg_q[1]);
        link_duplex = (uint8_t)rte_le_to_cpu_16(msg_q[4]);
        hns3vf_update_link_status(hw, link_status, link_speed,
                                  link_duplex);
        support_push_lsc = (*(uint8_t *)&msg_q[5]) & 1u;
        hns3vf_update_push_lsc_cap(hw, support_push_lsc);
}

static void
hns3_handle_asserting_reset(struct hns3_hw *hw,
                            struct hns3_mbx_pf_to_vf_cmd *req)
{
        enum hns3_reset_level reset_level;
        uint16_t *msg_q = req->msg;

        /*
         * PF has asserted reset, hence the VF should go into the pending
         * state and poll the hardware reset status until the reset has
         * completed. After that, the stack should eventually be
         * re-initialized.
         */
        reset_level = rte_le_to_cpu_16(msg_q[1]);
        hns3_atomic_set_bit(reset_level, &hw->reset.pending);

        hns3_warn(hw, "PF inform reset level %d", reset_level);
        hw->reset.stats.request_cnt++;
        hns3_schedule_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
}

/*
 * Case 1: the response arrives after the request has timed out; req_msg_data
 *         has been cleared to 0 and does not equal resp_msg, so decrement
 *         lost.
 * Case 2: the previous response arrives while a new send_mbx_msg is in
 *         flight; req_msg_data differs from resp_msg, so decrement lost and
 *         keep waiting for the matching response.
 */
static void
hns3_update_resp_position(struct hns3_hw *hw, uint32_t resp_msg)
{
        struct hns3_mbx_resp_status *resp = &hw->mbx_resp;
        uint32_t tail = resp->tail + 1;

        if (tail > resp->head)
                tail = resp->head;
        if (resp->req_msg_data != resp_msg) {
                if (resp->lost)
                        resp->lost--;
                hns3_warn(hw, "Received a mismatched response req_msg(%x) "
                          "resp_msg(%x) head(%u) tail(%u) lost(%u)",
                          resp->req_msg_data, resp_msg, resp->head, tail,
                          resp->lost);
        } else if (tail + resp->lost > resp->head) {
                resp->lost--;
                hns3_warn(hw, "Received a new response again resp_msg(%x) "
                          "head(%u) tail(%u) lost(%u)", resp_msg,
                          resp->head, tail, resp->lost);
        }
        rte_io_wmb();
        resp->tail = tail;
}

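/*
 * The response payload (resp_status/additional_info) is written before the
 * rte_io_wmb() that publishes received_match_resp (or tail); this pairs
 * with the rte_io_rmb() in hns3_get_mbx_resp() so the waiting thread reads
 * a complete response.
 */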
static void
hns3_handle_mbx_response(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req)
{
        struct hns3_mbx_resp_status *resp = &hw->mbx_resp;
        uint32_t msg_data;

        if (req->match_id != 0) {
                /*
                 * If match_id is not zero, the PF supports copying the
                 * request's match_id into its response, so the VF can use
                 * the match_id to match requests with responses.
                 */
                if (resp->matching_scheme !=
                    HNS3_MBX_RESP_MATCHING_SCHEME_OF_MATCH_ID) {
                        resp->matching_scheme =
                                HNS3_MBX_RESP_MATCHING_SCHEME_OF_MATCH_ID;
                        hns3_info(hw, "detect mailbox support match id!");
                }
                if (req->match_id == resp->match_id) {
                        resp->resp_status = hns3_resp_to_errno(req->msg[3]);
                        memcpy(resp->additional_info, &req->msg[4],
                               HNS3_MBX_MAX_RESP_DATA_SIZE);
                        rte_io_wmb();
                        resp->received_match_resp = true;
                }
                return;
        }

        /*
         * Reaching here means the PF does not support copying the request's
         * match_id into its response, so the VF falls back to the original
         * matching scheme.
         */
        resp->resp_status = hns3_resp_to_errno(req->msg[3]);
        memcpy(resp->additional_info, &req->msg[4],
               HNS3_MBX_MAX_RESP_DATA_SIZE);
        msg_data = (uint32_t)req->msg[1] << 16 | req->msg[2];
        hns3_update_resp_position(hw, msg_data);
}

static void
hns3_link_fail_parse(struct hns3_hw *hw, uint8_t link_fail_code)
{
        switch (link_fail_code) {
        case HNS3_MBX_LF_NORMAL:
                break;
        case HNS3_MBX_LF_REF_CLOCK_LOST:
                hns3_warn(hw, "Reference clock lost!");
                break;
        case HNS3_MBX_LF_XSFP_TX_DISABLE:
                hns3_warn(hw, "SFP tx is disabled!");
                break;
        case HNS3_MBX_LF_XSFP_ABSENT:
                hns3_warn(hw, "SFP is absent!");
                break;
        default:
                hns3_warn(hw, "Unknown fail code:%u!", link_fail_code);
                break;
        }
}

static void
hns3pf_handle_link_change_event(struct hns3_hw *hw,
                                struct hns3_mbx_vf_to_pf_cmd *req)
{
#define LINK_STATUS_OFFSET     1
#define LINK_FAIL_CODE_OFFSET  2

        if (!req->msg[LINK_STATUS_OFFSET])
                hns3_link_fail_parse(hw, req->msg[LINK_FAIL_CODE_OFFSET]);

        hns3_update_linkstatus_and_event(hw, true);
}

static void
hns3_update_port_base_vlan_info(struct hns3_hw *hw,
                                struct hns3_mbx_pf_to_vf_cmd *req)
{
#define PVID_STATE_OFFSET       1
        uint16_t new_pvid_state = req->msg[PVID_STATE_OFFSET] ?
                HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE;
        /*
         * Currently, hardware based on the hns3 network engine doesn't
         * support more than two layers of VLAN offload, which would cause
         * packet loss or corrupted packets for such traffic. If the hns3 PF
         * kernel ethdev driver sets the PVID for a VF device after the
         * related VF device has been initialized, the PF driver notifies the
         * VF driver to update the PVID configuration state. The VF driver
         * updates the PVID configuration state immediately to ensure that
         * the VLAN process in Tx and Rx is correct. But in the window period
         * of this state transition, packet loss or packets with wrong VLAN
         * tags may occur.
         */
        if (hw->port_base_vlan_cfg.state != new_pvid_state) {
                hw->port_base_vlan_cfg.state = new_pvid_state;
                hns3_update_all_queues_pvid_proc_en(hw);
        }
}

static void
hns3_handle_promisc_info(struct hns3_hw *hw, uint16_t promisc_en)
{
        if (!promisc_en) {
                /*
                 * When promisc/allmulti mode is disabled by the hns3 PF
                 * kernel ethdev driver because the VF is untrusted, update
                 * the VF's related status.
                 */
                hns3_warn(hw, "Promisc mode will be closed by host for being "
                              "untrusted.");
                hw->data->promiscuous = 0;
                hw->data->all_multicast = 0;
        }
}

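/*
 * Scan the CRQ from the current consumer position up to the hardware tail
 * without advancing next_to_use: threads outside the intr thread only
 * consume HNS3_MBX_PF_VF_RESP messages and clear the descriptor opcode to
 * mark them handled, leaving everything else (and the ring pointer) to the
 * intr thread.
 */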
static void
hns3_handle_mbx_msg_out_intr(struct hns3_hw *hw)
{
        struct hns3_cmq_ring *crq = &hw->cmq.crq;
        struct hns3_mbx_pf_to_vf_cmd *req;
        struct hns3_cmd_desc *desc;
        uint32_t tail, next_to_use;
        uint8_t opcode;
        uint16_t flag;

        tail = hns3_read_dev(hw, HNS3_CMDQ_RX_TAIL_REG);
        next_to_use = crq->next_to_use;
        while (next_to_use != tail) {
                desc = &crq->desc[next_to_use];
                req = (struct hns3_mbx_pf_to_vf_cmd *)desc->data;
                opcode = req->msg[0] & 0xff;

                flag = rte_le_to_cpu_16(crq->desc[next_to_use].flag);
                if (!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))
                        goto scan_next;

                if (crq->desc[next_to_use].opcode == 0)
                        goto scan_next;

                if (opcode == HNS3_MBX_PF_VF_RESP) {
                        hns3_handle_mbx_response(hw, req);
                        /*
                         * Clear opcode to inform the intr thread not to
                         * process this message again. Note: clear the
                         * descriptor being scanned (next_to_use), not
                         * crq->next_to_use, which is not advanced here.
                         */
                        crq->desc[next_to_use].opcode = 0;
                }

scan_next:
                next_to_use = (next_to_use + 1) % hw->cmq.crq.desc_num;
        }
}

void
hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
{
        struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
        struct hns3_cmq_ring *crq = &hw->cmq.crq;
        struct hns3_mbx_pf_to_vf_cmd *req;
        struct hns3_cmd_desc *desc;
        bool handle_out;
        uint8_t opcode;
        uint16_t flag;

        rte_spinlock_lock(&hw->cmq.crq.lock);

        handle_out = (rte_eal_process_type() != RTE_PROC_PRIMARY ||
                      !rte_thread_is_intr()) && hns->is_vf;
        if (handle_out) {
                /*
                 * Currently, any thread in the primary and secondary
                 * processes could send a mailbox sync request, so each such
                 * thread needs to process the CRQ response message (namely
                 * HNS3_MBX_PF_VF_RESP) in its own context. It may also
                 * process other messages because it uses the policy of
                 * processing all pending messages at once.
                 * But some messages such as HNS3_MBX_PUSH_LINK_STATUS can
                 * only be processed within the intr thread in the primary
                 * process, otherwise an lsc event may be reported in a
                 * secondary process.
                 * So threads other than the intr thread in the primary
                 * process only process HNS3_MBX_PF_VF_RESP messages; once a
                 * message is processed, its opcode is rewritten to zero so
                 * that the intr thread in the primary process does not
                 * process it again.
                 */
                hns3_handle_mbx_msg_out_intr(hw);
                rte_spinlock_unlock(&hw->cmq.crq.lock);
                return;
        }

        while (!hns3_cmd_crq_empty(hw)) {
                if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
                        rte_spinlock_unlock(&hw->cmq.crq.lock);
                        return;
                }

                desc = &crq->desc[crq->next_to_use];
                req = (struct hns3_mbx_pf_to_vf_cmd *)desc->data;
                opcode = req->msg[0] & 0xff;

                flag = rte_le_to_cpu_16(crq->desc[crq->next_to_use].flag);
                if (unlikely(!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))) {
                        hns3_warn(hw,
                                  "dropped invalid mailbox message, code = %u",
                                  opcode);

                        /* dropping/not processing this invalid message */
                        crq->desc[crq->next_to_use].flag = 0;
                        hns3_mbx_ring_ptr_move_crq(crq);
                        continue;
                }

                handle_out = hns->is_vf && desc->opcode == 0;
                if (handle_out) {
                        /* Message already processed by another thread */
                        crq->desc[crq->next_to_use].flag = 0;
                        hns3_mbx_ring_ptr_move_crq(crq);
                        continue;
                }

                switch (opcode) {
                case HNS3_MBX_PF_VF_RESP:
                        hns3_handle_mbx_response(hw, req);
                        break;
                case HNS3_MBX_LINK_STAT_CHANGE:
                        hns3vf_handle_link_change_event(hw, req);
                        break;
                case HNS3_MBX_ASSERTING_RESET:
                        hns3_handle_asserting_reset(hw, req);
                        break;
                case HNS3_MBX_PUSH_LINK_STATUS:
                        /*
                         * This message is reported by the firmware and is
                         * delivered in 'struct hns3_mbx_vf_to_pf_cmd' format.
                         * Therefore, cast the req variable to
                         * 'struct hns3_mbx_vf_to_pf_cmd' before processing.
                         */
                        hns3pf_handle_link_change_event(hw,
                                (struct hns3_mbx_vf_to_pf_cmd *)req);
                        break;
                case HNS3_MBX_PUSH_VLAN_INFO:
                        /*
                         * When the PVID configuration status of the VF device
                         * is changed by the hns3 PF kernel driver, the VF
                         * driver receives this mailbox message from the PF
                         * driver.
                         */
                        hns3_update_port_base_vlan_info(hw, req);
                        break;
                case HNS3_MBX_PUSH_PROMISC_INFO:
                        /*
                         * When the trust status of the VF device is changed
                         * by the hns3 PF kernel driver, the VF driver
                         * receives this mailbox message from the PF driver.
                         */
                        hns3_handle_promisc_info(hw, req->msg[1]);
                        break;
                default:
                        hns3_err(hw, "received unsupported(%u) mbx msg",
                                 opcode);
                        break;
                }

                crq->desc[crq->next_to_use].flag = 0;
                hns3_mbx_ring_ptr_move_crq(crq);
        }

        /* Write back the CMDQ_RQ head pointer; IMP needs this pointer */
        hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, crq->next_to_use);

        rte_spinlock_unlock(&hw->cmq.crq.lock);
}