linux/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define HCLGEVF_NAME    "hclgevf"

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
        /* required last entry */
        {0, }
};

static const u8 hclgevf_hash_key[] = {
        0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
        0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
        0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
        0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
        0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG,
                                         HCLGEVF_CMDQ_TX_ADDR_H_REG,
                                         HCLGEVF_CMDQ_TX_DEPTH_REG,
                                         HCLGEVF_CMDQ_TX_TAIL_REG,
                                         HCLGEVF_CMDQ_TX_HEAD_REG,
                                         HCLGEVF_CMDQ_RX_ADDR_L_REG,
                                         HCLGEVF_CMDQ_RX_ADDR_H_REG,
                                         HCLGEVF_CMDQ_RX_DEPTH_REG,
                                         HCLGEVF_CMDQ_RX_TAIL_REG,
                                         HCLGEVF_CMDQ_RX_HEAD_REG,
                                         HCLGEVF_VECTOR0_CMDQ_SRC_REG,
                                         HCLGEVF_CMDQ_INTR_STS_REG,
                                         HCLGEVF_CMDQ_INTR_EN_REG,
                                         HCLGEVF_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
                                           HCLGEVF_RST_ING,
                                           HCLGEVF_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
                                         HCLGEVF_RING_RX_ADDR_H_REG,
                                         HCLGEVF_RING_RX_BD_NUM_REG,
                                         HCLGEVF_RING_RX_BD_LENGTH_REG,
                                         HCLGEVF_RING_RX_MERGE_EN_REG,
                                         HCLGEVF_RING_RX_TAIL_REG,
                                         HCLGEVF_RING_RX_HEAD_REG,
                                         HCLGEVF_RING_RX_FBD_NUM_REG,
                                         HCLGEVF_RING_RX_OFFSET_REG,
                                         HCLGEVF_RING_RX_FBD_OFFSET_REG,
                                         HCLGEVF_RING_RX_STASH_REG,
                                         HCLGEVF_RING_RX_BD_ERR_REG,
                                         HCLGEVF_RING_TX_ADDR_L_REG,
                                         HCLGEVF_RING_TX_ADDR_H_REG,
                                         HCLGEVF_RING_TX_BD_NUM_REG,
                                         HCLGEVF_RING_TX_PRIORITY_REG,
                                         HCLGEVF_RING_TX_TC_REG,
                                         HCLGEVF_RING_TX_MERGE_EN_REG,
                                         HCLGEVF_RING_TX_TAIL_REG,
                                         HCLGEVF_RING_TX_HEAD_REG,
                                         HCLGEVF_RING_TX_FBD_NUM_REG,
                                         HCLGEVF_RING_TX_OFFSET_REG,
                                         HCLGEVF_RING_TX_EBD_NUM_REG,
                                         HCLGEVF_RING_TX_EBD_OFFSET_REG,
                                         HCLGEVF_RING_TX_BD_ERR_REG,
                                         HCLGEVF_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
                                             HCLGEVF_TQP_INTR_GL0_REG,
                                             HCLGEVF_TQP_INTR_GL1_REG,
                                             HCLGEVF_TQP_INTR_GL2_REG,
                                             HCLGEVF_TQP_INTR_RL_REG};

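/* Resolve the hclgevf_dev that owns an hnae3 handle. Both the nic and
 * roce handles are embedded in struct hclgevf_dev, so the matching
 * container_of() member is picked based on the client type.
 */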
static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
        struct hnae3_handle *handle)
{
        if (!handle->client)
                return container_of(handle, struct hclgevf_dev, nic);
        else if (handle->client->type == HNAE3_CLIENT_ROCE)
                return container_of(handle, struct hclgevf_dev, roce);
        else
                return container_of(handle, struct hclgevf_dev, nic);
}

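/* Query the RX and TX packet counters of every TQP from firmware and
 * accumulate them into the per-queue shadow statistics, one command
 * per direction per queue.
 */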
static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct hclgevf_desc desc;
        struct hclgevf_tqp *tqp;
        int status;
        int i;

        for (i = 0; i < kinfo->num_tqps; i++) {
                tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
                hclgevf_cmd_setup_basic_desc(&desc,
                                             HCLGEVF_OPC_QUERY_RX_STATUS,
                                             true);

                desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
                status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
                if (status) {
                        dev_err(&hdev->pdev->dev,
                                "Query tqp stat fail, status = %d, queue = %d\n",
                                status, i);
                        return status;
                }
                tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
                        le32_to_cpu(desc.data[1]);

                hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
                                             true);

                desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
                status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
                if (status) {
                        dev_err(&hdev->pdev->dev,
                                "Query tqp stat fail, status = %d, queue = %d\n",
                                status, i);
                        return status;
                }
                tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
                        le32_to_cpu(desc.data[1]);
        }

        return 0;
}

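/* Copy the accumulated per-queue counters into the ethtool stats
 * buffer: all TX queue counters first, then all RX queue counters.
 */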
static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        struct hclgevf_tqp *tqp;
        u64 *buff = data;
        int i;

        for (i = 0; i < kinfo->num_tqps; i++) {
                tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
                *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
        }
        for (i = 0; i < kinfo->num_tqps; i++) {
                tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
                *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
        }

        return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;

        return kinfo->num_tqps * 2;
}

static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        u8 *buff = data;
        int i = 0;

        for (i = 0; i < kinfo->num_tqps; i++) {
                struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
                                                       struct hclgevf_tqp, q);
                snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
                         tqp->index);
                buff += ETH_GSTRING_LEN;
        }

        for (i = 0; i < kinfo->num_tqps; i++) {
                struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
                                                       struct hclgevf_tqp, q);
                snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
                         tqp->index);
                buff += ETH_GSTRING_LEN;
        }

        return buff;
}

static void hclgevf_update_stats(struct hnae3_handle *handle,
                                 struct net_device_stats *net_stats)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        int status;

        status = hclgevf_tqps_update_stats(handle);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "VF update of TQPS stats fail, status = %d.\n",
                        status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
        if (strset == ETH_SS_TEST)
                return -EOPNOTSUPP;
        else if (strset == ETH_SS_STATS)
                return hclgevf_tqps_get_sset_count(handle, strset);

        return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
                                u8 *data)
{
        u8 *p = (char *)data;

        if (strset == ETH_SS_STATS)
                p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
        hclgevf_tqps_get_stats(handle, data);
}

static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
        u8 resp_msg;
        int status;

        status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
                                      true, &resp_msg, sizeof(u8));
        if (status) {
                dev_err(&hdev->pdev->dev,
                        "VF request to get TC info from PF failed %d",
                        status);
                return status;
        }

        hdev->hw_tc_map = resp_msg;

        return 0;
}

static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
{
        struct hnae3_handle *nic = &hdev->nic;
        u8 resp_msg;
        int ret;

        ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
                                   HCLGE_MBX_GET_PORT_BASE_VLAN_STATE,
                                   NULL, 0, true, &resp_msg, sizeof(u8));
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "VF request to get port based vlan state failed %d",
                        ret);
                return ret;
        }

        nic->port_base_vlan_state = resp_msg;

        return 0;
}

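/* Fetch the queue configuration from the PF over the mailbox. The
 * response packs three u16 fields: number of TQPs, maximum RSS size
 * and RX buffer length.
 */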
static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN       6
        u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
        int status;

        status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
                                      true, resp_msg,
                                      HCLGEVF_TQPS_RSS_INFO_LEN);
        if (status) {
                dev_err(&hdev->pdev->dev,
                        "VF request to get tqp info from PF failed %d",
                        status);
                return status;
        }

        memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
        memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
        memcpy(&hdev->rx_buf_len, &resp_msg[4], sizeof(u16));

        return 0;
}

static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_DEPTH_INFO_LEN     4
        u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
        int ret;

        ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QDEPTH, 0, NULL, 0,
                                   true, resp_msg,
                                   HCLGEVF_TQPS_DEPTH_INFO_LEN);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "VF request to get tqp depth info from PF failed %d",
                        ret);
                return ret;
        }

        memcpy(&hdev->num_tx_desc, &resp_msg[0], sizeof(u16));
        memcpy(&hdev->num_rx_desc, &resp_msg[2], sizeof(u16));

        return 0;
}

static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        u8 msg_data[2], resp_data[2];
        u16 qid_in_pf = 0;
        int ret;

        memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

        ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QID_IN_PF, 0, msg_data,
                                   2, true, resp_data, 2);
        if (!ret)
                qid_in_pf = *(u16 *)resp_data;

        return qid_in_pf;
}

static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
{
        u8 resp_msg[2];
        int ret;

        ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_MEDIA_TYPE, 0, NULL, 0,
                                   true, resp_msg, sizeof(resp_msg));
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "VF request to get the pf port media type failed %d",
                        ret);
                return ret;
        }

        hdev->hw.mac.media_type = resp_msg[0];
        hdev->hw.mac.module_type = resp_msg[1];

        return 0;
}

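/* Allocate the TQP array and derive each queue's register base from
 * the device I/O base plus a fixed per-queue stride.
 */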
static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
        struct hclgevf_tqp *tqp;
        int i;

        hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
                                  sizeof(struct hclgevf_tqp), GFP_KERNEL);
        if (!hdev->htqp)
                return -ENOMEM;

        tqp = hdev->htqp;

        for (i = 0; i < hdev->num_tqps; i++) {
                tqp->dev = &hdev->pdev->dev;
                tqp->index = i;

                tqp->q.ae_algo = &ae_algovf;
                tqp->q.buf_size = hdev->rx_buf_len;
                tqp->q.tx_desc_num = hdev->num_tx_desc;
                tqp->q.rx_desc_num = hdev->num_rx_desc;
                tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
                        i * HCLGEVF_TQP_REG_SIZE;

                tqp++;
        }

        return 0;
}

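/* Set up the KNIC private info: count the TCs enabled in hw_tc_map,
 * size the RSS table so each enabled TC gets an equal share of the
 * TQPs, and point the kinfo queue array at the allocated TQPs.
 */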
static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
        struct hnae3_handle *nic = &hdev->nic;
        struct hnae3_knic_private_info *kinfo;
        u16 new_tqps = hdev->num_tqps;
        int i;

        kinfo = &nic->kinfo;
        kinfo->num_tc = 0;
        kinfo->num_tx_desc = hdev->num_tx_desc;
        kinfo->num_rx_desc = hdev->num_rx_desc;
        kinfo->rx_buf_len = hdev->rx_buf_len;
        for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
                if (hdev->hw_tc_map & BIT(i))
                        kinfo->num_tc++;

        kinfo->rss_size
                = min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
        new_tqps = kinfo->rss_size * kinfo->num_tc;
        kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

        kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
                                  sizeof(struct hnae3_queue *), GFP_KERNEL);
        if (!kinfo->tqp)
                return -ENOMEM;

        for (i = 0; i < kinfo->num_tqps; i++) {
                hdev->htqp[i].q.handle = &hdev->nic;
                hdev->htqp[i].q.tqp_index = i;
                kinfo->tqp[i] = &hdev->htqp[i].q;
        }

        return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
        int status;
        u8 resp_msg;

        status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
                                      0, false, &resp_msg, sizeof(u8));
        if (status)
                dev_err(&hdev->pdev->dev,
                        "VF failed to fetch link status(%d) from PF", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
        struct hnae3_handle *rhandle = &hdev->roce;
        struct hnae3_handle *handle = &hdev->nic;
        struct hnae3_client *rclient;
        struct hnae3_client *client;

        client = handle->client;
        rclient = hdev->roce_client;

        link_state =
                test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;

        if (link_state != hdev->hw.mac.link) {
                client->ops->link_status_change(handle, !!link_state);
                if (rclient && rclient->ops->link_status_change)
                        rclient->ops->link_status_change(rhandle, !!link_state);
                hdev->hw.mac.link = link_state;
        }
}

static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
{
#define HCLGEVF_ADVERTISING 0
#define HCLGEVF_SUPPORTED   1
        u8 send_msg;
        u8 resp_msg;

        send_msg = HCLGEVF_ADVERTISING;
        hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &send_msg,
                             sizeof(u8), false, &resp_msg, sizeof(u8));
        send_msg = HCLGEVF_SUPPORTED;
        hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &send_msg,
                             sizeof(u8), false, &resp_msg, sizeof(u8));
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
        struct hnae3_handle *nic = &hdev->nic;
        int ret;

        nic->ae_algo = &ae_algovf;
        nic->pdev = hdev->pdev;
        nic->numa_node_mask = hdev->numa_node_mask;
        nic->flags |= HNAE3_SUPPORT_VF;

        if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
                dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
                        hdev->ae_dev->dev_type);
                return -EINVAL;
        }

        ret = hclgevf_knic_setup(hdev);
        if (ret)
                dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
                        ret);
        return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
        if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
                dev_warn(&hdev->pdev->dev,
                         "vector(vector_id %d) has been freed.\n", vector_id);
                return;
        }

        hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
        hdev->num_msi_left += 1;
        hdev->num_msi_used -= 1;
}

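/* Hand out up to vector_num unused MSI-X vectors to the client.
 * Vector 0 is reserved for the misc interrupt, so the search starts
 * right after it; the returned count may be less than requested.
 */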
static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
                              struct hnae3_vector_info *vector_info)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct hnae3_vector_info *vector = vector_info;
        int alloc = 0;
        int i, j;

        vector_num = min(hdev->num_msi_left, vector_num);

        for (j = 0; j < vector_num; j++) {
                for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
                        if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
                                vector->vector = pci_irq_vector(hdev->pdev, i);
                                vector->io_addr = hdev->hw.io_base +
                                        HCLGEVF_VECTOR_REG_BASE +
                                        (i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
                                hdev->vector_status[i] = 0;
                                hdev->vector_irq[i] = vector->vector;

                                vector++;
                                alloc++;

                                break;
                        }
                }
        }
        hdev->num_msi_left -= alloc;
        hdev->num_msi_used += alloc;

        return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
        int i;

        for (i = 0; i < hdev->num_msi; i++)
                if (vector == hdev->vector_irq[i])
                        return i;

        return -EINVAL;
}

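/* Program the RSS hash algorithm and hash key. The key does not fit
 * in a single descriptor, so it is written in three chunks of at most
 * HCLGEVF_RSS_HASH_KEY_NUM bytes, addressed by the key offset field.
 */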
static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
                                    const u8 hfunc, const u8 *key)
{
        struct hclgevf_rss_config_cmd *req;
        struct hclgevf_desc desc;
        int key_offset;
        int key_size;
        int ret;

        req = (struct hclgevf_rss_config_cmd *)desc.data;

        for (key_offset = 0; key_offset < 3; key_offset++) {
                hclgevf_cmd_setup_basic_desc(&desc,
                                             HCLGEVF_OPC_RSS_GENERIC_CONFIG,
                                             false);

                req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
                req->hash_config |=
                        (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);

                if (key_offset == 2)
                        key_size =
                        HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
                else
                        key_size = HCLGEVF_RSS_HASH_KEY_NUM;

                memcpy(req->hash_key,
                       key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);

                ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "Configure RSS config fail, status = %d\n",
                                ret);
                        return ret;
                }
        }

        return 0;
}

static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
        return HCLGEVF_RSS_KEY_SIZE;
}

static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
        return HCLGEVF_RSS_IND_TBL_SIZE;
}

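/* Flush the shadow RSS indirection table to hardware,
 * HCLGEVF_RSS_CFG_TBL_SIZE entries per command descriptor.
 */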
static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
        const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
        struct hclgevf_rss_indirection_table_cmd *req;
        struct hclgevf_desc desc;
        int status;
        int i, j;

        req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;

        for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
                hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
                                             false);
                req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
                req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
                for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
                        req->rss_result[j] =
                                indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

                status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
                if (status) {
                        dev_err(&hdev->pdev->dev,
                                "VF failed(=%d) to set RSS indirection table\n",
                                status);
                        return status;
                }
        }

        return 0;
}

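/* Configure the per-TC RSS mode: every enabled TC gets the same
 * power-of-two rounded queue span (stored as its log2) at an offset
 * of rss_size * tc.
 */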
static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
        struct hclgevf_rss_tc_mode_cmd *req;
        u16 tc_offset[HCLGEVF_MAX_TC_NUM];
        u16 tc_valid[HCLGEVF_MAX_TC_NUM];
        u16 tc_size[HCLGEVF_MAX_TC_NUM];
        struct hclgevf_desc desc;
        u16 roundup_size;
        int status;
        int i;

        req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

        roundup_size = roundup_pow_of_two(rss_size);
        roundup_size = ilog2(roundup_size);

        for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
                tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
                tc_size[i] = roundup_size;
                tc_offset[i] = rss_size * i;
        }

        hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
        for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
                hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
                              (tc_valid[i] & 0x1));
                hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
                                HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
                hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
                                HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
        }
        status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "VF failed(=%d) to set rss tc mode\n", status);

        return status;
}

/* for revision 0x20, the VF shares the same RSS config with the PF */
static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RSS_MBX_RESP_LEN        8

        struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
        u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
        u16 msg_num, hash_key_index;
        u8 index;
        int ret;

        msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
                        HCLGEVF_RSS_MBX_RESP_LEN;
        for (index = 0; index < msg_num; index++) {
                ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_RSS_KEY, 0,
                                           &index, sizeof(index),
                                           true, resp_msg,
                                           HCLGEVF_RSS_MBX_RESP_LEN);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "VF get rss hash key from PF failed, ret=%d",
                                ret);
                        return ret;
                }

                hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index;
                if (index == msg_num - 1)
                        memcpy(&rss_cfg->rss_hash_key[hash_key_index],
                               &resp_msg[0],
                               HCLGEVF_RSS_KEY_SIZE - hash_key_index);
                else
                        memcpy(&rss_cfg->rss_hash_key[hash_key_index],
                               &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN);
        }

        return 0;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
                           u8 *hfunc)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
        int i, ret;

        if (handle->pdev->revision >= 0x21) {
                /* Get hash algorithm */
                if (hfunc) {
                        switch (rss_cfg->hash_algo) {
                        case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
                                *hfunc = ETH_RSS_HASH_TOP;
                                break;
                        case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
                                *hfunc = ETH_RSS_HASH_XOR;
                                break;
                        default:
                                *hfunc = ETH_RSS_HASH_UNKNOWN;
                                break;
                        }
                }

                /* Get the RSS Key required by the user */
                if (key)
                        memcpy(key, rss_cfg->rss_hash_key,
                               HCLGEVF_RSS_KEY_SIZE);
        } else {
                if (hfunc)
                        *hfunc = ETH_RSS_HASH_TOP;
                if (key) {
                        ret = hclgevf_get_rss_hash_key(hdev);
                        if (ret)
                                return ret;
                        memcpy(key, rss_cfg->rss_hash_key,
                               HCLGEVF_RSS_KEY_SIZE);
                }
        }

        if (indir)
                for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
                        indir[i] = rss_cfg->rss_indirection_tbl[i];

        return 0;
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
                           const u8 *key, const u8 hfunc)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
        int ret, i;

        if (handle->pdev->revision >= 0x21) {
                /* Set the RSS Hash Key if specified by the user */
                if (key) {
                        switch (hfunc) {
                        case ETH_RSS_HASH_TOP:
                                rss_cfg->hash_algo =
                                        HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
                                break;
                        case ETH_RSS_HASH_XOR:
                                rss_cfg->hash_algo =
                                        HCLGEVF_RSS_HASH_ALGO_SIMPLE;
                                break;
                        case ETH_RSS_HASH_NO_CHANGE:
                                break;
                        default:
                                return -EINVAL;
                        }

                        ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
                                                       key);
                        if (ret)
                                return ret;

                        /* Update the shadow RSS key with the user specified key */
                        memcpy(rss_cfg->rss_hash_key, key,
                               HCLGEVF_RSS_KEY_SIZE);
                }
        }

        /* update the shadow RSS table with user specified qids */
        for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
                rss_cfg->rss_indirection_tbl[i] = indir[i];

        /* update the hardware */
        return hclgevf_set_rss_indir_table(hdev);
}

static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
        u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;

        if (nfc->data & RXH_L4_B_2_3)
                hash_sets |= HCLGEVF_D_PORT_BIT;
        else
                hash_sets &= ~HCLGEVF_D_PORT_BIT;

        if (nfc->data & RXH_IP_SRC)
                hash_sets |= HCLGEVF_S_IP_BIT;
        else
                hash_sets &= ~HCLGEVF_S_IP_BIT;

        if (nfc->data & RXH_IP_DST)
                hash_sets |= HCLGEVF_D_IP_BIT;
        else
                hash_sets &= ~HCLGEVF_D_IP_BIT;

        if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
                hash_sets |= HCLGEVF_V_TAG_BIT;

        return hash_sets;
}

static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
                                 struct ethtool_rxnfc *nfc)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
        struct hclgevf_rss_input_tuple_cmd *req;
        struct hclgevf_desc desc;
        u8 tuple_sets;
        int ret;

        if (handle->pdev->revision == 0x20)
                return -EOPNOTSUPP;

        if (nfc->data &
            ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
                return -EINVAL;

        req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
        hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

        req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
        req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
        req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
        req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
        req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
        req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
        req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
        req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

        tuple_sets = hclgevf_get_rss_hash_bits(nfc);
        switch (nfc->flow_type) {
        case TCP_V4_FLOW:
                req->ipv4_tcp_en = tuple_sets;
                break;
        case TCP_V6_FLOW:
                req->ipv6_tcp_en = tuple_sets;
                break;
        case UDP_V4_FLOW:
                req->ipv4_udp_en = tuple_sets;
                break;
        case UDP_V6_FLOW:
                req->ipv6_udp_en = tuple_sets;
                break;
        case SCTP_V4_FLOW:
                req->ipv4_sctp_en = tuple_sets;
                break;
        case SCTP_V6_FLOW:
                if ((nfc->data & RXH_L4_B_0_1) ||
                    (nfc->data & RXH_L4_B_2_3))
                        return -EINVAL;

                req->ipv6_sctp_en = tuple_sets;
                break;
        case IPV4_FLOW:
                req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
                break;
        case IPV6_FLOW:
                req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
                break;
        default:
                return -EINVAL;
        }

        ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Set rss tuple fail, status = %d\n", ret);
                return ret;
        }

        rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
        rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
        rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
        rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
        rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
        rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
        rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
        rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
        return 0;
}

static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
                                 struct ethtool_rxnfc *nfc)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
        u8 tuple_sets;

        if (handle->pdev->revision == 0x20)
                return -EOPNOTSUPP;

        nfc->data = 0;

        switch (nfc->flow_type) {
        case TCP_V4_FLOW:
                tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
                break;
        case UDP_V4_FLOW:
                tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
                break;
        case TCP_V6_FLOW:
                tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
                break;
        case UDP_V6_FLOW:
                tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
                break;
        case SCTP_V4_FLOW:
                tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
                break;
        case SCTP_V6_FLOW:
                tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
                break;
        case IPV4_FLOW:
        case IPV6_FLOW:
                tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
                break;
        default:
                return -EINVAL;
        }

        if (!tuple_sets)
                return 0;

        if (tuple_sets & HCLGEVF_D_PORT_BIT)
                nfc->data |= RXH_L4_B_2_3;
        if (tuple_sets & HCLGEVF_S_PORT_BIT)
                nfc->data |= RXH_L4_B_0_1;
        if (tuple_sets & HCLGEVF_D_IP_BIT)
                nfc->data |= RXH_IP_DST;
        if (tuple_sets & HCLGEVF_S_IP_BIT)
                nfc->data |= RXH_IP_SRC;

        return 0;
}

static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
                                       struct hclgevf_rss_cfg *rss_cfg)
{
        struct hclgevf_rss_input_tuple_cmd *req;
        struct hclgevf_desc desc;
        int ret;

        hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

        req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;

        req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
        req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
        req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
        req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
        req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
        req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
        req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
        req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

        ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
                dev_err(&hdev->pdev->dev,
                        "Configure rss input fail, status = %d\n", ret);
        return ret;
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

        return rss_cfg->rss_size;
}

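/* Map (en = true) or unmap (en = false) a chain of rings onto an
 * interrupt vector via the VF-to-PF mailbox. Each message carries the
 * vector id plus as many ring entries (type, tqp index, GL index) as
 * fit in the message data area; longer chains are split across
 * several messages.
 */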
static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
                                       int vector_id,
                                       struct hnae3_ring_chain_node *ring_chain)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct hnae3_ring_chain_node *node;
        struct hclge_mbx_vf_to_pf_cmd *req;
        struct hclgevf_desc desc;
        int i = 0;
        int status;
        u8 type;

        req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

        for (node = ring_chain; node; node = node->next) {
                int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
                                        HCLGE_MBX_RING_NODE_VARIABLE_NUM * i;

                if (i == 0) {
                        hclgevf_cmd_setup_basic_desc(&desc,
                                                     HCLGEVF_OPC_MBX_VF_TO_PF,
                                                     false);
                        type = en ?
                                HCLGE_MBX_MAP_RING_TO_VECTOR :
                                HCLGE_MBX_UNMAP_RING_TO_VECTOR;
                        req->msg[0] = type;
                        req->msg[1] = vector_id;
                }

                req->msg[idx_offset] =
                                hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
                req->msg[idx_offset + 1] = node->tqp_index;
                req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx,
                                                           HNAE3_RING_GL_IDX_M,
                                                           HNAE3_RING_GL_IDX_S);

                i++;
                if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
                     HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
                     HCLGE_MBX_RING_NODE_VARIABLE_NUM) ||
                    !node->next) {
                        req->msg[2] = i;

                        status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
                        if (status) {
                                dev_err(&hdev->pdev->dev,
                                        "Map TQP fail, status is %d.\n",
                                        status);
                                return status;
                        }
                        i = 0;
                        hclgevf_cmd_setup_basic_desc(&desc,
                                                     HCLGEVF_OPC_MBX_VF_TO_PF,
                                                     false);
                        req->msg[0] = type;
                        req->msg[1] = vector_id;
                }
        }

        return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
                                      struct hnae3_ring_chain_node *ring_chain)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        int vector_id;

        vector_id = hclgevf_get_vector_index(hdev, vector);
        if (vector_id < 0) {
                dev_err(&handle->pdev->dev,
                        "Get vector index fail. ret =%d\n", vector_id);
                return vector_id;
        }

        return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(
                                struct hnae3_handle *handle,
                                int vector,
                                struct hnae3_ring_chain_node *ring_chain)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        int ret, vector_id;

        if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
                return 0;

        vector_id = hclgevf_get_vector_index(hdev, vector);
        if (vector_id < 0) {
                dev_err(&handle->pdev->dev,
                        "Get vector index fail. ret =%d\n", vector_id);
                return vector_id;
        }

        ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
        if (ret)
                dev_err(&handle->pdev->dev,
                        "Unmap ring from vector fail. vector=%d, ret =%d\n",
                        vector_id,
                        ret);

        return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        int vector_id;

        vector_id = hclgevf_get_vector_index(hdev, vector);
        if (vector_id < 0) {
                dev_err(&handle->pdev->dev,
                        "hclgevf_put_vector get vector index fail. ret =%d\n",
                        vector_id);
                return vector_id;
        }

        hclgevf_free_vector(hdev, vector_id);

        return 0;
}

static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
                                        bool en_bc_pmc)
{
        struct hclge_mbx_vf_to_pf_cmd *req;
        struct hclgevf_desc desc;
        int ret;

        req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

        hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
        req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
        req->msg[1] = en_bc_pmc ? 1 : 0;

        ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
                dev_err(&hdev->pdev->dev,
                        "Set promisc mode fail, status is %d.\n", ret);

        return ret;
}

static int hclgevf_set_promisc_mode(struct hclgevf_dev *hdev, bool en_bc_pmc)
{
        return hclgevf_cmd_set_promisc_mode(hdev, en_bc_pmc);
}

static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
                              int stream_id, bool enable)
{
        struct hclgevf_cfg_com_tqp_queue_cmd *req;
        struct hclgevf_desc desc;
        int status;

        req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

        hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
                                     false);
        req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
        req->stream_id = cpu_to_le16(stream_id);
        req->enable |= enable << HCLGEVF_TQP_ENABLE_B;

        status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "TQP enable fail, status =%d.\n", status);

        return status;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        struct hclgevf_tqp *tqp;
        int i;

        for (i = 0; i < kinfo->num_tqps; i++) {
                tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
                memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
        }
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
                                bool is_first)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
        u8 *new_mac_addr = (u8 *)p;
        u8 msg_data[ETH_ALEN * 2];
        u16 subcode;
        int status;

        ether_addr_copy(msg_data, new_mac_addr);
        ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);

        subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
                        HCLGE_MBX_MAC_VLAN_UC_MODIFY;

        status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
                                      subcode, msg_data, ETH_ALEN * 2,
                                      true, NULL, 0);
        if (!status)
                ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

        return status;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
                               const unsigned char *addr)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
                                    HCLGE_MBX_MAC_VLAN_UC_ADD,
                                    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
                              const unsigned char *addr)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
                                    HCLGE_MBX_MAC_VLAN_UC_REMOVE,
                                    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
                               const unsigned char *addr)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
                                    HCLGE_MBX_MAC_VLAN_MC_ADD,
                                    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
                              const unsigned char *addr)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
                                    HCLGE_MBX_MAC_VLAN_MC_REMOVE,
                                    addr, ETH_ALEN, false, NULL, 0);
}

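/* Ask the PF to add or remove a VLAN filter entry. The mailbox
 * payload is: byte 0 the is_kill flag, bytes 1-2 the VLAN id and
 * bytes 3-4 the protocol.
 */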
static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
                                   __be16 proto, u16 vlan_id,
                                   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_MSG_LEN 5
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];

        if (vlan_id > 4095)
                return -EINVAL;

        if (proto != htons(ETH_P_8021Q))
                return -EPROTONOSUPPORT;

        msg_data[0] = is_kill;
        memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
        memcpy(&msg_data[3], &proto, sizeof(proto));
        return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
                                    HCLGE_MBX_VLAN_FILTER, msg_data,
                                    HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
}

static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        u8 msg_data;

        msg_data = enable ? 1 : 0;
        return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
                                    HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data,
                                    1, false, NULL, 0);
}

static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        u8 msg_data[2];
        int ret;

        memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

        /* disable the vf queue before sending the queue reset msg to PF */
        ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
        if (ret)
                return ret;

        return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
                                    2, true, NULL, 0);
}

static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MTU, 0, (u8 *)&new_mtu,
                                    sizeof(new_mtu), true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
                                 enum hnae3_reset_notify_type type)
{
        struct hnae3_client *client = hdev->nic_client;
        struct hnae3_handle *handle = &hdev->nic;
        int ret;

        if (!client->ops->reset_notify)
                return -EOPNOTSUPP;

        ret = client->ops->reset_notify(handle, type);
        if (ret)
                dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
                        type, ret);

        return ret;
}

static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev)
{
        struct hclgevf_dev *hdev = ae_dev->priv;

        set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
}

static int hclgevf_flr_poll_timeout(struct hclgevf_dev *hdev,
                                    unsigned long delay_us,
                                    unsigned long wait_cnt)
{
        unsigned long cnt = 0;

        while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
               cnt++ < wait_cnt)
                usleep_range(delay_us, delay_us * 2);

        if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
                dev_err(&hdev->pdev->dev,
                        "flr wait timeout\n");
                return -ETIMEDOUT;
        }

        return 0;
}

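/* Wait for hardware to report reset completion. FLR completion is
 * signalled through the HNAE3_FLR_DONE bit set by hclgevf_flr_done(),
 * so it is polled with a bounded counter; other resets poll the
 * HCLGEVF_RST_ING register until the reset-in-progress bits clear.
 */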
1346static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
1347{
1348#define HCLGEVF_RESET_WAIT_US   20000
1349#define HCLGEVF_RESET_WAIT_CNT  2000
1350#define HCLGEVF_RESET_WAIT_TIMEOUT_US   \
1351        (HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)
1352
1353        u32 val;
1354        int ret;
1355
1356        /* wait to check the hardware reset completion status */
1357        val = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
1358        dev_info(&hdev->pdev->dev, "checking vf resetting status: %x\n", val);
1359
1360        if (hdev->reset_type == HNAE3_FLR_RESET)
1361                return hclgevf_flr_poll_timeout(hdev,
1362                                                HCLGEVF_RESET_WAIT_US,
1363                                                HCLGEVF_RESET_WAIT_CNT);
1364
1365        ret = readl_poll_timeout(hdev->hw.io_base + HCLGEVF_RST_ING, val,
1366                                 !(val & HCLGEVF_RST_ING_BITS),
1367                                 HCLGEVF_RESET_WAIT_US,
1368                                 HCLGEVF_RESET_WAIT_TIMEOUT_US);
1369
1370        /* hardware completion status should be available by this time */
1371        if (ret) {
1372                dev_err(&hdev->pdev->dev,
1373                        "could'nt get reset done status from h/w, timeout!\n");
1374                return ret;
1375        }
1376
1377        /* we will wait a bit more to let reset of the stack to complete. This
1378         * might happen in case reset assertion was made by PF. Yes, this also
1379         * means we might end up waiting bit more even for VF reset.
1380         */
1381        msleep(5000);
1382
1383        return 0;
1384}
1385
1386static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
1387{
1388        int ret;
1389
1390        /* uninitialize the nic client */
1391        ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
1392        if (ret)
1393                return ret;
1394
1395        /* re-initialize the hclge device */
1396        ret = hclgevf_reset_hdev(hdev);
1397        if (ret) {
1398                dev_err(&hdev->pdev->dev,
1399                        "hclge device re-init failed, VF is disabled!\n");
1400                return ret;
1401        }
1402
1403        /* bring up the nic client again */
1404        ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
1405        if (ret)
1406                return ret;
1407
1408        return hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT);
1409}
1410
1411static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
1412{
1413        int ret = 0;
1414
1415        switch (hdev->reset_type) {
1416        case HNAE3_VF_FUNC_RESET:
1417                ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
1418                                           0, true, NULL, sizeof(u8));
1419                hdev->rst_stats.vf_func_rst_cnt++;
1420                break;
1421        case HNAE3_FLR_RESET:
1422                set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
1423                hdev->rst_stats.flr_rst_cnt++;
1424                break;
1425        default:
1426                break;
1427        }
1428
1429        set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
1430
1431        dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n",
1432                 hdev->reset_type, ret);
1433
1434        return ret;
1435}
1436
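/* hclgevf_reset - core VF reset sequence: bring the nic down, announce the
 * reset, wait for the hardware to report completion, then rebuild the
 * stack and bring the nic back up. On failure the command queue is
 * re-initialized so a later, higher-level reset asserted by the PF can
 * still be received.
 */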
1437static int hclgevf_reset(struct hclgevf_dev *hdev)
1438{
1439        struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1440        int ret;
1441
1442        /* Initialize the ae_dev reset status as well, in case the enet
1443         * layer wants to know if the device is undergoing reset
1444         */
1445        ae_dev->reset_type = hdev->reset_type;
1446        hdev->rst_stats.rst_cnt++;
1447        rtnl_lock();
1448
1449        /* bring down the nic to stop any ongoing TX/RX */
1450        ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
1451        if (ret)
1452                goto err_reset_lock;
1453
1454        rtnl_unlock();
1455
1456        ret = hclgevf_reset_prepare_wait(hdev);
1457        if (ret)
1458                goto err_reset;
1459
1460        /* check if VF could successfully fetch the hardware reset completion
1461         * status from the hardware
1462         */
1463        ret = hclgevf_reset_wait(hdev);
1464        if (ret) {
1465                /* can't do much in this situation, will disable VF */
1466                dev_err(&hdev->pdev->dev,
1467                        "VF failed(=%d) to fetch H/W reset completion status\n",
1468                        ret);
1469                goto err_reset;
1470        }
1471
1472        hdev->rst_stats.hw_rst_done_cnt++;
1473
1474        rtnl_lock();
1475
1476        /* now, re-initialize the nic client and ae device */
1477        ret = hclgevf_reset_stack(hdev);
1478        if (ret) {
1479                dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
1480                goto err_reset_lock;
1481        }
1482
1483        /* bring up the nic to enable TX/RX again */
1484        ret = hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
1485        if (ret)
1486                goto err_reset_lock;
1487
1488        rtnl_unlock();
1489
1490        hdev->last_reset_time = jiffies;
1491        ae_dev->reset_type = HNAE3_NONE_RESET;
1492        hdev->rst_stats.rst_done_cnt++;
1493
1494        return ret;
1495err_reset_lock:
1496        rtnl_unlock();
1497err_reset:
1498        /* When the VF reset fails, only a higher-level reset asserted by
1499         * the PF can restore it, so re-initialize the command queue to be
1500         * able to receive that higher-level reset event.
1501         */
1502        hclgevf_cmd_init(hdev);
1503        dev_err(&hdev->pdev->dev, "failed to reset VF\n");
1504        if (hclgevf_is_reset_pending(hdev))
1505                hclgevf_reset_task_schedule(hdev);
1506
1507        return ret;
1508}
1509
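/* hclgevf_get_reset_level - return the highest-priority reset level set in
 * @addr, clearing that bit together with any lower-level bits it
 * supersedes so each outstanding reset is handled exactly once.
 */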
1510static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
1511                                                     unsigned long *addr)
1512{
1513        enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
1514
1515        /* return the highest priority reset level amongst all */
1516        if (test_bit(HNAE3_VF_RESET, addr)) {
1517                rst_level = HNAE3_VF_RESET;
1518                clear_bit(HNAE3_VF_RESET, addr);
1519                clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
1520                clear_bit(HNAE3_VF_FUNC_RESET, addr);
1521        } else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
1522                rst_level = HNAE3_VF_FULL_RESET;
1523                clear_bit(HNAE3_VF_FULL_RESET, addr);
1524                clear_bit(HNAE3_VF_FUNC_RESET, addr);
1525        } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
1526                rst_level = HNAE3_VF_PF_FUNC_RESET;
1527                clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
1528                clear_bit(HNAE3_VF_FUNC_RESET, addr);
1529        } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
1530                rst_level = HNAE3_VF_FUNC_RESET;
1531                clear_bit(HNAE3_VF_FUNC_RESET, addr);
1532        } else if (test_bit(HNAE3_FLR_RESET, addr)) {
1533                rst_level = HNAE3_FLR_RESET;
1534                clear_bit(HNAE3_FLR_RESET, addr);
1535        }
1536
1537        return rst_level;
1538}
1539
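/* An illustrative sketch (not part of the driver) of how a caller drains
 * all pending reset levels, mirroring the loop used in
 * hclgevf_reset_service_task() below:
 */
#if 0
static void hclgevf_drain_pending_resets(struct hclgevf_dev *hdev)
{
        enum hnae3_reset_type level;

        /* highest-priority level first; the helper clears superseded bits */
        while ((level = hclgevf_get_reset_level(hdev, &hdev->reset_pending))
               != HNAE3_NONE_RESET) {
                hdev->reset_type = level;
                hclgevf_reset(hdev);
        }
}
#endif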
1540static void hclgevf_reset_event(struct pci_dev *pdev,
1541                                struct hnae3_handle *handle)
1542{
1543        struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1544        struct hclgevf_dev *hdev = ae_dev->priv;
1545
1546        dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
1547
1548        if (hdev->default_reset_request)
1549                hdev->reset_level =
1550                        hclgevf_get_reset_level(hdev,
1551                                                &hdev->default_reset_request);
1552        else
1553                hdev->reset_level = HNAE3_VF_FUNC_RESET;
1554
1555        /* reset of this VF requested */
1556        set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
1557        hclgevf_reset_task_schedule(hdev);
1558
1559        hdev->last_reset_time = jiffies;
1560}
1561
1562static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
1563                                          enum hnae3_reset_type rst_type)
1564{
1565        struct hclgevf_dev *hdev = ae_dev->priv;
1566
1567        set_bit(rst_type, &hdev->default_reset_request);
1568}
1569
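/* hclgevf_flr_prepare - quiesce the VF before a function level reset:
 * request an HNAE3_FLR_RESET through the normal reset path, then wait up
 * to HCLGEVF_FLR_WAIT_CNT * HCLGEVF_FLR_WAIT_MS milliseconds for the
 * reset task to signal HNAE3_FLR_DOWN, i.e. that the device is down and
 * the FLR may proceed.
 */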
1570static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev)
1571{
1572#define HCLGEVF_FLR_WAIT_MS     100
1573#define HCLGEVF_FLR_WAIT_CNT    50
1574        struct hclgevf_dev *hdev = ae_dev->priv;
1575        int cnt = 0;
1576
1577        clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
1578        clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
1579        set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
1580        hclgevf_reset_event(hdev->pdev, NULL);
1581
1582        while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
1583               cnt++ < HCLGEVF_FLR_WAIT_CNT)
1584                msleep(HCLGEVF_FLR_WAIT_MS);
1585
1586        if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
1587                dev_err(&hdev->pdev->dev,
1588                        "flr wait down timeout: %d\n", cnt);
1589}
1590
1591static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
1592{
1593        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1594
1595        return hdev->fw_version;
1596}
1597
1598static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
1599{
1600        struct hclgevf_misc_vector *vector = &hdev->misc_vector;
1601
1602        vector->vector_irq = pci_irq_vector(hdev->pdev,
1603                                            HCLGEVF_MISC_VECTOR_NUM);
1604        vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
1605        /* vector status always valid for Vector 0 */
1606        hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
1607        hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;
1608
1609        hdev->num_msi_left -= 1;
1610        hdev->num_msi_used += 1;
1611}
1612
1613void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
1614{
1615        if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state)) {
1616                set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
1617                schedule_work(&hdev->rst_service_task);
1618        }
1619}
1620
1621void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
1622{
1623        if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) &&
1624            !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) {
1625                set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
1626                schedule_work(&hdev->mbx_service_task);
1627        }
1628}
1629
1630static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
1631{
1632        if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state)  &&
1633            !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
1634                schedule_work(&hdev->service_task);
1635}
1636
1637static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
1638{
1639        /* if we have any pending mailbox event then schedule the mbx task */
1640        if (hdev->mbx_event_pending)
1641                hclgevf_mbx_task_schedule(hdev);
1642
1643        if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
1644                hclgevf_reset_task_schedule(hdev);
1645}
1646
1647static void hclgevf_service_timer(struct timer_list *t)
1648{
1649        struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);
1650
1651        mod_timer(&hdev->service_timer, jiffies + 5 * HZ);
1652
1653        hdev->stats_timer++;
1654        hclgevf_task_schedule(hdev);
1655}
1656
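/* hclgevf_reset_service_task - worker that drives all VF resets. It either
 * services an asserted reset (HCLGEVF_RESET_PENDING) by calling
 * hclgevf_reset() until no level remains pending, or re-issues a reset we
 * requested ourselves (HCLGEVF_RESET_REQUESTED), escalating to a full
 * stack + PCIe reset after more than three unanswered attempts.
 */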
1657static void hclgevf_reset_service_task(struct work_struct *work)
1658{
1659        struct hclgevf_dev *hdev =
1660                container_of(work, struct hclgevf_dev, rst_service_task);
1661        int ret;
1662
1663        if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
1664                return;
1665
1666        clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
1667
1668        if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
1669                               &hdev->reset_state)) {
1670                /* PF has intimated that it is about to reset the hardware.
1671                 * We now have to poll & check if hardware has actually
1672                 * completed the reset sequence. On hardware reset completion,
1673                 * VF needs to reset the client and ae device.
1674                 */
1675                hdev->reset_attempts = 0;
1676
1677                hdev->last_reset_time = jiffies;
1678                while ((hdev->reset_type =
1679                        hclgevf_get_reset_level(hdev, &hdev->reset_pending))
1680                       != HNAE3_NONE_RESET) {
1681                        ret = hclgevf_reset(hdev);
1682                        if (ret)
1683                                dev_err(&hdev->pdev->dev,
1684                                        "VF stack reset failed %d.\n", ret);
1685                }
1686        } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
1687                                      &hdev->reset_state)) {
1688                /* we could be here when either of the below happens:
1689                 * 1. reset was initiated due to a watchdog timeout caused by
1690                 *    a. IMP was reset earlier and our TX got choked, which
1691                 *       resulted in the watchdog reacting and inducing a VF
1692                 *       reset. This also means our cmdq would be unreliable.
1693                 *    b. a problem in TX due to some other lower layer (e.g.
1694                 *       the link layer not functioning properly).
1695                 * 2. VF reset might have been initiated due to some config
1696                 *    change.
1697                 *
1698                 * NOTE: There is no clearer way to detect the above cases than
1699                 * to react to the PF's response to this reset request. The PF
1700                 * will ack cases 1b and 2, but we will get no intimation about
1701                 * 1a from the PF as the cmdq would be in an unreliable state,
1702                 * i.e. mailbox communication between PF and VF would be broken.
1703                 */
1704
1705                /* if we never get into the pending state it means either:
1706                 * 1. PF is not receiving our request, which could be due to an
1707                 *    IMP reset
1708                 * 2. PF is in an unrecoverable state
1709                 * We cannot do much for case 2, but to rule out case 1 we can
1710                 * try resetting our PCIe + stack and see if that helps.
1711                 */
1712                if (hdev->reset_attempts > 3) {
1713                        /* prepare for full reset of stack + pcie interface */
1714                        set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
1715
1716                        /* "defer" schedule the reset task again */
1717                        set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
1718                } else {
1719                        hdev->reset_attempts++;
1720
1721                        set_bit(hdev->reset_level, &hdev->reset_pending);
1722                        set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
1723                }
1724                hclgevf_reset_task_schedule(hdev);
1725        }
1726
1727        clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
1728}
1729
1730static void hclgevf_mailbox_service_task(struct work_struct *work)
1731{
1732        struct hclgevf_dev *hdev;
1733
1734        hdev = container_of(work, struct hclgevf_dev, mbx_service_task);
1735
1736        if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
1737                return;
1738
1739        clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
1740
1741        hclgevf_mbx_async_handler(hdev);
1742
1743        clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
1744}
1745
1746static void hclgevf_keep_alive_timer(struct timer_list *t)
1747{
1748        struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer);
1749
1750        schedule_work(&hdev->keep_alive_task);
1751        mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
1752}
1753
1754static void hclgevf_keep_alive_task(struct work_struct *work)
1755{
1756        struct hclgevf_dev *hdev;
1757        u8 respmsg;
1758        int ret;
1759
1760        hdev = container_of(work, struct hclgevf_dev, keep_alive_task);
1761
1762        if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
1763                return;
1764
1765        ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL,
1766                                   0, false, &respmsg, sizeof(u8));
1767        if (ret)
1768                dev_err(&hdev->pdev->dev,
1769                        "VF failed(=%d) to send keep alive cmd\n", ret);
1770}
1771
1772static void hclgevf_service_task(struct work_struct *work)
1773{
1774        struct hnae3_handle *handle;
1775        struct hclgevf_dev *hdev;
1776
1777        hdev = container_of(work, struct hclgevf_dev, service_task);
1778        handle = &hdev->nic;
1779
1780        if (hdev->stats_timer >= HCLGEVF_STATS_TIMER_INTERVAL) {
1781                hclgevf_tqps_update_stats(handle);
1782                hdev->stats_timer = 0;
1783        }
1784
1785        /* request the link status from the PF. The PF will be able to push
1786         * such updates to the VF in the future, so we may remove this later
1787         */
1788        hclgevf_request_link_info(hdev);
1789
1790        hclgevf_update_link_mode(hdev);
1791
1792        hclgevf_deferred_task_schedule(hdev);
1793
1794        clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
1795}
1796
1797static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
1798{
1799        hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
1800}
1801
1802static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
1803                                                      u32 *clearval)
1804{
1805        u32 cmdq_src_reg, rst_ing_reg;
1806
1807        /* fetch the events from their corresponding regs */
1808        cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
1809                                        HCLGEVF_VECTOR0_CMDQ_SRC_REG);
1810
1811        if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_src_reg) {
1812                rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
1813                dev_info(&hdev->pdev->dev,
1814                         "receive reset interrupt 0x%x!\n", rst_ing_reg);
1815                set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
1816                set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
1817                set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
1818                cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B);
1819                *clearval = cmdq_src_reg;
1820                hdev->rst_stats.vf_rst_cnt++;
1821                return HCLGEVF_VECTOR0_EVENT_RST;
1822        }
1823
1824        /* check for vector0 mailbox(=CMDQ RX) event source */
1825        if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
1826                cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
1827                *clearval = cmdq_src_reg;
1828                return HCLGEVF_VECTOR0_EVENT_MBX;
1829        }
1830
1831        dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");
1832
1833        return HCLGEVF_VECTOR0_EVENT_OTHER;
1834}
1835
1836static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
1837{
1838        writel(en ? 1 : 0, vector->addr);
1839}
1840
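/* hclgevf_misc_irq_handle - vector 0 interrupt handler: mask the vector,
 * classify the event (reset vs. mailbox) from the CMDQ source register,
 * dispatch it, then clear and re-enable the vector only for event sources
 * that were actually recognized.
 */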
1841static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
1842{
1843        enum hclgevf_evt_cause event_cause;
1844        struct hclgevf_dev *hdev = data;
1845        u32 clearval;
1846
1847        hclgevf_enable_vector(&hdev->misc_vector, false);
1848        event_cause = hclgevf_check_evt_cause(hdev, &clearval);
1849
1850        switch (event_cause) {
1851        case HCLGEVF_VECTOR0_EVENT_RST:
1852                hclgevf_reset_task_schedule(hdev);
1853                break;
1854        case HCLGEVF_VECTOR0_EVENT_MBX:
1855                hclgevf_mbx_handler(hdev);
1856                break;
1857        default:
1858                break;
1859        }
1860
1861        if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
1862                hclgevf_clear_event_cause(hdev, clearval);
1863                hclgevf_enable_vector(&hdev->misc_vector, true);
1864        }
1865
1866        return IRQ_HANDLED;
1867}
1868
1869static int hclgevf_configure(struct hclgevf_dev *hdev)
1870{
1871        int ret;
1872
1873        /* get current port based vlan state from PF */
1874        ret = hclgevf_get_port_base_vlan_filter_state(hdev);
1875        if (ret)
1876                return ret;
1877
1878        /* get queue configuration from PF */
1879        ret = hclgevf_get_queue_info(hdev);
1880        if (ret)
1881                return ret;
1882
1883        /* get queue depth info from PF */
1884        ret = hclgevf_get_queue_depth(hdev);
1885        if (ret)
1886                return ret;
1887
1888        ret = hclgevf_get_pf_media_type(hdev);
1889        if (ret)
1890                return ret;
1891
1892        /* get tc configuration from PF */
1893        return hclgevf_get_tc_info(hdev);
1894}
1895
1896static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
1897{
1898        struct pci_dev *pdev = ae_dev->pdev;
1899        struct hclgevf_dev *hdev;
1900
1901        hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
1902        if (!hdev)
1903                return -ENOMEM;
1904
1905        hdev->pdev = pdev;
1906        hdev->ae_dev = ae_dev;
1907        ae_dev->priv = hdev;
1908
1909        return 0;
1910}
1911
1912static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
1913{
1914        struct hnae3_handle *roce = &hdev->roce;
1915        struct hnae3_handle *nic = &hdev->nic;
1916
1917        roce->rinfo.num_vectors = hdev->num_roce_msix;
1918
1919        if (hdev->num_msi_left < roce->rinfo.num_vectors ||
1920            hdev->num_msi_left == 0)
1921                return -EINVAL;
1922
1923        roce->rinfo.base_vector = hdev->roce_base_vector;
1924
1925        roce->rinfo.netdev = nic->kinfo.netdev;
1926        roce->rinfo.roce_io_base = hdev->hw.io_base;
1927
1928        roce->pdev = nic->pdev;
1929        roce->ae_algo = nic->ae_algo;
1930        roce->numa_node_mask = nic->numa_node_mask;
1931
1932        return 0;
1933}
1934
1935static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
1936{
1937        struct hclgevf_cfg_gro_status_cmd *req;
1938        struct hclgevf_desc desc;
1939        int ret;
1940
1941        if (!hnae3_dev_gro_supported(hdev))
1942                return 0;
1943
1944        hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
1945                                     false);
1946        req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;
1947
1948        req->gro_en = cpu_to_le16(en ? 1 : 0);
1949
1950        ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
1951        if (ret)
1952                dev_err(&hdev->pdev->dev,
1953                        "VF GRO hardware config cmd failed, ret = %d.\n", ret);
1954
1955        return ret;
1956}
1957
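/* hclgevf_rss_init_hw - bring up RSS for this VF. On revision 0x21 and
 * later the hash key, algorithm and tuple sets are programmable and set
 * here; older revisions only get the indirection table and TC mode. The
 * indirection table is filled round-robin, e.g. with rss_size_max = 4 it
 * becomes 0, 1, 2, 3, 0, 1, ... across all HCLGEVF_RSS_IND_TBL_SIZE
 * entries.
 */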
1958static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
1959{
1960        struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
1961        int i, ret;
1962
1963        rss_cfg->rss_size = hdev->rss_size_max;
1964
1965        if (hdev->pdev->revision >= 0x21) {
1966                rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
1967                memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
1968                       HCLGEVF_RSS_KEY_SIZE);
1969
1970                ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
1971                                               rss_cfg->rss_hash_key);
1972                if (ret)
1973                        return ret;
1974
1975                rss_cfg->rss_tuple_sets.ipv4_tcp_en =
1976                                        HCLGEVF_RSS_INPUT_TUPLE_OTHER;
1977                rss_cfg->rss_tuple_sets.ipv4_udp_en =
1978                                        HCLGEVF_RSS_INPUT_TUPLE_OTHER;
1979                rss_cfg->rss_tuple_sets.ipv4_sctp_en =
1980                                        HCLGEVF_RSS_INPUT_TUPLE_SCTP;
1981                rss_cfg->rss_tuple_sets.ipv4_fragment_en =
1982                                        HCLGEVF_RSS_INPUT_TUPLE_OTHER;
1983                rss_cfg->rss_tuple_sets.ipv6_tcp_en =
1984                                        HCLGEVF_RSS_INPUT_TUPLE_OTHER;
1985                rss_cfg->rss_tuple_sets.ipv6_udp_en =
1986                                        HCLGEVF_RSS_INPUT_TUPLE_OTHER;
1987                rss_cfg->rss_tuple_sets.ipv6_sctp_en =
1988                                        HCLGEVF_RSS_INPUT_TUPLE_SCTP;
1989                rss_cfg->rss_tuple_sets.ipv6_fragment_en =
1990                                        HCLGEVF_RSS_INPUT_TUPLE_OTHER;
1991
1992                ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
1993                if (ret)
1994                        return ret;
1995
1996        }
1997
1998        /* Initialize RSS indirect table for each vport */
1999        for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
2000                rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;
2001
2002        ret = hclgevf_set_rss_indir_table(hdev);
2003        if (ret)
2004                return ret;
2005
2006        return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
2007}
2008
2009static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
2010{
2011        /* other VLAN config (e.g. VLAN TX/RX offload) would also be added
2012         * here later
2013         */
2014        return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
2015                                       false);
2016}
2017
2018static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
2019{
2020        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2021
2022        if (enable) {
2023                mod_timer(&hdev->service_timer, jiffies + HZ);
2024        } else {
2025                del_timer_sync(&hdev->service_timer);
2026                cancel_work_sync(&hdev->service_task);
2027                clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
2028        }
2029}
2030
2031static int hclgevf_ae_start(struct hnae3_handle *handle)
2032{
2033        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2034
2035        /* reset tqp stats */
2036        hclgevf_reset_tqp_stats(handle);
2037
2038        hclgevf_request_link_info(hdev);
2039
2040        hclgevf_update_link_mode(hdev);
2041
2042        clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2043
2044        return 0;
2045}
2046
2047static void hclgevf_ae_stop(struct hnae3_handle *handle)
2048{
2049        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2050        int i;
2051
2052        set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2053
2054        if (hdev->reset_type != HNAE3_VF_RESET)
2055                for (i = 0; i < handle->kinfo.num_tqps; i++)
2056                        if (hclgevf_reset_tqp(handle, i))
2057                                break;
2058
2059        /* reset tqp stats */
2060        hclgevf_reset_tqp_stats(handle);
2061        hclgevf_update_link_status(hdev, 0);
2062}
2063
2064static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
2065{
2066        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2067        u8 msg_data;
2068
2069        msg_data = alive ? 1 : 0;
2070        return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_ALIVE,
2071                                    0, &msg_data, 1, false, NULL, 0);
2072}
2073
2074static int hclgevf_client_start(struct hnae3_handle *handle)
2075{
2076        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2077        int ret;
2078
2079        ret = hclgevf_set_alive(handle, true);
2080        if (ret)
2081                return ret;
2082
2083        mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
2084
2085        return 0;
2086}
2087
2088static void hclgevf_client_stop(struct hnae3_handle *handle)
2089{
2090        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2091        int ret;
2092
2093        ret = hclgevf_set_alive(handle, false);
2094        if (ret)
2095                dev_warn(&hdev->pdev->dev,
2096                         "%s failed %d\n", __func__, ret);
2097
2098        del_timer_sync(&hdev->keep_alive_timer);
2099        cancel_work_sync(&hdev->keep_alive_task);
2100}
2101
2102static void hclgevf_state_init(struct hclgevf_dev *hdev)
2103{
2104        /* setup tasks for the MBX */
2105        INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
2106        clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
2107        clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
2108
2109        /* setup the timer and work for the periodic service task */
2110        timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);
2111
2112        INIT_WORK(&hdev->service_task, hclgevf_service_task);
2113        clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
2114
2115        INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task);
2116
2117        mutex_init(&hdev->mbx_resp.mbx_mutex);
2118
2119        /* bring the device down */
2120        set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2121}
2122
2123static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
2124{
2125        set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
2126
2127        if (hdev->keep_alive_timer.function)
2128                del_timer_sync(&hdev->keep_alive_timer);
2129        if (hdev->keep_alive_task.func)
2130                cancel_work_sync(&hdev->keep_alive_task);
2131        if (hdev->service_timer.function)
2132                del_timer_sync(&hdev->service_timer);
2133        if (hdev->service_task.func)
2134                cancel_work_sync(&hdev->service_task);
2135        if (hdev->mbx_service_task.func)
2136                cancel_work_sync(&hdev->mbx_service_task);
2137        if (hdev->rst_service_task.func)
2138                cancel_work_sync(&hdev->rst_service_task);
2139
2140        mutex_destroy(&hdev->mbx_resp.mbx_mutex);
2141}
2142
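/* hclgevf_init_msi - allocate interrupt vectors and the per-vector
 * bookkeeping arrays. With RoCE support at least roce_base_msix_offset + 1
 * MSI-X vectors are required so the RoCE block keeps its fixed offset;
 * without it, plain MSI is an acceptable fallback.
 */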
2143static int hclgevf_init_msi(struct hclgevf_dev *hdev)
2144{
2145        struct pci_dev *pdev = hdev->pdev;
2146        int vectors;
2147        int i;
2148
2149        if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
2150                vectors = pci_alloc_irq_vectors(pdev,
2151                                                hdev->roce_base_msix_offset + 1,
2152                                                hdev->num_msi,
2153                                                PCI_IRQ_MSIX);
2154        else
2155                vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
2156                                                PCI_IRQ_MSI | PCI_IRQ_MSIX);
2157
2158        if (vectors < 0) {
2159                dev_err(&pdev->dev,
2160                        "failed(%d) to allocate MSI/MSI-X vectors\n",
2161                        vectors);
2162                return vectors;
2163        }
2164        if (vectors < hdev->num_msi)
2165                dev_warn(&hdev->pdev->dev,
2166                         "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2167                         hdev->num_msi, vectors);
2168
2169        hdev->num_msi = vectors;
2170        hdev->num_msi_left = vectors;
2171        hdev->base_msi_vector = pdev->irq;
2172        hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;
2173
2174        hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2175                                           sizeof(u16), GFP_KERNEL);
2176        if (!hdev->vector_status) {
2177                pci_free_irq_vectors(pdev);
2178                return -ENOMEM;
2179        }
2180
2181        for (i = 0; i < hdev->num_msi; i++)
2182                hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;
2183
2184        hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2185                                        sizeof(int), GFP_KERNEL);
2186        if (!hdev->vector_irq) {
2187                devm_kfree(&pdev->dev, hdev->vector_status);
2188                pci_free_irq_vectors(pdev);
2189                return -ENOMEM;
2190        }
2191
2192        return 0;
2193}
2194
2195static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
2196{
2197        struct pci_dev *pdev = hdev->pdev;
2198
2199        devm_kfree(&pdev->dev, hdev->vector_status);
2200        devm_kfree(&pdev->dev, hdev->vector_irq);
2201        pci_free_irq_vectors(pdev);
2202}
2203
2204static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
2205{
2206        int ret = 0;
2207
2208        hclgevf_get_misc_vector(hdev);
2209
2210        ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
2211                          0, "hclgevf_cmd", hdev);
2212        if (ret) {
2213                dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
2214                        hdev->misc_vector.vector_irq);
2215                return ret;
2216        }
2217
2218        hclgevf_clear_event_cause(hdev, 0);
2219
2220        /* enable misc. vector(vector 0) */
2221        hclgevf_enable_vector(&hdev->misc_vector, true);
2222
2223        return ret;
2224}
2225
2226static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
2227{
2228        /* disable misc vector(vector 0) */
2229        hclgevf_enable_vector(&hdev->misc_vector, false);
2230        synchronize_irq(hdev->misc_vector.vector_irq);
2231        free_irq(hdev->misc_vector.vector_irq, hdev);
2232        hclgevf_free_vector(hdev, 0);
2233}
2234
2235static void hclgevf_info_show(struct hclgevf_dev *hdev)
2236{
2237        struct device *dev = &hdev->pdev->dev;
2238
2239        dev_info(dev, "VF info begin:\n");
2240
2241        dev_info(dev, "Number of task queue pairs: %d\n", hdev->num_tqps);
2242        dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
2243        dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
2244        dev_info(dev, "Number of vports: %d\n", hdev->num_alloc_vport);
2245        dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
2246        dev_info(dev, "PF media type of this VF: %d\n",
2247                 hdev->hw.mac.media_type);
2248
2249        dev_info(dev, "VF info end.\n");
2250}
2251
2252static int hclgevf_init_client_instance(struct hnae3_client *client,
2253                                        struct hnae3_ae_dev *ae_dev)
2254{
2255        struct hclgevf_dev *hdev = ae_dev->priv;
2256        int ret;
2257
2258        switch (client->type) {
2259        case HNAE3_CLIENT_KNIC:
2260                hdev->nic_client = client;
2261                hdev->nic.client = client;
2262
2263                ret = client->ops->init_instance(&hdev->nic);
2264                if (ret)
2265                        goto clear_nic;
2266
2267                hnae3_set_client_init_flag(client, ae_dev, 1);
2268
2269                if (netif_msg_drv(&hdev->nic))
2270                        hclgevf_info_show(hdev);
2271
2272                if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
2273                        struct hnae3_client *rc = hdev->roce_client;
2274
2275                        ret = hclgevf_init_roce_base_info(hdev);
2276                        if (ret)
2277                                goto clear_roce;
2278                        ret = rc->ops->init_instance(&hdev->roce);
2279                        if (ret)
2280                                goto clear_roce;
2281
2282                        hnae3_set_client_init_flag(hdev->roce_client, ae_dev,
2283                                                   1);
2284                }
2285                break;
2286        case HNAE3_CLIENT_UNIC:
2287                hdev->nic_client = client;
2288                hdev->nic.client = client;
2289
2290                ret = client->ops->init_instance(&hdev->nic);
2291                if (ret)
2292                        goto clear_nic;
2293
2294                hnae3_set_client_init_flag(client, ae_dev, 1);
2295                break;
2296        case HNAE3_CLIENT_ROCE:
2297                if (hnae3_dev_roce_supported(hdev)) {
2298                        hdev->roce_client = client;
2299                        hdev->roce.client = client;
2300                }
2301
2302                if (hdev->roce_client && hdev->nic_client) {
2303                        ret = hclgevf_init_roce_base_info(hdev);
2304                        if (ret)
2305                                goto clear_roce;
2306
2307                        ret = client->ops->init_instance(&hdev->roce);
2308                        if (ret)
2309                                goto clear_roce;
2310                }
2311
2312                hnae3_set_client_init_flag(client, ae_dev, 1);
2313                break;
2314        default:
2315                return -EINVAL;
2316        }
2317
2318        return 0;
2319
2320clear_nic:
2321        hdev->nic_client = NULL;
2322        hdev->nic.client = NULL;
2323        return ret;
2324clear_roce:
2325        hdev->roce_client = NULL;
2326        hdev->roce.client = NULL;
2327        return ret;
2328}
2329
2330static void hclgevf_uninit_client_instance(struct hnae3_client *client,
2331                                           struct hnae3_ae_dev *ae_dev)
2332{
2333        struct hclgevf_dev *hdev = ae_dev->priv;
2334
2335        /* un-init roce, if it exists */
2336        if (hdev->roce_client) {
2337                hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
2338                hdev->roce_client = NULL;
2339                hdev->roce.client = NULL;
2340        }
2341
2342        /* un-init nic/unic, if this was not called by roce client */
2343        if (client->ops->uninit_instance && hdev->nic_client &&
2344            client->type != HNAE3_CLIENT_ROCE) {
2345                client->ops->uninit_instance(&hdev->nic, 0);
2346                hdev->nic_client = NULL;
2347                hdev->nic.client = NULL;
2348        }
2349}
2350
2351static int hclgevf_pci_init(struct hclgevf_dev *hdev)
2352{
2353        struct pci_dev *pdev = hdev->pdev;
2354        struct hclgevf_hw *hw;
2355        int ret;
2356
2357        ret = pci_enable_device(pdev);
2358        if (ret) {
2359                dev_err(&pdev->dev, "failed to enable PCI device\n");
2360                return ret;
2361        }
2362
2363        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2364        if (ret) {
2365                dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting\n");
2366                goto err_disable_device;
2367        }
2368
2369        ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
2370        if (ret) {
2371                dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
2372                goto err_disable_device;
2373        }
2374
2375        pci_set_master(pdev);
2376        hw = &hdev->hw;
2377        hw->hdev = hdev;
2378        hw->io_base = pci_iomap(pdev, 2, 0);
2379        if (!hw->io_base) {
2380                dev_err(&pdev->dev, "can't map configuration register space\n");
2381                ret = -ENOMEM;
2382                goto err_clr_master;
2383        }
2384
2385        return 0;
2386
2387err_clr_master:
2388        pci_clear_master(pdev);
2389        pci_release_regions(pdev);
2390err_disable_device:
2391        pci_disable_device(pdev);
2392
2393        return ret;
2394}
2395
2396static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
2397{
2398        struct pci_dev *pdev = hdev->pdev;
2399
2400        pci_iounmap(pdev, hdev->hw.io_base);
2401        pci_clear_master(pdev);
2402        pci_release_regions(pdev);
2403        pci_disable_device(pdev);
2404}
2405
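/* hclgevf_query_vf_resource - read the vector resources from firmware and
 * derive num_msi: with RoCE support it is the RoCE vector count plus the
 * fixed RoCE MSI-X offset (NIC vectors come first), otherwise just the
 * NIC vector count.
 */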
2406static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
2407{
2408        struct hclgevf_query_res_cmd *req;
2409        struct hclgevf_desc desc;
2410        int ret;
2411
2412        hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
2413        ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
2414        if (ret) {
2415                dev_err(&hdev->pdev->dev,
2416                        "query vf resource failed, ret = %d.\n", ret);
2417                return ret;
2418        }
2419
2420        req = (struct hclgevf_query_res_cmd *)desc.data;
2421
2422        if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) {
2423                hdev->roce_base_msix_offset =
2424                hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
2425                                HCLGEVF_MSIX_OFT_ROCEE_M,
2426                                HCLGEVF_MSIX_OFT_ROCEE_S);
2427                hdev->num_roce_msix =
2428                hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
2429                                HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
2430
2431                /* VF should have NIC and RoCE vectors, with the NIC vectors
2432                 * enqueued before the RoCE vectors. The offset is fixed to 64.
2433                 */
2434                hdev->num_msi = hdev->num_roce_msix +
2435                                hdev->roce_base_msix_offset;
2436        } else {
2437                hdev->num_msi =
2438                hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
2439                                HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
2440        }
2441
2442        return 0;
2443}
2444
2445static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
2446{
2447        struct pci_dev *pdev = hdev->pdev;
2448        int ret = 0;
2449
2450        if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
2451            test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
2452                hclgevf_misc_irq_uninit(hdev);
2453                hclgevf_uninit_msi(hdev);
2454                clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
2455        }
2456
2457        if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
2458                pci_set_master(pdev);
2459                ret = hclgevf_init_msi(hdev);
2460                if (ret) {
2461                        dev_err(&pdev->dev,
2462                                "failed(%d) to init MSI/MSI-X\n", ret);
2463                        return ret;
2464                }
2465
2466                ret = hclgevf_misc_irq_init(hdev);
2467                if (ret) {
2468                        hclgevf_uninit_msi(hdev);
2469                        dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
2470                                ret);
2471                        return ret;
2472                }
2473
2474                set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
2475        }
2476
2477        return ret;
2478}
2479
2480static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
2481{
2482        struct pci_dev *pdev = hdev->pdev;
2483        int ret;
2484
2485        ret = hclgevf_pci_reset(hdev);
2486        if (ret) {
2487                dev_err(&pdev->dev, "pci reset failed %d\n", ret);
2488                return ret;
2489        }
2490
2491        ret = hclgevf_cmd_init(hdev);
2492        if (ret) {
2493                dev_err(&pdev->dev, "cmd failed %d\n", ret);
2494                return ret;
2495        }
2496
2497        ret = hclgevf_rss_init_hw(hdev);
2498        if (ret) {
2499                dev_err(&hdev->pdev->dev,
2500                        "failed(%d) to initialize RSS\n", ret);
2501                return ret;
2502        }
2503
2504        ret = hclgevf_config_gro(hdev, true);
2505        if (ret)
2506                return ret;
2507
2508        ret = hclgevf_init_vlan_config(hdev);
2509        if (ret) {
2510                dev_err(&hdev->pdev->dev,
2511                        "failed(%d) to initialize VLAN config\n", ret);
2512                return ret;
2513        }
2514
2515        dev_info(&hdev->pdev->dev, "Reset done\n");
2516
2517        return 0;
2518}
2519
2520static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
2521{
2522        struct pci_dev *pdev = hdev->pdev;
2523        int ret;
2524
2525        ret = hclgevf_pci_init(hdev);
2526        if (ret) {
2527                dev_err(&pdev->dev, "PCI initialization failed\n");
2528                return ret;
2529        }
2530
2531        ret = hclgevf_cmd_queue_init(hdev);
2532        if (ret) {
2533                dev_err(&pdev->dev, "Cmd queue init failed: %d\n", ret);
2534                goto err_cmd_queue_init;
2535        }
2536
2537        ret = hclgevf_cmd_init(hdev);
2538        if (ret)
2539                goto err_cmd_init;
2540
2541        /* Get vf resource */
2542        ret = hclgevf_query_vf_resource(hdev);
2543        if (ret) {
2544                dev_err(&hdev->pdev->dev,
2545                        "Query vf resource error, ret = %d.\n", ret);
2546                goto err_cmd_init;
2547        }
2548
2549        ret = hclgevf_init_msi(hdev);
2550        if (ret) {
2551                dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
2552                goto err_cmd_init;
2553        }
2554
2555        hclgevf_state_init(hdev);
2556        hdev->reset_level = HNAE3_VF_FUNC_RESET;
2557
2558        ret = hclgevf_misc_irq_init(hdev);
2559        if (ret) {
2560                dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
2561                        ret);
2562                goto err_misc_irq_init;
2563        }
2564
2565        set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
2566
2567        ret = hclgevf_configure(hdev);
2568        if (ret) {
2569                dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
2570                goto err_config;
2571        }
2572
2573        ret = hclgevf_alloc_tqps(hdev);
2574        if (ret) {
2575                dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
2576                goto err_config;
2577        }
2578
2579        ret = hclgevf_set_handle_info(hdev);
2580        if (ret) {
2581                dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
2582                goto err_config;
2583        }
2584
2585        ret = hclgevf_config_gro(hdev, true);
2586        if (ret)
2587                goto err_config;
2588
2589        /* The VF is not allowed to enable unicast/multicast promisc mode.
2590         * For revision 0x20, broadcast promisc mode is disabled by default;
2591         * the firmware makes sure broadcast packets can still be accepted.
2592         * For revision 0x21, broadcast promisc mode is enabled by default.
2593         */
2594        ret = hclgevf_set_promisc_mode(hdev, true);
2595        if (ret)
2596                goto err_config;
2597
2598        /* Initialize RSS for this VF */
2599        ret = hclgevf_rss_init_hw(hdev);
2600        if (ret) {
2601                dev_err(&hdev->pdev->dev,
2602                        "failed(%d) to initialize RSS\n", ret);
2603                goto err_config;
2604        }
2605
2606        ret = hclgevf_init_vlan_config(hdev);
2607        if (ret) {
2608                dev_err(&hdev->pdev->dev,
2609                        "failed(%d) to initialize VLAN config\n", ret);
2610                goto err_config;
2611        }
2612
2613        hdev->last_reset_time = jiffies;
2614        pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);
2615
2616        return 0;
2617
2618err_config:
2619        hclgevf_misc_irq_uninit(hdev);
2620err_misc_irq_init:
2621        hclgevf_state_uninit(hdev);
2622        hclgevf_uninit_msi(hdev);
2623err_cmd_init:
2624        hclgevf_cmd_uninit(hdev);
2625err_cmd_queue_init:
2626        hclgevf_pci_uninit(hdev);
2627        clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
2628        return ret;
2629}
2630
2631static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
2632{
2633        hclgevf_state_uninit(hdev);
2634
2635        if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
2636                hclgevf_misc_irq_uninit(hdev);
2637                hclgevf_uninit_msi(hdev);
2638        }
2639
2640        hclgevf_pci_uninit(hdev);
2641        hclgevf_cmd_uninit(hdev);
2642}
2643
2644static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
2645{
2646        struct pci_dev *pdev = ae_dev->pdev;
2647        struct hclgevf_dev *hdev;
2648        int ret;
2649
2650        ret = hclgevf_alloc_hdev(ae_dev);
2651        if (ret) {
2652                dev_err(&pdev->dev, "hclge device allocation failed\n");
2653                return ret;
2654        }
2655
2656        ret = hclgevf_init_hdev(ae_dev->priv);
2657        if (ret) {
2658                dev_err(&pdev->dev, "hclge device initialization failed\n");
2659                return ret;
2660        }
2661
2662        hdev = ae_dev->priv;
2663        timer_setup(&hdev->keep_alive_timer, hclgevf_keep_alive_timer, 0);
2664        INIT_WORK(&hdev->keep_alive_task, hclgevf_keep_alive_task);
2665
2666        return 0;
2667}
2668
2669static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
2670{
2671        struct hclgevf_dev *hdev = ae_dev->priv;
2672
2673        hclgevf_uninit_hdev(hdev);
2674        ae_dev->priv = NULL;
2675}
2676
2677static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
2678{
2679        struct hnae3_handle *nic = &hdev->nic;
2680        struct hnae3_knic_private_info *kinfo = &nic->kinfo;
2681
2682        return min_t(u32, hdev->rss_size_max,
2683                     hdev->num_tqps / kinfo->num_tc);
2684}
2685
2686/**
2687 * hclgevf_get_channels - Get the current channels enabled and max supported.
2688 * @handle: hardware information for network interface
2689 * @ch: ethtool channels structure
2690 *
2691 * We don't support separate tx and rx queues as channels. The other count
2692 * represents how many queues are being used for control. max_combined counts
2693 * how many queue pairs we can support. They may not be mapped 1 to 1 with
2694 * q_vectors since we support a lot more queue pairs than q_vectors.
2695 **/
2696static void hclgevf_get_channels(struct hnae3_handle *handle,
2697                                 struct ethtool_channels *ch)
2698{
2699        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2700
2701        ch->max_combined = hclgevf_get_max_channels(hdev);
2702        ch->other_count = 0;
2703        ch->max_other = 0;
2704        ch->combined_count = handle->kinfo.rss_size;
2705}
2706
2707static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
2708                                          u16 *alloc_tqps, u16 *max_rss_size)
2709{
2710        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2711
2712        *alloc_tqps = hdev->num_tqps;
2713        *max_rss_size = hdev->rss_size_max;
2714}
2715
2716static int hclgevf_get_status(struct hnae3_handle *handle)
2717{
2718        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2719
2720        return hdev->hw.mac.link;
2721}
2722
2723static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
2724                                            u8 *auto_neg, u32 *speed,
2725                                            u8 *duplex)
2726{
2727        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2728
2729        if (speed)
2730                *speed = hdev->hw.mac.speed;
2731        if (duplex)
2732                *duplex = hdev->hw.mac.duplex;
2733        if (auto_neg)
2734                *auto_neg = AUTONEG_DISABLE;
2735}
2736
2737void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
2738                                 u8 duplex)
2739{
2740        hdev->hw.mac.speed = speed;
2741        hdev->hw.mac.duplex = duplex;
2742}
2743
2744static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
2745{
2746        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2747
2748        return hclgevf_config_gro(hdev, enable);
2749}
2750
2751static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type,
2752                                   u8 *module_type)
2753{
2754        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2755
2756        if (media_type)
2757                *media_type = hdev->hw.mac.media_type;
2758
2759        if (module_type)
2760                *module_type = hdev->hw.mac.module_type;
2761}
2762
2763static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
2764{
2765        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2766
2767        return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
2768}
2769
2770static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
2771{
2772        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2773
2774        return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
2775}
2776
2777static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
2778{
2779        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2780
2781        return hdev->rst_stats.hw_rst_done_cnt;
2782}
2783
2784static void hclgevf_get_link_mode(struct hnae3_handle *handle,
2785                                  unsigned long *supported,
2786                                  unsigned long *advertising)
2787{
2788        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2789
2790        *supported = hdev->hw.mac.supported;
2791        *advertising = hdev->hw.mac.advertising;
2792}
2793
2794#define MAX_SEPARATE_NUM        4
2795#define SEPARATOR_VALUE         0xFFFFFFFF
2796#define REG_NUM_PER_LINE        4
2797#define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
2798
2799static int hclgevf_get_regs_len(struct hnae3_handle *handle)
2800{
2801        int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
2802        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2803
2804        cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
2805        common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
2806        ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
2807        tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
2808
2809        return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps +
2810                tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE;
2811}
2812
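/* hclgevf_get_regs_len() above rounds each register group up to whole dump
 * lines of REG_NUM_PER_LINE u32s plus one separator line. As a worked
 * example with an assumed group of 14 registers (the count is
 * illustrative): 14 * 4 / 16 + 1 = 4 lines, i.e. 64 bytes of dump space,
 * which matches what hclgevf_get_regs() below emits (14 values plus
 * 4 - 14 % 4 = 2 separators = 16 u32s).
 */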
2813static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
2814                             void *data)
2815{
2816        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
2817        int i, j, reg_num, separator_num;
2818        u32 *reg = data;
2819
2820        *version = hdev->fw_version;
2821
2822        /* fetch per-VF register values from the VF PCIe register space */
2823        reg_num = sizeof(cmdq_reg_addr_list) / sizeof(u32);
2824        separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
2825        for (i = 0; i < reg_num; i++)
2826                *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
2827        for (i = 0; i < separator_num; i++)
2828                *reg++ = SEPARATOR_VALUE;
2829
2830        reg_num = sizeof(common_reg_addr_list) / sizeof(u32);
2831        separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
2832        for (i = 0; i < reg_num; i++)
2833                *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
2834        for (i = 0; i < separator_num; i++)
2835                *reg++ = SEPARATOR_VALUE;
2836
2837        reg_num = sizeof(ring_reg_addr_list) / sizeof(u32);
2838        separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
2839        for (j = 0; j < hdev->num_tqps; j++) {
2840                for (i = 0; i < reg_num; i++)
2841                        *reg++ = hclgevf_read_dev(&hdev->hw,
2842                                                  ring_reg_addr_list[i] +
2843                                                  0x200 * j);
2844                for (i = 0; i < separator_num; i++)
2845                        *reg++ = SEPARATOR_VALUE;
2846        }
2847
2848        reg_num = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
2849        separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
2850        for (j = 0; j < hdev->num_msi_used - 1; j++) {
2851                for (i = 0; i < reg_num; i++)
2852                        *reg++ = hclgevf_read_dev(&hdev->hw,
2853                                                  tqp_intr_reg_addr_list[i] +
2854                                                  4 * j);
2855                for (i = 0; i < separator_num; i++)
2856                        *reg++ = SEPARATOR_VALUE;
2857        }
2858}
2859
2860void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
2861                                        u8 *port_base_vlan_info, u8 data_size)
2862{
2863        struct hnae3_handle *nic = &hdev->nic;
2864
2865        rtnl_lock();
2866        hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
2867        rtnl_unlock();
2868
2869        /* send msg to PF and wait for the port based VLAN info to be updated */
2870        hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
2871                             HCLGE_MBX_PORT_BASE_VLAN_CFG,
2872                             port_base_vlan_info, data_size,
2873                             false, NULL, 0);
2874
2875        if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
2876                nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
2877        else
2878                nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
2879
2880        rtnl_lock();
2881        hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
2882        rtnl_unlock();
2883}
2884
2885static const struct hnae3_ae_ops hclgevf_ops = {
2886        .init_ae_dev = hclgevf_init_ae_dev,
2887        .uninit_ae_dev = hclgevf_uninit_ae_dev,
2888        .flr_prepare = hclgevf_flr_prepare,
2889        .flr_done = hclgevf_flr_done,
2890        .init_client_instance = hclgevf_init_client_instance,
2891        .uninit_client_instance = hclgevf_uninit_client_instance,
2892        .start = hclgevf_ae_start,
2893        .stop = hclgevf_ae_stop,
2894        .client_start = hclgevf_client_start,
2895        .client_stop = hclgevf_client_stop,
2896        .map_ring_to_vector = hclgevf_map_ring_to_vector,
2897        .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
2898        .get_vector = hclgevf_get_vector,
2899        .put_vector = hclgevf_put_vector,
2900        .reset_queue = hclgevf_reset_tqp,
2901        .get_mac_addr = hclgevf_get_mac_addr,
2902        .set_mac_addr = hclgevf_set_mac_addr,
2903        .add_uc_addr = hclgevf_add_uc_addr,
2904        .rm_uc_addr = hclgevf_rm_uc_addr,
2905        .add_mc_addr = hclgevf_add_mc_addr,
2906        .rm_mc_addr = hclgevf_rm_mc_addr,
2907        .get_stats = hclgevf_get_stats,
2908        .update_stats = hclgevf_update_stats,
2909        .get_strings = hclgevf_get_strings,
2910        .get_sset_count = hclgevf_get_sset_count,
2911        .get_rss_key_size = hclgevf_get_rss_key_size,
2912        .get_rss_indir_size = hclgevf_get_rss_indir_size,
2913        .get_rss = hclgevf_get_rss,
2914        .set_rss = hclgevf_set_rss,
2915        .get_rss_tuple = hclgevf_get_rss_tuple,
2916        .set_rss_tuple = hclgevf_set_rss_tuple,
2917        .get_tc_size = hclgevf_get_tc_size,
2918        .get_fw_version = hclgevf_get_fw_version,
2919        .set_vlan_filter = hclgevf_set_vlan_filter,
2920        .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
2921        .reset_event = hclgevf_reset_event,
2922        .set_default_reset_request = hclgevf_set_def_reset_request,
2923        .get_channels = hclgevf_get_channels,
2924        .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
2925        .get_regs_len = hclgevf_get_regs_len,
2926        .get_regs = hclgevf_get_regs,
2927        .get_status = hclgevf_get_status,
2928        .get_ksettings_an_result = hclgevf_get_ksettings_an_result,
2929        .get_media_type = hclgevf_get_media_type,
2930        .get_hw_reset_stat = hclgevf_get_hw_reset_stat,
2931        .ae_dev_resetting = hclgevf_ae_dev_resetting,
2932        .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
2933        .set_gro_en = hclgevf_gro_en,
2934        .set_mtu = hclgevf_set_mtu,
2935        .get_global_queue_id = hclgevf_get_qid_global,
2936        .set_timer_task = hclgevf_set_timer_task,
2937        .get_link_mode = hclgevf_get_link_mode,
2938};
2939
2940static struct hnae3_ae_algo ae_algovf = {
2941        .ops = &hclgevf_ops,
2942        .pdev_id_table = ae_algovf_pci_tbl,
2943};
2944
2945static int hclgevf_init(void)
2946{
2947        pr_info("%s is initializing\n", HCLGEVF_NAME);
2948
2949        hnae3_register_ae_algo(&ae_algovf);
2950
2951        return 0;
2952}
2953
2954static void hclgevf_exit(void)
2955{
2956        hnae3_unregister_ae_algo(&ae_algovf);
2957}
2958module_init(hclgevf_init);
2959module_exit(hclgevf_exit);
2960
2961MODULE_LICENSE("GPL");
2962MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
2963MODULE_DESCRIPTION("HCLGEVF Driver");
2964MODULE_VERSION(HCLGEVF_MOD_VERSION);
2965