linux/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define HCLGEVF_NAME    "hclgevf"

static int hclgevf_init_hdev(struct hclgevf_dev *hdev);
static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
        /* required last entry */
        {0, }
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
        struct hnae3_handle *handle)
{
        return container_of(handle, struct hclgevf_dev, nic);
}

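/* Per-TQP RX/TX packet counters live in firmware and are fetched one queue
 * at a time: each queue needs two query descriptors (one RX, one TX), with
 * the queue index placed in data[0] (masked to 9 bits) and the packet
 * count returned in data[1].
 */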
static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct hnae3_queue *queue;
        struct hclgevf_desc desc;
        struct hclgevf_tqp *tqp;
        int status;
        int i;

        for (i = 0; i < hdev->num_tqps; i++) {
                queue = handle->kinfo.tqp[i];
                tqp = container_of(queue, struct hclgevf_tqp, q);
                hclgevf_cmd_setup_basic_desc(&desc,
                                             HCLGEVF_OPC_QUERY_RX_STATUS,
                                             true);

                desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
                status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
                if (status) {
                        dev_err(&hdev->pdev->dev,
                                "Query tqp stat failed, status = %d, queue = %d\n",
                                status, i);
                        return status;
                }
                tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
                        le32_to_cpu(desc.data[1]);

                hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
                                             true);

                desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
                status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
                if (status) {
                        dev_err(&hdev->pdev->dev,
                                "Query tqp stat failed, status = %d, queue = %d\n",
                                status, i);
                        return status;
                }
                tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
                        le32_to_cpu(desc.data[1]);
        }

        return 0;
}

static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct hclgevf_tqp *tqp;
        u64 *buff = data;
        int i;

        for (i = 0; i < hdev->num_tqps; i++) {
                tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
                *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
        }
        for (i = 0; i < hdev->num_tqps; i++) {
                tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
                *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
        }

        return buff;
}

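/* The ethtool stats layout mirrors hclgevf_tqps_get_strings(): all TX
 * per-queue counters first, then all RX counters, so the buffer holds
 * exactly hdev->num_tqps * 2 u64 entries as reported by
 * hclgevf_tqps_get_sset_count().
 */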
static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        return hdev->num_tqps * 2;
}

static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        u8 *buff = data;
        int i = 0;

        for (i = 0; i < hdev->num_tqps; i++) {
                struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
                        struct hclgevf_tqp, q);
                snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
                         tqp->index);
                buff += ETH_GSTRING_LEN;
        }

        for (i = 0; i < hdev->num_tqps; i++) {
                struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
                        struct hclgevf_tqp, q);
                snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
                         tqp->index);
                buff += ETH_GSTRING_LEN;
        }

        return buff;
}

static void hclgevf_update_stats(struct hnae3_handle *handle,
                                 struct net_device_stats *net_stats)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        int status;

        status = hclgevf_tqps_update_stats(handle);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "VF update of TQP stats failed, status = %d.\n",
                        status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
        if (strset == ETH_SS_TEST)
                return -EOPNOTSUPP;
        else if (strset == ETH_SS_STATS)
                return hclgevf_tqps_get_sset_count(handle, strset);

        return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
                                u8 *data)
{
        u8 *p = data;

        if (strset == ETH_SS_STATS)
                p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
        hclgevf_tqps_get_stats(handle, data);
}

static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
        u8 resp_msg;
        int status;

        status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
                                      true, &resp_msg, sizeof(u8));
        if (status) {
                dev_err(&hdev->pdev->dev,
                        "VF request to get TC info from PF failed %d",
                        status);
                return status;
        }

        hdev->hw_tc_map = resp_msg;

        return 0;
}

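/* The HCLGE_MBX_GET_QINFO response packs four little-endian u16 fields
 * back to back: bytes 0-1 num_tqps, 2-3 rss_size_max, 4-5 num_desc and
 * 6-7 rx_buf_len, hence the 8-byte HCLGEVF_TQPS_RSS_INFO_LEN buffer below.
 */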
static int hclge_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN       8
        u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
        int status;

        status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
                                      true, resp_msg,
                                      HCLGEVF_TQPS_RSS_INFO_LEN);
        if (status) {
                dev_err(&hdev->pdev->dev,
                        "VF request to get tqp info from PF failed %d",
                        status);
                return status;
        }

        memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
        memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
        memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16));
        memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16));

        return 0;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
        struct hclgevf_tqp *tqp;
        int i;

        /* if this is an ongoing reset then we need to re-allocate the TQPs
         * since we cannot assume we would get the same number of TQPs back
         * from the PF
         */
        if (hclgevf_dev_ongoing_reset(hdev))
                devm_kfree(&hdev->pdev->dev, hdev->htqp);

        hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
                                  sizeof(struct hclgevf_tqp), GFP_KERNEL);
        if (!hdev->htqp)
                return -ENOMEM;

        tqp = hdev->htqp;

        for (i = 0; i < hdev->num_tqps; i++) {
                tqp->dev = &hdev->pdev->dev;
                tqp->index = i;

                tqp->q.ae_algo = &ae_algovf;
                tqp->q.buf_size = hdev->rx_buf_len;
                tqp->q.desc_num = hdev->num_desc;
                tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
                        i * HCLGEVF_TQP_REG_SIZE;

                tqp++;
        }

        return 0;
}

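/* Queue sizing below distributes the TQPs evenly across the enabled TCs.
 * Illustrative example (values assumed, not from the source): with
 * hw_tc_map enabling 2 TCs, num_tqps = 16 and rss_size_max = 8, we get
 * rss_size = min(8, 16 / 2) = 8 and kinfo->num_tqps = min(8 * 2, 16) = 16.
 */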
static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
        struct hnae3_handle *nic = &hdev->nic;
        struct hnae3_knic_private_info *kinfo;
        u16 new_tqps = hdev->num_tqps;
        int i;

        kinfo = &nic->kinfo;
        kinfo->num_tc = 0;
        kinfo->num_desc = hdev->num_desc;
        kinfo->rx_buf_len = hdev->rx_buf_len;
        for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
                if (hdev->hw_tc_map & BIT(i))
                        kinfo->num_tc++;

        kinfo->rss_size
                = min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
        new_tqps = kinfo->rss_size * kinfo->num_tc;
        kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

        /* if this is an ongoing reset then we need to re-allocate the hnae
         * queues as well since the number of TQPs from the PF might have
         * changed.
         */
        if (hclgevf_dev_ongoing_reset(hdev))
                devm_kfree(&hdev->pdev->dev, kinfo->tqp);

        kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
                                  sizeof(struct hnae3_queue *), GFP_KERNEL);
        if (!kinfo->tqp)
                return -ENOMEM;

        for (i = 0; i < kinfo->num_tqps; i++) {
                hdev->htqp[i].q.handle = &hdev->nic;
                hdev->htqp[i].q.tqp_index = i;
                kinfo->tqp[i] = &hdev->htqp[i].q;
        }

        return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
        int status;
        u8 resp_msg;

        status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
                                      0, false, &resp_msg, sizeof(u8));
        if (status)
                dev_err(&hdev->pdev->dev,
                        "VF failed to fetch link status (%d) from PF", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
        struct hnae3_handle *handle = &hdev->nic;
        struct hnae3_client *client;

        client = handle->client;

        if (link_state != hdev->hw.mac.link) {
                client->ops->link_status_change(handle, !!link_state);
                hdev->hw.mac.link = link_state;
        }
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
        struct hnae3_handle *nic = &hdev->nic;
        int ret;

        nic->ae_algo = &ae_algovf;
        nic->pdev = hdev->pdev;
        nic->numa_node_mask = hdev->numa_node_mask;
        nic->flags |= HNAE3_SUPPORT_VF;

        if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
                dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
                        hdev->ae_dev->dev_type);
                return -EINVAL;
        }

        ret = hclgevf_knic_setup(hdev);
        if (ret)
                dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
                        ret);
        return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
        if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
                dev_warn(&hdev->pdev->dev,
                         "vector(vector_id %d) has been freed.\n", vector_id);
                return;
        }

        hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
        hdev->num_msi_left += 1;
        hdev->num_msi_used -= 1;
}

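/* Vector allocation scans the MSI-X table starting just past the misc
 * vector (index 0); a slot is free while its vector_status is
 * HCLGEVF_INVALID_VPORT. The interrupt control address for vector i sits
 * at HCLGEVF_VECTOR_REG_BASE + (i - 1) * HCLGEVF_VECTOR_REG_OFFSET.
 */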
static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
                              struct hnae3_vector_info *vector_info)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct hnae3_vector_info *vector = vector_info;
        int alloc = 0;
        int i, j;

        vector_num = min(hdev->num_msi_left, vector_num);

        for (j = 0; j < vector_num; j++) {
                for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
                        if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
                                vector->vector = pci_irq_vector(hdev->pdev, i);
                                vector->io_addr = hdev->hw.io_base +
                                        HCLGEVF_VECTOR_REG_BASE +
                                        (i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
                                hdev->vector_status[i] = 0;
                                hdev->vector_irq[i] = vector->vector;

                                vector++;
                                alloc++;

                                break;
                        }
                }
        }
        hdev->num_msi_left -= alloc;
        hdev->num_msi_used += alloc;

        return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
        int i;

        for (i = 0; i < hdev->num_msi; i++)
                if (vector == hdev->vector_irq[i])
                        return i;

        return -EINVAL;
}

static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
        return HCLGEVF_RSS_KEY_SIZE;
}

static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
        return HCLGEVF_RSS_IND_TBL_SIZE;
}

static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
        const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
        struct hclgevf_rss_indirection_table_cmd *req;
        struct hclgevf_desc desc;
        int status;
        int i, j;

        req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;

        for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
                hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
                                             false);
                req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
                req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
                for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
                        req->rss_result[j] =
                                indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

                status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
                if (status) {
                        dev_err(&hdev->pdev->dev,
                                "VF failed(=%d) to set RSS indirection table\n",
                                status);
                        return status;
                }
        }

        return 0;
}

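/* The hardware wants the per-TC RSS size as a log2 value. Illustrative
 * example (values assumed): rss_size = 6 gives roundup_pow_of_two(6) = 8,
 * so tc_size = ilog2(8) = 3, while the tc_offset for TC i is simply
 * rss_size * i.
 */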
static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
        struct hclgevf_rss_tc_mode_cmd *req;
        u16 tc_offset[HCLGEVF_MAX_TC_NUM];
        u16 tc_valid[HCLGEVF_MAX_TC_NUM];
        u16 tc_size[HCLGEVF_MAX_TC_NUM];
        struct hclgevf_desc desc;
        u16 roundup_size;
        int status;
        int i;

        req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

        roundup_size = roundup_pow_of_two(rss_size);
        roundup_size = ilog2(roundup_size);

        for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
                tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
                tc_size[i] = roundup_size;
                tc_offset[i] = rss_size * i;
        }

        hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
        for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
                hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
                              (tc_valid[i] & 0x1));
                hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
                                HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
                hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
                                HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
        }
        status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "VF failed(=%d) to set rss tc mode\n", status);

        return status;
}

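/* Reading the RSS config takes up to three lookups because the hash key
 * is spread over three descriptors (two full chunks of
 * HCLGEVF_RSS_HASH_KEY_NUM bytes plus a remainder): three reads when the
 * caller wants the key, one when only the hash algorithm is needed, zero
 * otherwise.
 */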
static int hclgevf_get_rss_hw_cfg(struct hnae3_handle *handle, u8 *hash,
                                  u8 *key)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct hclgevf_rss_config_cmd *req;
        int lkup_times = key ? 3 : 1;
        struct hclgevf_desc desc;
        int key_offset;
        int key_size;
        int status;

        req = (struct hclgevf_rss_config_cmd *)desc.data;
        lkup_times = (lkup_times == 3) ? 3 : ((hash) ? 1 : 0);

        for (key_offset = 0; key_offset < lkup_times; key_offset++) {
                hclgevf_cmd_setup_basic_desc(&desc,
                                             HCLGEVF_OPC_RSS_GENERIC_CONFIG,
                                             true);
                req->hash_config |= (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET);

                status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
                if (status) {
                        dev_err(&hdev->pdev->dev,
                                "failed to get hardware RSS cfg, status = %d\n",
                                status);
                        return status;
                }

                if (key_offset == 2)
                        key_size =
                        HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
                else
                        key_size = HCLGEVF_RSS_HASH_KEY_NUM;

                if (key)
                        memcpy(key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM,
                               req->hash_key,
                               key_size);
        }

        if (hash) {
                if ((req->hash_config & 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ)
                        *hash = ETH_RSS_HASH_TOP;
                else
                        *hash = ETH_RSS_HASH_UNKNOWN;
        }

        return 0;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
                           u8 *hfunc)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
        int i;

        if (indir)
                for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
                        indir[i] = rss_cfg->rss_indirection_tbl[i];

        return hclgevf_get_rss_hw_cfg(handle, hfunc, key);
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
                           const u8 *key, const u8 hfunc)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
        int i;

        /* update the shadow RSS table with user specified qids */
        for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
                rss_cfg->rss_indirection_tbl[i] = indir[i];

        /* update the hardware */
        return hclgevf_set_rss_indir_table(hdev);
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

        return rss_cfg->rss_size;
}

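/* Ring-to-vector (un)mapping is requested from the PF over the mailbox.
 * The message layout is: msg[0] = map/unmap opcode, msg[1] = vector id,
 * msg[2] = ring count, followed by one (ring type, tqp index, GL index)
 * triplet per ring starting at HCLGE_MBX_RING_MAP_BASIC_MSG_NUM. When a
 * descriptor fills up it is flushed and a fresh one is started for the
 * remaining rings of the chain.
 */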
static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
                                       int vector_id,
                                       struct hnae3_ring_chain_node *ring_chain)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct hnae3_ring_chain_node *node;
        struct hclge_mbx_vf_to_pf_cmd *req;
        struct hclgevf_desc desc;
        int i = 0;
        int status;
        u8 type;

        req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

        for (node = ring_chain; node; node = node->next) {
                int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
                                        HCLGE_MBX_RING_NODE_VARIABLE_NUM * i;

                if (i == 0) {
                        hclgevf_cmd_setup_basic_desc(&desc,
                                                     HCLGEVF_OPC_MBX_VF_TO_PF,
                                                     false);
                        type = en ?
                                HCLGE_MBX_MAP_RING_TO_VECTOR :
                                HCLGE_MBX_UNMAP_RING_TO_VECTOR;
                        req->msg[0] = type;
                        req->msg[1] = vector_id;
                }

                req->msg[idx_offset] =
                                hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
                req->msg[idx_offset + 1] = node->tqp_index;
                req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx,
                                                           HNAE3_RING_GL_IDX_M,
                                                           HNAE3_RING_GL_IDX_S);

                i++;
                if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
                     HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
                     HCLGE_MBX_RING_NODE_VARIABLE_NUM) ||
                    !node->next) {
                        req->msg[2] = i;

                        status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
                        if (status) {
                                dev_err(&hdev->pdev->dev,
                                        "Map TQP fail, status is %d.\n",
                                        status);
                                return status;
                        }
                        i = 0;
                        hclgevf_cmd_setup_basic_desc(&desc,
                                                     HCLGEVF_OPC_MBX_VF_TO_PF,
                                                     false);
                        req->msg[0] = type;
                        req->msg[1] = vector_id;
                }
        }

        return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
                                      struct hnae3_ring_chain_node *ring_chain)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        int vector_id;

        vector_id = hclgevf_get_vector_index(hdev, vector);
        if (vector_id < 0) {
                dev_err(&handle->pdev->dev,
                        "Get vector index fail. ret = %d\n", vector_id);
                return vector_id;
        }

        return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(
                                struct hnae3_handle *handle,
                                int vector,
                                struct hnae3_ring_chain_node *ring_chain)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        int ret, vector_id;

        vector_id = hclgevf_get_vector_index(hdev, vector);
        if (vector_id < 0) {
                dev_err(&handle->pdev->dev,
                        "Get vector index fail. ret = %d\n", vector_id);
                return vector_id;
        }

        ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
        if (ret)
                dev_err(&handle->pdev->dev,
                        "Unmap ring from vector fail. vector=%d, ret = %d\n",
                        vector_id,
                        ret);

        return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        int vector_id;

        vector_id = hclgevf_get_vector_index(hdev, vector);
        if (vector_id < 0) {
                dev_err(&handle->pdev->dev,
                        "hclgevf_put_vector get vector index fail. ret = %d\n",
                        vector_id);
                return vector_id;
        }

        hclgevf_free_vector(hdev, vector_id);

        return 0;
}

static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
                                        bool en_uc_pmc, bool en_mc_pmc)
{
        struct hclge_mbx_vf_to_pf_cmd *req;
        struct hclgevf_desc desc;
        int status;

        req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

        hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
        req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
        req->msg[1] = en_uc_pmc ? 1 : 0;
        req->msg[2] = en_mc_pmc ? 1 : 0;

        status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Set promisc mode fail, status is %d.\n", status);

        return status;
}

static void hclgevf_set_promisc_mode(struct hnae3_handle *handle,
                                     bool en_uc_pmc, bool en_mc_pmc)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc);
}

static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
                              int stream_id, bool enable)
{
        struct hclgevf_cfg_com_tqp_queue_cmd *req;
        struct hclgevf_desc desc;
        int status;

        req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

        hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
                                     false);
        req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
        req->stream_id = cpu_to_le16(stream_id);
        req->enable |= enable << HCLGEVF_TQP_ENABLE_B;

        status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "TQP enable fail, status = %d.\n", status);

        return status;
}

static int hclgevf_get_queue_id(struct hnae3_queue *queue)
{
        struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q);

        return tqp->index;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct hnae3_queue *queue;
        struct hclgevf_tqp *tqp;
        int i;

        for (i = 0; i < hdev->num_tqps; i++) {
                queue = handle->kinfo.tqp[i];
                tqp = container_of(queue, struct hclgevf_tqp, q);
                memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
        }
}

static int hclgevf_cfg_func_mta_type(struct hclgevf_dev *hdev)
{
        u8 resp_msg = HCLGEVF_MTA_TYPE_SEL_MAX;
        int ret;

        ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
                                   HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ,
                                   NULL, 0, true, &resp_msg, sizeof(u8));

        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Read mta type fail, ret=%d.\n", ret);
                return ret;
        }

        if (resp_msg > HCLGEVF_MTA_TYPE_SEL_MAX) {
                dev_err(&hdev->pdev->dev,
                        "Read mta type invalid, resp=%d.\n", resp_msg);
                return -EINVAL;
        }

        hdev->mta_mac_sel_type = resp_msg;

        return 0;
}

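/* The MTA table index is a 12-bit window taken from the top of the first
 * two MAC address bytes; mta_mac_sel_type (read from the PF above)
 * selects which window is used via the right-shift amount.
 */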
static u16 hclgevf_get_mac_addr_to_mta_index(struct hclgevf_dev *hdev,
                                             const u8 *addr)
{
        u32 rsh = HCLGEVF_MTA_TYPE_SEL_MAX - hdev->mta_mac_sel_type;
        u16 high_val = addr[1] | (addr[0] << 8);

        return (high_val >> rsh) & 0xfff;
}

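/* The MTA status is pushed to the PF in 14-byte mailbox chunks: msg[0]
 * carries the chunk index in bits 0-6 and an end-of-table flag in bit 7,
 * and the remaining HCLGEVF_MTA_STATUS_MSG_SIZE bytes are a bitmap of
 * HCLGEVF_MTA_STATUS_MSG_BITS table entries.
 */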
static int hclgevf_do_update_mta_status(struct hclgevf_dev *hdev,
                                        unsigned long *status)
{
#define HCLGEVF_MTA_STATUS_MSG_SIZE 13
#define HCLGEVF_MTA_STATUS_MSG_BITS \
                        (HCLGEVF_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE)
#define HCLGEVF_MTA_STATUS_MSG_END_BITS \
                        (HCLGEVF_MTA_TBL_SIZE % HCLGEVF_MTA_STATUS_MSG_BITS)
        u16 tbl_cnt;
        u16 tbl_idx;
        u8 msg_cnt;
        u8 msg_idx;
        int ret;

        msg_cnt = DIV_ROUND_UP(HCLGEVF_MTA_TBL_SIZE,
                               HCLGEVF_MTA_STATUS_MSG_BITS);
        tbl_idx = 0;
        msg_idx = 0;
        while (msg_cnt--) {
                u8 msg[HCLGEVF_MTA_STATUS_MSG_SIZE + 1];
                u8 *p = &msg[1];
                u8 msg_ofs;
                u8 msg_bit;

                memset(msg, 0, sizeof(msg));

                /* set index field */
                msg[0] = 0x7F & msg_idx;

                /* set end flag field */
                if (msg_cnt == 0) {
                        msg[0] |= 0x80;
                        tbl_cnt = HCLGEVF_MTA_STATUS_MSG_END_BITS;
                } else {
                        tbl_cnt = HCLGEVF_MTA_STATUS_MSG_BITS;
                }

                /* set status field */
                msg_ofs = 0;
                msg_bit = 0;
                while (tbl_cnt--) {
                        if (test_bit(tbl_idx, status))
                                p[msg_ofs] |= BIT(msg_bit);

                        tbl_idx++;

                        msg_bit++;
                        if (msg_bit == BITS_PER_BYTE) {
                                msg_bit = 0;
                                msg_ofs++;
                        }
                }

                ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
                                           HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE,
                                           msg, sizeof(msg), false, NULL, 0);
                if (ret)
                        break;

                msg_idx++;
        }

        return ret;
}

static int hclgevf_update_mta_status(struct hnae3_handle *handle)
{
        unsigned long mta_status[BITS_TO_LONGS(HCLGEVF_MTA_TBL_SIZE)];
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct net_device *netdev = hdev->nic.kinfo.netdev;
        struct netdev_hw_addr *ha;
        u16 tbl_idx;

        /* clear status */
        memset(mta_status, 0, sizeof(mta_status));

        /* update status from mc addr list */
        netdev_for_each_mc_addr(ha, netdev) {
                tbl_idx = hclgevf_get_mac_addr_to_mta_index(hdev, ha->addr);
                set_bit(tbl_idx, mta_status);
        }

        return hclgevf_do_update_mta_status(hdev, mta_status);
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
                                bool is_first)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
        u8 *new_mac_addr = (u8 *)p;
        u8 msg_data[ETH_ALEN * 2];
        u16 subcode;
        int status;

        ether_addr_copy(msg_data, new_mac_addr);
        ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);

        subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
                        HCLGE_MBX_MAC_VLAN_UC_MODIFY;

        status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
                                      subcode, msg_data, ETH_ALEN * 2,
                                      true, NULL, 0);
        if (!status)
                ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

        return status;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
                               const unsigned char *addr)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
                                    HCLGE_MBX_MAC_VLAN_UC_ADD,
                                    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
                              const unsigned char *addr)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
                                    HCLGE_MBX_MAC_VLAN_UC_REMOVE,
                                    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
                               const unsigned char *addr)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
                                    HCLGE_MBX_MAC_VLAN_MC_ADD,
                                    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
                              const unsigned char *addr)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
                                    HCLGE_MBX_MAC_VLAN_MC_REMOVE,
                                    addr, ETH_ALEN, false, NULL, 0);
}

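/* The VLAN filter request is a 5-byte mailbox payload: byte 0 is the
 * is_kill flag, bytes 1-2 carry the VLAN id (host-order memcpy) and
 * bytes 3-4 the protocol as a __be16.
 */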
static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
                                   __be16 proto, u16 vlan_id,
                                   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_MSG_LEN 5
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];

        if (vlan_id > 4095)
                return -EINVAL;

        if (proto != htons(ETH_P_8021Q))
                return -EPROTONOSUPPORT;

        msg_data[0] = is_kill;
        memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
        memcpy(&msg_data[3], &proto, sizeof(proto));
        return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
                                    HCLGE_MBX_VLAN_FILTER, msg_data,
                                    HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
}

static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        u8 msg_data;

        msg_data = enable ? 1 : 0;
        return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
                                    HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data,
                                    1, false, NULL, 0);
}

static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        u8 msg_data[2];
        int ret;

        memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

        /* disable the vf queue before sending the queue reset msg to PF */
        ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
        if (ret)
                return;

        hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
                             2, true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
                                 enum hnae3_reset_notify_type type)
{
        struct hnae3_client *client = hdev->nic_client;
        struct hnae3_handle *handle = &hdev->nic;

        if (!client->ops->reset_notify)
                return -EOPNOTSUPP;

        return client->ops->reset_notify(handle, type);
}

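/* Reset completion is polled from the HCLGEVF_FUN_RST_ING register for up
 * to HCLGEVF_RESET_WAIT_CNT * HCLGEVF_RESET_WAIT_MS = 10 seconds, after
 * which a further fixed 5 second sleep gives the stack reset time to
 * settle.
 */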
static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_MS   500
#define HCLGEVF_RESET_WAIT_CNT  20
        u32 val, cnt = 0;

        /* wait to check the hardware reset completion status */
        val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
        while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
               (cnt < HCLGEVF_RESET_WAIT_CNT)) {
                msleep(HCLGEVF_RESET_WAIT_MS);
                val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
                cnt++;
        }

        /* hardware completion status should be available by this time */
        if (cnt >= HCLGEVF_RESET_WAIT_CNT) {
                dev_warn(&hdev->pdev->dev,
                         "couldn't get reset done status from h/w, timeout!\n");
                return -EBUSY;
        }

        /* we will wait a bit more to let the reset of the stack complete.
         * This might happen when the reset assertion was made by the PF.
         * Yes, this also means we might end up waiting a bit more even for
         * a VF reset.
         */
        msleep(5000);

        return 0;
}

static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
        int ret;

        /* uninitialize the nic client */
        hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);

        /* re-initialize the hclge device */
        ret = hclgevf_init_hdev(hdev);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "hclge device re-init failed, VF is disabled!\n");
                return ret;
        }

        /* bring up the nic client again */
        hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);

        return 0;
}

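/* The full VF reset sequence runs under the rtnl lock: bring the nic
 * client down, poll the hardware for reset completion, re-initialize the
 * hclgevf device and nic client, then bring the client back up. If the
 * hardware never reports completion the client stays uninitialized and
 * the VF is effectively disabled.
 */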
static int hclgevf_reset(struct hclgevf_dev *hdev)
{
        int ret;

        rtnl_lock();

        /* bring down the nic to stop any ongoing TX/RX */
        hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);

        /* check if VF could successfully fetch the hardware reset completion
         * status from the hardware
         */
        ret = hclgevf_reset_wait(hdev);
        if (ret) {
                /* can't do much in this situation, will disable VF */
                dev_err(&hdev->pdev->dev,
                        "VF failed(=%d) to fetch H/W reset completion status\n",
                        ret);

                dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n");
                hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);

                rtnl_unlock();
                return ret;
        }

        /* now, re-initialize the nic client and ae device */
        ret = hclgevf_reset_stack(hdev);
        if (ret)
                dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");

        /* bring up the nic to enable TX/RX again */
        hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);

        rtnl_unlock();

        return ret;
}

static int hclgevf_do_reset(struct hclgevf_dev *hdev)
{
        int status;
        u8 respmsg;

        status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
                                      0, false, &respmsg, sizeof(u8));
        if (status)
                dev_err(&hdev->pdev->dev,
                        "VF reset request to PF failed(=%d)\n", status);

        return status;
}

static void hclgevf_reset_event(struct hnae3_handle *handle)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");

        handle->reset_level = HNAE3_VF_RESET;

        /* reset of this VF requested */
        set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
        hclgevf_reset_task_schedule(hdev);

        handle->last_reset_time = jiffies;
}

static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        return hdev->fw_version;
}

static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
        struct hclgevf_misc_vector *vector = &hdev->misc_vector;

        vector->vector_irq = pci_irq_vector(hdev->pdev,
                                            HCLGEVF_MISC_VECTOR_NUM);
        vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
        /* vector status always valid for Vector 0 */
        hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
        hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

        hdev->num_msi_left -= 1;
        hdev->num_msi_used += 1;
}

void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
        if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
            !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) {
                set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
                schedule_work(&hdev->rst_service_task);
        }
}

void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
        if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) &&
            !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) {
                set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
                schedule_work(&hdev->mbx_service_task);
        }
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
{
        if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
            !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
                schedule_work(&hdev->service_task);
}

static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
{
        /* if we have any pending mailbox event then schedule the mbx task */
        if (hdev->mbx_event_pending)
                hclgevf_mbx_task_schedule(hdev);

        if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
                hclgevf_reset_task_schedule(hdev);
}

static void hclgevf_service_timer(struct timer_list *t)
{
        struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);

        mod_timer(&hdev->service_timer, jiffies + 5 * HZ);

        hclgevf_task_schedule(hdev);
}

static void hclgevf_reset_service_task(struct work_struct *work)
{
        struct hclgevf_dev *hdev =
                container_of(work, struct hclgevf_dev, rst_service_task);
        int ret;

        if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
                return;

        clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);

        if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
                               &hdev->reset_state)) {
                /* PF has intimated that it is about to reset the hardware.
                 * We now have to poll & check if hardware has actually
                 * completed the reset sequence. On hardware reset completion,
                 * VF needs to reset the client and ae device.
                 */
                hdev->reset_attempts = 0;

                ret = hclgevf_reset(hdev);
                if (ret)
                        dev_err(&hdev->pdev->dev, "VF stack reset failed.\n");
        } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
                                      &hdev->reset_state)) {
                /* we could be here when either of below happens:
                 * 1. reset was initiated due to watchdog timeout caused by
                 *    a. IMP was earlier reset and our TX got choked down,
                 *       which resulted in the watchdog reacting and inducing
                 *       VF reset. This also means our cmdq would be
                 *       unreliable.
                 *    b. problem in TX due to another lower layer (for example
                 *       the link layer not functioning properly etc.)
                 * 2. VF reset might have been initiated due to some config
                 *    change.
                 *
                 * NOTE: There's no clear way to detect the above cases other
                 * than to react to the response of the PF for this reset
                 * request. PF will ack the 1b and 2. cases but we will not
                 * get any intimation about 1a from PF as cmdq would be in
                 * unreliable state i.e. mailbox communication between PF and
                 * VF would be broken.
                 */

                /* if we are never getting into pending state it means either:
                 * 1. PF is not receiving our request which could be due to IMP
                 *    reset
                 * 2. PF is screwed
                 * We cannot do much for 2. but to check first we can try to
                 * reset our PCIe + stack and see if it alleviates the problem.
                 */
                if (hdev->reset_attempts > 3) {
                        /* prepare for full reset of stack + pcie interface */
                        hdev->nic.reset_level = HNAE3_VF_FULL_RESET;

                        /* "defer" schedule the reset task again */
                        set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
                } else {
                        hdev->reset_attempts++;

                        /* request PF for resetting this VF via mailbox */
                        ret = hclgevf_do_reset(hdev);
                        if (ret)
                                dev_warn(&hdev->pdev->dev,
                                         "VF rst fail, stack will call\n");
                }
        }

        clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static void hclgevf_mailbox_service_task(struct work_struct *work)
{
        struct hclgevf_dev *hdev;

        hdev = container_of(work, struct hclgevf_dev, mbx_service_task);

        if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
                return;

        clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);

        hclgevf_mbx_async_handler(hdev);

        clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_service_task(struct work_struct *work)
{
        struct hclgevf_dev *hdev;

        hdev = container_of(work, struct hclgevf_dev, service_task);

        /* request the link status from the PF. The PF should be able to push
         * such updates to the VF in the future, so we might remove this later
         */
        hclgevf_request_link_info(hdev);

        hclgevf_deferred_task_schedule(hdev);

        clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
}

static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
        hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval)
{
        u32 cmdq_src_reg;

        /* fetch the events from their corresponding regs */
        cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
                                        HCLGEVF_VECTOR0_CMDQ_SRC_REG);

        /* check for vector0 mailbox(=CMDQ RX) event source */
        if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
                cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
                *clearval = cmdq_src_reg;
                return true;
        }

        dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");

        return false;
}

static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
        writel(en ? 1 : 0, vector->addr);
}

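/* Standard mask/handle/clear/unmask pattern for the misc (vector 0)
 * interrupt: the vector stays masked for the duration of the handler, the
 * mailbox event is processed, the source register is cleared with the
 * value captured by hclgevf_check_event_cause(), and the vector is
 * unmasked again.
 */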
static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
        struct hclgevf_dev *hdev = data;
        u32 clearval;

        hclgevf_enable_vector(&hdev->misc_vector, false);
        if (!hclgevf_check_event_cause(hdev, &clearval))
                goto skip_sched;

        hclgevf_mbx_handler(hdev);

        hclgevf_clear_event_cause(hdev, clearval);

skip_sched:
        hclgevf_enable_vector(&hdev->misc_vector, true);

        return IRQ_HANDLED;
}

static int hclgevf_configure(struct hclgevf_dev *hdev)
{
        int ret;

        /* get queue configuration from PF */
        ret = hclge_get_queue_info(hdev);
        if (ret)
                return ret;
        /* get tc configuration from PF */
        return hclgevf_get_tc_info(hdev);
}

static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
{
        struct pci_dev *pdev = ae_dev->pdev;
        struct hclgevf_dev *hdev;

        hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
        if (!hdev)
                return -ENOMEM;

        hdev->pdev = pdev;
        hdev->ae_dev = ae_dev;
        ae_dev->priv = hdev;

        return 0;
}

static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
        struct hnae3_handle *roce = &hdev->roce;
        struct hnae3_handle *nic = &hdev->nic;

        roce->rinfo.num_vectors = hdev->num_roce_msix;

        if (hdev->num_msi_left < roce->rinfo.num_vectors ||
            hdev->num_msi_left == 0)
                return -EINVAL;

        roce->rinfo.base_vector = hdev->roce_base_vector;

        roce->rinfo.netdev = nic->kinfo.netdev;
        roce->rinfo.roce_io_base = hdev->hw.io_base;

        roce->pdev = nic->pdev;
        roce->ae_algo = nic->ae_algo;
        roce->numa_node_mask = nic->numa_node_mask;

        return 0;
}

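/* The default RSS indirection table simply spreads the entries across the
 * queues round-robin (entry i maps to queue i % rss_size_max); the TC
 * mode is then programmed from the same rss_size_max.
 */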
1391static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
1392{
1393        struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
1394        int i, ret;
1395
1396        rss_cfg->rss_size = hdev->rss_size_max;
1397
1398        /* Initialize RSS indirect table for each vport */
1399        for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
1400                rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;
1401
1402        ret = hclgevf_set_rss_indir_table(hdev);
1403        if (ret)
1404                return ret;
1405
1406        return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
1407}
1408
1409static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
1410{
1411        /* other vlan config(like, VLAN TX/RX offload) would also be added
1412         * here later
1413         */
1414        return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
1415                                       false);
1416}
1417
1418static int hclgevf_ae_start(struct hnae3_handle *handle)
1419{
1420        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1421        int i, queue_id;
1422
1423        for (i = 0; i < handle->kinfo.num_tqps; i++) {
1424                /* ring enable */
1425                queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
1426                if (queue_id < 0) {
1427                        dev_warn(&hdev->pdev->dev,
1428                                 "Get invalid queue id, ignore it\n");
1429                        continue;
1430                }
1431
1432                hclgevf_tqp_enable(hdev, queue_id, 0, true);
1433        }
1434
1435        /* reset tqp stats */
1436        hclgevf_reset_tqp_stats(handle);
1437
1438        hclgevf_request_link_info(hdev);
1439
1440        clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
1441        mod_timer(&hdev->service_timer, jiffies + HZ);
1442
1443        return 0;
1444}
1445
1446static void hclgevf_ae_stop(struct hnae3_handle *handle)
1447{
1448        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1449        int i, queue_id;
1450
1451        for (i = 0; i < hdev->num_tqps; i++) {
1452                /* Ring disable */
1453                queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
1454                if (queue_id < 0) {
1455                        dev_warn(&hdev->pdev->dev,
1456                                 "Get invalid queue id, ignore it\n");
1457                        continue;
1458                }
1459
1460                hclgevf_tqp_enable(hdev, queue_id, 0, false);
1461        }
1462
1463        /* reset tqp stats */
1464        hclgevf_reset_tqp_stats(handle);
1465        del_timer_sync(&hdev->service_timer);
1466        cancel_work_sync(&hdev->service_task);
1467        clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
1468        hclgevf_update_link_status(hdev, 0);
1469}
1470
1471static void hclgevf_state_init(struct hclgevf_dev *hdev)
1472{
1473        /* if this is on going reset then skip this initialization */
1474        if (hclgevf_dev_ongoing_reset(hdev))
1475                return;
1476
1477        /* setup tasks for the MBX */
1478        INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
1479        clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
1480        clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
1481
1482        /* setup tasks for service timer */
1483        timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);
1484
1485        INIT_WORK(&hdev->service_task, hclgevf_service_task);
1486        clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
1487
1488        INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task);
1489
1490        mutex_init(&hdev->mbx_resp.mbx_mutex);
1491
1492        /* bring the device down */
1493        set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
1494}
1495
1496static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
1497{
1498        set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
1499
1500        if (hdev->service_timer.function)
1501                del_timer_sync(&hdev->service_timer);
1502        if (hdev->service_task.func)
1503                cancel_work_sync(&hdev->service_task);
1504        if (hdev->mbx_service_task.func)
1505                cancel_work_sync(&hdev->mbx_service_task);
1506        if (hdev->rst_service_task.func)
1507                cancel_work_sync(&hdev->rst_service_task);
1508
1509        mutex_destroy(&hdev->mbx_resp.mbx_mutex);
1510}
1511
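/* hclgevf_init_msi - allocate MSI/MSI-X vectors for the VF. When RoCE is
 * supported, only MSI-X may be used and at least roce_base_msix_offset + 1
 * vectors are required so that the RoCE vectors can start at their fixed
 * offset; otherwise plain MSI is an acceptable fallback.
 */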
1512static int hclgevf_init_msi(struct hclgevf_dev *hdev)
1513{
1514        struct pci_dev *pdev = hdev->pdev;
1515        int vectors;
1516        int i;
1517
1518        /* if a reset is ongoing then skip this initialization */
1519        if (hclgevf_dev_ongoing_reset(hdev))
1520                return 0;
1521
1522        if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
1523                vectors = pci_alloc_irq_vectors(pdev,
1524                                                hdev->roce_base_msix_offset + 1,
1525                                                hdev->num_msi,
1526                                                PCI_IRQ_MSIX);
1527        else
1528                vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
1529                                                PCI_IRQ_MSI | PCI_IRQ_MSIX);
1530
1531        if (vectors < 0) {
1532                dev_err(&pdev->dev,
1533                        "failed(%d) to allocate MSI/MSI-X vectors\n",
1534                        vectors);
1535                return vectors;
1536        }
1537        if (vectors < hdev->num_msi)
1538                dev_warn(&hdev->pdev->dev,
1539                         "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
1540                         hdev->num_msi, vectors);
1541
1542        hdev->num_msi = vectors;
1543        hdev->num_msi_left = vectors;
1544        hdev->base_msi_vector = pdev->irq;
1545        hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;
1546
1547        hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1548                                           sizeof(u16), GFP_KERNEL);
1549        if (!hdev->vector_status) {
1550                pci_free_irq_vectors(pdev);
1551                return -ENOMEM;
1552        }
1553
1554        for (i = 0; i < hdev->num_msi; i++)
1555                hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;
1556
1557        hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
1558                                        sizeof(int), GFP_KERNEL);
1559        if (!hdev->vector_irq) {
1560                pci_free_irq_vectors(pdev);
1561                return -ENOMEM;
1562        }
1563
1564        return 0;
1565}
1566
1567static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
1568{
1569        struct pci_dev *pdev = hdev->pdev;
1570
1571        pci_free_irq_vectors(pdev);
1572}
1573
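/* hclgevf_misc_irq_init - request the misc interrupt (vector 0), which
 * carries mailbox and reset events from the PF. The vector is enabled only
 * after any stale event cause has been cleared.
 */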
1574static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
1575{
1576        int ret = 0;
1577
1578        /* if a reset is ongoing then skip this initialization */
1579        if (hclgevf_dev_ongoing_reset(hdev))
1580                return 0;
1581
1582        hclgevf_get_misc_vector(hdev);
1583
1584        ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
1585                          0, "hclgevf_cmd", hdev);
1586        if (ret) {
1587                dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
1588                        hdev->misc_vector.vector_irq);
1589                return ret;
1590        }
1591
1592        hclgevf_clear_event_cause(hdev, 0);
1593
1594        /* enable the misc vector (vector 0) */
1595        hclgevf_enable_vector(&hdev->misc_vector, true);
1596
1597        return ret;
1598}
1599
1600static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
1601{
1602        /* disable the misc vector (vector 0) */
1603        hclgevf_enable_vector(&hdev->misc_vector, false);
1604        synchronize_irq(hdev->misc_vector.vector_irq);
1605        free_irq(hdev->misc_vector.vector_irq, hdev);
1606        hclgevf_free_vector(hdev, 0);
1607}
1608
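/* hclgevf_init_client_instance - attach a KNIC/UNIC/RoCE client to this
 * ae_dev. A RoCE client is only initialized once both the NIC client and
 * the RoCE client are registered (whichever registers last triggers the
 * RoCE init), since the RoCE base info depends on the NIC setup.
 */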
1609static int hclgevf_init_client_instance(struct hnae3_client *client,
1610                                        struct hnae3_ae_dev *ae_dev)
1611{
1612        struct hclgevf_dev *hdev = ae_dev->priv;
1613        int ret;
1614
1615        switch (client->type) {
1616        case HNAE3_CLIENT_KNIC:
1617                hdev->nic_client = client;
1618                hdev->nic.client = client;
1619
1620                ret = client->ops->init_instance(&hdev->nic);
1621                if (ret)
1622                        return ret;
1623
1624                if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
1625                        struct hnae3_client *rc = hdev->roce_client;
1626
1627                        ret = hclgevf_init_roce_base_info(hdev);
1628                        if (ret)
1629                                return ret;
1630                        ret = rc->ops->init_instance(&hdev->roce);
1631                        if (ret)
1632                                return ret;
1633                }
1634                break;
1635        case HNAE3_CLIENT_UNIC:
1636                hdev->nic_client = client;
1637                hdev->nic.client = client;
1638
1639                ret = client->ops->init_instance(&hdev->nic);
1640                if (ret)
1641                        return ret;
1642                break;
1643        case HNAE3_CLIENT_ROCE:
1644                if (hnae3_dev_roce_supported(hdev)) {
1645                        hdev->roce_client = client;
1646                        hdev->roce.client = client;
1647                }
1648
1649                if (hdev->roce_client && hdev->nic_client) {
1650                        ret = hclgevf_init_roce_base_info(hdev);
1651                        if (ret)
1652                                return ret;
1653
1654                        ret = client->ops->init_instance(&hdev->roce);
1655                        if (ret)
1656                                return ret;
1657                }
1658        }
1659
1660        return 0;
1661}
1662
1663static void hclgevf_uninit_client_instance(struct hnae3_client *client,
1664                                           struct hnae3_ae_dev *ae_dev)
1665{
1666        struct hclgevf_dev *hdev = ae_dev->priv;
1667
1668        /* un-init the RoCE client, if it exists */
1669        if (hdev->roce_client)
1670                hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
1671
1672        /* un-init nic/unic, unless this uninit was requested by the roce client */
1673        if ((client->ops->uninit_instance) &&
1674            (client->type != HNAE3_CLIENT_ROCE))
1675                client->ops->uninit_instance(&hdev->nic, 0);
1676}
1677
1678static int hclgevf_pci_init(struct hclgevf_dev *hdev)
1679{
1680        struct pci_dev *pdev = hdev->pdev;
1681        struct hclgevf_hw *hw;
1682        int ret;
1683
1684        /* Check whether PCI initialization can be skipped: it can while
1685         * the device is undergoing a VF reset, since the PCI interface
1686         * stays usable across such a reset. In every other case (no reset
1687         * at all, or a full reset) the PCI interface must be initialized.
1688         */
1689        if (hclgevf_dev_ongoing_reset(hdev))
1690                return 0;
1691
1692        ret = pci_enable_device(pdev);
1693        if (ret) {
1694                dev_err(&pdev->dev, "failed to enable PCI device\n");
1695                return ret;
1696        }
1697
1698        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1699        if (ret) {
1700                dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting\n");
1701                goto err_disable_device;
1702        }
1703
1704        ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
1705        if (ret) {
1706                dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
1707                goto err_disable_device;
1708        }
1709
1710        pci_set_master(pdev);
1711        hw = &hdev->hw;
1712        hw->hdev = hdev;
1713        hw->io_base = pci_iomap(pdev, 2, 0);
1714        if (!hw->io_base) {
1715                dev_err(&pdev->dev, "can't map configuration register space\n");
1716                ret = -ENOMEM;
1717                goto err_clr_master;
1718        }
1719
1720        return 0;
1721
1722err_clr_master:
1723        pci_clear_master(pdev);
1724        pci_release_regions(pdev);
1725err_disable_device:
1726        pci_disable_device(pdev);
1727
1728        return ret;
1729}
1730
1731static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
1732{
1733        struct pci_dev *pdev = hdev->pdev;
1734
1735        pci_iounmap(pdev, hdev->hw.io_base);
1736        pci_clear_master(pdev);
1737        pci_release_regions(pdev);
1738        pci_disable_device(pdev);
1739}
1740
1741static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
1742{
1743        struct hclgevf_query_res_cmd *req;
1744        struct hclgevf_desc desc;
1745        int ret;
1746
1747        hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
1748        ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
1749        if (ret) {
1750                dev_err(&hdev->pdev->dev,
1751                        "query vf resource failed, ret = %d.\n", ret);
1752                return ret;
1753        }
1754
1755        req = (struct hclgevf_query_res_cmd *)desc.data;
1756
1757        if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) {
1758                hdev->roce_base_msix_offset =
1759                hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
1760                                HCLGEVF_MSIX_OFT_ROCEE_M,
1761                                HCLGEVF_MSIX_OFT_ROCEE_S);
1762                hdev->num_roce_msix =
1763                hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
1764                                HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
1765
1766                /* The VF has both NIC and RoCE vectors; NIC vectors are
1767                 * queued before RoCE vectors. The offset is fixed to 64.
1768                 */
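                /* For example (illustrative numbers only): with the fixed
                 * offset of 64 and num_roce_msix = 8, vectors 0..63 belong
                 * to the NIC (misc + TQP vectors) and vectors 64..71 to
                 * RoCE, so num_msi = 64 + 8 = 72.
                 */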
1769                hdev->num_msi = hdev->num_roce_msix +
1770                                hdev->roce_base_msix_offset;
1771        } else {
1772                hdev->num_msi =
1773                hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
1774                                HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
1775        }
1776
1777        return 0;
1778}
1779
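/* hclgevf_init_hdev - bring up the VF device. The bring-up order is:
 * PCI -> command queue -> VF resource query -> MSI/MSI-X -> state/tasks ->
 * misc IRQ -> configuration -> TQPs -> handle info -> MTA type -> RSS ->
 * VLAN. On failure, each err_* label unwinds the steps in reverse order.
 */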
1780static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
1781{
1782        struct pci_dev *pdev = hdev->pdev;
1783        int ret;
1784
1785        /* check if the device is undergoing a full reset (i.e. PCIe as well) */
1786        if (hclgevf_dev_ongoing_full_reset(hdev)) {
1787                dev_warn(&pdev->dev, "device is undergoing a full reset\n");
1788                hclgevf_uninit_hdev(hdev);
1789        }
1790
1791        ret = hclgevf_pci_init(hdev);
1792        if (ret) {
1793                dev_err(&pdev->dev, "PCI initialization failed\n");
1794                return ret;
1795        }
1796
1797        ret = hclgevf_cmd_init(hdev);
1798        if (ret)
1799                goto err_cmd_init;
1800
1801        /* Get vf resource */
1802        ret = hclgevf_query_vf_resource(hdev);
1803        if (ret) {
1804                dev_err(&hdev->pdev->dev,
1805                        "Query vf resource error, ret = %d.\n", ret);
1806                goto err_query_vf;
1807        }
1808
1809        ret = hclgevf_init_msi(hdev);
1810        if (ret) {
1811                dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
1812                goto err_query_vf;
1813        }
1814
1815        hclgevf_state_init(hdev);
1816
1817        ret = hclgevf_misc_irq_init(hdev);
1818        if (ret) {
1819                dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
1820                        ret);
1821                goto err_misc_irq_init;
1822        }
1823
1824        ret = hclgevf_configure(hdev);
1825        if (ret) {
1826                dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
1827                goto err_config;
1828        }
1829
1830        ret = hclgevf_alloc_tqps(hdev);
1831        if (ret) {
1832                dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
1833                goto err_config;
1834        }
1835
1836        ret = hclgevf_set_handle_info(hdev);
1837        if (ret) {
1838                dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
1839                goto err_config;
1840        }
1841
1842        /* Initialize mta type for this VF */
1843        ret = hclgevf_cfg_func_mta_type(hdev);
1844        if (ret) {
1845                dev_err(&hdev->pdev->dev,
1846                        "failed(%d) to initialize MTA type\n", ret);
1847                goto err_config;
1848        }
1849
1850        /* Initialize RSS for this VF */
1851        ret = hclgevf_rss_init_hw(hdev);
1852        if (ret) {
1853                dev_err(&hdev->pdev->dev,
1854                        "failed(%d) to initialize RSS\n", ret);
1855                goto err_config;
1856        }
1857
1858        ret = hclgevf_init_vlan_config(hdev);
1859        if (ret) {
1860                dev_err(&hdev->pdev->dev,
1861                        "failed(%d) to initialize VLAN config\n", ret);
1862                goto err_config;
1863        }
1864
1865        dev_info(&pdev->dev, "finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);
1866
1867        return 0;
1868
1869err_config:
1870        hclgevf_misc_irq_uninit(hdev);
1871err_misc_irq_init:
1872        hclgevf_state_uninit(hdev);
1873        hclgevf_uninit_msi(hdev);
1874err_query_vf:
1875        hclgevf_cmd_uninit(hdev);
1876err_cmd_init:
1877        hclgevf_pci_uninit(hdev);
1878        return ret;
1879}
1880
1881static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
1882{
1883        hclgevf_state_uninit(hdev);
1884        hclgevf_misc_irq_uninit(hdev);
1885        hclgevf_cmd_uninit(hdev);
1886        hclgevf_uninit_msi(hdev);
1887        hclgevf_pci_uninit(hdev);
1888}
1889
1890static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
1891{
1892        struct pci_dev *pdev = ae_dev->pdev;
1893        int ret;
1894
1895        ret = hclgevf_alloc_hdev(ae_dev);
1896        if (ret) {
1897                dev_err(&pdev->dev, "hclge device allocation failed\n");
1898                return ret;
1899        }
1900
1901        ret = hclgevf_init_hdev(ae_dev->priv);
1902        if (ret)
1903                dev_err(&pdev->dev, "hclge device initialization failed\n");
1904
1905        return ret;
1906}
1907
1908static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
1909{
1910        struct hclgevf_dev *hdev = ae_dev->priv;
1911
1912        hclgevf_uninit_hdev(hdev);
1913        ae_dev->priv = NULL;
1914}
1915
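/* hclgevf_get_max_channels - upper bound on combined channels, i.e. the
 * smaller of rss_size_max * num_tc and the number of TQPs assigned to the
 * VF. For example (hypothetical values): rss_size_max = 16, num_tc = 1 and
 * num_tqps = 8 would yield min(16 * 1, 8) = 8.
 */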
1916static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
1917{
1918        struct hnae3_handle *nic = &hdev->nic;
1919        struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1920
1921        return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
1922}
1923
1924/**
1925 * hclgevf_get_channels - Get the current channels enabled and max supported.
1926 * @handle: hardware information for network interface
1927 * @ch: ethtool channels structure
1928 *
1929 * We don't support separate tx and rx queues as channels. The other count
1930 * represents how many queues are being used for control. max_combined counts
1931 * how many queue pairs we can support. They may not be mapped 1 to 1 with
1932 * q_vectors since we support a lot more queue pairs than q_vectors.
1933 **/
1934static void hclgevf_get_channels(struct hnae3_handle *handle,
1935                                 struct ethtool_channels *ch)
1936{
1937        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1938
1939        ch->max_combined = hclgevf_get_max_channels(hdev);
1940        ch->other_count = 0;
1941        ch->max_other = 0;
1942        ch->combined_count = hdev->num_tqps;
1943}
1944
1945static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
1946                                          u16 *free_tqps, u16 *max_rss_size)
1947{
1948        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1949
1950        *free_tqps = 0;
1951        *max_rss_size = hdev->rss_size_max;
1952}
1953
1954static int hclgevf_get_status(struct hnae3_handle *handle)
1955{
1956        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1957
1958        return hdev->hw.mac.link;
1959}
1960
1961static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
1962                                            u8 *auto_neg, u32 *speed,
1963                                            u8 *duplex)
1964{
1965        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1966
1967        if (speed)
1968                *speed = hdev->hw.mac.speed;
1969        if (duplex)
1970                *duplex = hdev->hw.mac.duplex;
1971        if (auto_neg)
1972                *auto_neg = AUTONEG_DISABLE;
1973}
1974
1975void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
1976                                 u8 duplex)
1977{
1978        hdev->hw.mac.speed = speed;
1979        hdev->hw.mac.duplex = duplex;
1980}
1981
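/* hclgevf_ops is the hnae3_ae_ops vtable through which the hnae3 framework
 * and the hns3 enet/ethtool layers drive this VF; it is exported to the
 * framework via the ae_algovf registration below.
 */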
1982static const struct hnae3_ae_ops hclgevf_ops = {
1983        .init_ae_dev = hclgevf_init_ae_dev,
1984        .uninit_ae_dev = hclgevf_uninit_ae_dev,
1985        .init_client_instance = hclgevf_init_client_instance,
1986        .uninit_client_instance = hclgevf_uninit_client_instance,
1987        .start = hclgevf_ae_start,
1988        .stop = hclgevf_ae_stop,
1989        .map_ring_to_vector = hclgevf_map_ring_to_vector,
1990        .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
1991        .get_vector = hclgevf_get_vector,
1992        .put_vector = hclgevf_put_vector,
1993        .reset_queue = hclgevf_reset_tqp,
1994        .set_promisc_mode = hclgevf_set_promisc_mode,
1995        .get_mac_addr = hclgevf_get_mac_addr,
1996        .set_mac_addr = hclgevf_set_mac_addr,
1997        .add_uc_addr = hclgevf_add_uc_addr,
1998        .rm_uc_addr = hclgevf_rm_uc_addr,
1999        .add_mc_addr = hclgevf_add_mc_addr,
2000        .rm_mc_addr = hclgevf_rm_mc_addr,
2001        .update_mta_status = hclgevf_update_mta_status,
2002        .get_stats = hclgevf_get_stats,
2003        .update_stats = hclgevf_update_stats,
2004        .get_strings = hclgevf_get_strings,
2005        .get_sset_count = hclgevf_get_sset_count,
2006        .get_rss_key_size = hclgevf_get_rss_key_size,
2007        .get_rss_indir_size = hclgevf_get_rss_indir_size,
2008        .get_rss = hclgevf_get_rss,
2009        .set_rss = hclgevf_set_rss,
2010        .get_tc_size = hclgevf_get_tc_size,
2011        .get_fw_version = hclgevf_get_fw_version,
2012        .set_vlan_filter = hclgevf_set_vlan_filter,
2013        .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
2014        .reset_event = hclgevf_reset_event,
2015        .get_channels = hclgevf_get_channels,
2016        .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
2017        .get_status = hclgevf_get_status,
2018        .get_ksettings_an_result = hclgevf_get_ksettings_an_result,
2019};
2020
2021static struct hnae3_ae_algo ae_algovf = {
2022        .ops = &hclgevf_ops,
2023        .pdev_id_table = ae_algovf_pci_tbl,
2024};
2025
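/* Module entry point: registering ae_algovf hands ae_algovf_pci_tbl and
 * hclgevf_ops to the hnae3 framework, which then matches and initializes
 * VF devices on behalf of this module.
 */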
2026static int hclgevf_init(void)
2027{
2028        pr_info("%s is initializing\n", HCLGEVF_NAME);
2029
2030        hnae3_register_ae_algo(&ae_algovf);
2031
2032        return 0;
2033}
2034
2035static void hclgevf_exit(void)
2036{
2037        hnae3_unregister_ae_algo(&ae_algovf);
2038}
2039module_init(hclgevf_init);
2040module_exit(hclgevf_exit);
2041
2042MODULE_LICENSE("GPL");
2043MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
2044MODULE_DESCRIPTION("HCLGEVF Driver");
2045MODULE_VERSION(HCLGEVF_MOD_VERSION);
2046