linux/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define HCLGEVF_NAME    "hclgevf"

static int hclgevf_init_hdev(struct hclgevf_dev *hdev);
static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
        /* required last entry */
        {0, }
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
        struct hnae3_handle *handle)
{
        return container_of(handle, struct hclgevf_dev, nic);
}

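/* hclgevf_tqps_update_stats - refresh the per-queue RX/TX packet counters.
 * Each TQP is queried with its own firmware command: desc.data[0] carries
 * the queue index and desc.data[1] returns the packet count recorded since
 * the last query, which is accumulated into the shadow tqp_stats.
 */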
static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct hnae3_queue *queue;
        struct hclgevf_desc desc;
        struct hclgevf_tqp *tqp;
        int status;
        int i;

        for (i = 0; i < handle->kinfo.num_tqps; i++) {
                queue = handle->kinfo.tqp[i];
                tqp = container_of(queue, struct hclgevf_tqp, q);
                hclgevf_cmd_setup_basic_desc(&desc,
                                             HCLGEVF_OPC_QUERY_RX_STATUS,
                                             true);

                desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
                status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
                if (status) {
                        dev_err(&hdev->pdev->dev,
                                "Query tqp stat fail, status = %d, queue = %d\n",
                                status, i);
                        return status;
                }
                tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
                        le32_to_cpu(desc.data[1]);

                hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
                                             true);

                desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
                status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
                if (status) {
                        dev_err(&hdev->pdev->dev,
                                "Query tqp stat fail, status = %d, queue = %d\n",
                                status, i);
                        return status;
                }
                tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
                        le32_to_cpu(desc.data[1]);
        }

        return 0;
}

static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        struct hclgevf_tqp *tqp;
        u64 *buff = data;
        int i;

        for (i = 0; i < kinfo->num_tqps; i++) {
                tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
                *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
        }
        for (i = 0; i < kinfo->num_tqps; i++) {
                tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
                *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
        }

        return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;

        return kinfo->num_tqps * 2;
}

static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        u8 *buff = data;
        int i = 0;

        for (i = 0; i < kinfo->num_tqps; i++) {
                struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
                        struct hclgevf_tqp, q);
                snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
                         tqp->index);
                buff += ETH_GSTRING_LEN;
        }

        for (i = 0; i < kinfo->num_tqps; i++) {
                struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
                        struct hclgevf_tqp, q);
                snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
                         tqp->index);
                buff += ETH_GSTRING_LEN;
        }

        return buff;
}

static void hclgevf_update_stats(struct hnae3_handle *handle,
                                 struct net_device_stats *net_stats)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        int status;

        status = hclgevf_tqps_update_stats(handle);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "VF update of TQPS stats fail, status = %d.\n",
                        status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
        if (strset == ETH_SS_TEST)
                return -EOPNOTSUPP;
        else if (strset == ETH_SS_STATS)
                return hclgevf_tqps_get_sset_count(handle, strset);

        return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
                                u8 *data)
{
        u8 *p = data;

        if (strset == ETH_SS_STATS)
                p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
        hclgevf_tqps_get_stats(handle, data);
}

static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
        u8 resp_msg;
        int status;

        status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
                                      true, &resp_msg, sizeof(u8));
        if (status) {
                dev_err(&hdev->pdev->dev,
                        "VF request to get TC info from PF failed %d\n",
                        status);
                return status;
        }

        hdev->hw_tc_map = resp_msg;

        return 0;
}

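/* The 8-byte GET_QINFO response from the PF packs four u16 fields in
 * order: number of TQPs, maximum RSS size, descriptor count and RX buffer
 * length; they are unpacked below by byte offset.
 */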
static int hclge_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN       8
        u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
        int status;

        status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
                                      true, resp_msg,
                                      HCLGEVF_TQPS_RSS_INFO_LEN);
        if (status) {
                dev_err(&hdev->pdev->dev,
                        "VF request to get tqp info from PF failed %d\n",
                        status);
                return status;
        }

        memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
        memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
        memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16));
        memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16));

        return 0;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
        struct hclgevf_tqp *tqp;
        int i;

        /* if this is an ongoing reset then we need to re-allocate the TQPs,
         * since we cannot assume we would get the same number of TQPs back
         * from the PF
         */
        if (hclgevf_dev_ongoing_reset(hdev))
                devm_kfree(&hdev->pdev->dev, hdev->htqp);

        hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
                                  sizeof(struct hclgevf_tqp), GFP_KERNEL);
        if (!hdev->htqp)
                return -ENOMEM;

        tqp = hdev->htqp;

        for (i = 0; i < hdev->num_tqps; i++) {
                tqp->dev = &hdev->pdev->dev;
                tqp->index = i;

                tqp->q.ae_algo = &ae_algovf;
                tqp->q.buf_size = hdev->rx_buf_len;
                tqp->q.desc_num = hdev->num_desc;
                tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
                        i * HCLGEVF_TQP_REG_SIZE;

                tqp++;
        }

        return 0;
}

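/* hclgevf_knic_setup - size the NIC handle from the PF-supplied limits.
 * The usable RSS size is clamped to rss_size_max and to an even split of
 * the TQPs across the enabled TCs, so the final queue count never exceeds
 * what the PF allocated to this VF.
 */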
static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
        struct hnae3_handle *nic = &hdev->nic;
        struct hnae3_knic_private_info *kinfo;
        u16 new_tqps = hdev->num_tqps;
        int i;

        kinfo = &nic->kinfo;
        kinfo->num_tc = 0;
        kinfo->num_desc = hdev->num_desc;
        kinfo->rx_buf_len = hdev->rx_buf_len;
        for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
                if (hdev->hw_tc_map & BIT(i))
                        kinfo->num_tc++;

        kinfo->rss_size
                = min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
        new_tqps = kinfo->rss_size * kinfo->num_tc;
        kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

        /* if this is an ongoing reset then we need to re-allocate the hnae
         * queues as well, since the number of TQPs from the PF might have
         * changed.
         */
        if (hclgevf_dev_ongoing_reset(hdev))
                devm_kfree(&hdev->pdev->dev, kinfo->tqp);

        kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
                                  sizeof(struct hnae3_queue *), GFP_KERNEL);
        if (!kinfo->tqp)
                return -ENOMEM;

        for (i = 0; i < kinfo->num_tqps; i++) {
                hdev->htqp[i].q.handle = &hdev->nic;
                hdev->htqp[i].q.tqp_index = i;
                kinfo->tqp[i] = &hdev->htqp[i].q;
        }

        return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
        int status;
        u8 resp_msg;

        status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
                                      0, false, &resp_msg, sizeof(u8));
        if (status)
                dev_err(&hdev->pdev->dev,
                        "VF failed to fetch link status(%d) from PF\n", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
        struct hnae3_handle *handle = &hdev->nic;
        struct hnae3_client *client;

        client = handle->client;

        if (link_state != hdev->hw.mac.link) {
                client->ops->link_status_change(handle, !!link_state);
                hdev->hw.mac.link = link_state;
        }
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
        struct hnae3_handle *nic = &hdev->nic;
        int ret;

        nic->ae_algo = &ae_algovf;
        nic->pdev = hdev->pdev;
        nic->numa_node_mask = hdev->numa_node_mask;
        nic->flags |= HNAE3_SUPPORT_VF;

        if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
                dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
                        hdev->ae_dev->dev_type);
                return -EINVAL;
        }

        ret = hclgevf_knic_setup(hdev);
        if (ret)
                dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
                        ret);
        return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
        hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
        hdev->num_msi_left += 1;
        hdev->num_msi_used -= 1;
}

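/* hclgevf_get_vector - hand out up to vector_num free MSI-X vectors.
 * Vector 0 is reserved for the misc/mailbox interrupt, so the scan starts
 * just past HCLGEVF_MISC_VECTOR_NUM; the return value is the number of
 * vectors actually allocated, which may be fewer than requested.
 */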
static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
                              struct hnae3_vector_info *vector_info)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct hnae3_vector_info *vector = vector_info;
        int alloc = 0;
        int i, j;

        vector_num = min(hdev->num_msi_left, vector_num);

        for (j = 0; j < vector_num; j++) {
                for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
                        if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
                                vector->vector = pci_irq_vector(hdev->pdev, i);
                                vector->io_addr = hdev->hw.io_base +
                                        HCLGEVF_VECTOR_REG_BASE +
                                        (i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
                                hdev->vector_status[i] = 0;
                                hdev->vector_irq[i] = vector->vector;

                                vector++;
                                alloc++;

                                break;
                        }
                }
        }
        hdev->num_msi_left -= alloc;
        hdev->num_msi_used += alloc;

        return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
        int i;

        for (i = 0; i < hdev->num_msi; i++)
                if (vector == hdev->vector_irq[i])
                        return i;

        return -EINVAL;
}

static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
        return HCLGEVF_RSS_KEY_SIZE;
}

static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
        return HCLGEVF_RSS_IND_TBL_SIZE;
}

static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
        const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
        struct hclgevf_rss_indirection_table_cmd *req;
        struct hclgevf_desc desc;
        int status;
        int i, j;

        req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;

        for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
                hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
                                             false);
                req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
                req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
                for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
                        req->rss_result[j] =
                                indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

                status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
                if (status) {
                        dev_err(&hdev->pdev->dev,
                                "VF failed(=%d) to set RSS indirection table\n",
                                status);
                        return status;
                }
        }

        return 0;
}

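/* The TC mode command encodes each TC's queue region as (offset, size):
 * tc_offset is rss_size * i and tc_size holds log2 of the per-TC span,
 * obtained by rounding rss_size up to a power of two and taking ilog2().
 */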
static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
        struct hclgevf_rss_tc_mode_cmd *req;
        u16 tc_offset[HCLGEVF_MAX_TC_NUM];
        u16 tc_valid[HCLGEVF_MAX_TC_NUM];
        u16 tc_size[HCLGEVF_MAX_TC_NUM];
        struct hclgevf_desc desc;
        u16 roundup_size;
        int status;
        int i;

        req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

        roundup_size = roundup_pow_of_two(rss_size);
        roundup_size = ilog2(roundup_size);

        for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
                tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
                tc_size[i] = roundup_size;
                tc_offset[i] = rss_size * i;
        }

        hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
        for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
                hnae_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
                             (tc_valid[i] & 0x1));
                hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
                               HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
                hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
                               HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
        }
        status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "VF failed(=%d) to set rss tc mode\n", status);

        return status;
}

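/* hclgevf_get_rss_hw_cfg - read the hash algorithm and/or hash key back
 * from hardware. The key is fetched in HCLGEVF_RSS_HASH_KEY_NUM-byte
 * chunks over three lookups (the final chunk is shorter), so lkup_times
 * is 3 when a key buffer is supplied, 1 when only the hash type is
 * wanted and 0 when neither is requested.
 */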
static int hclgevf_get_rss_hw_cfg(struct hnae3_handle *handle, u8 *hash,
                                  u8 *key)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct hclgevf_rss_config_cmd *req;
        int lkup_times = key ? 3 : 1;
        struct hclgevf_desc desc;
        int key_offset;
        int key_size;
        int status;

        req = (struct hclgevf_rss_config_cmd *)desc.data;
        lkup_times = (lkup_times == 3) ? 3 : ((hash) ? 1 : 0);

        for (key_offset = 0; key_offset < lkup_times; key_offset++) {
                hclgevf_cmd_setup_basic_desc(&desc,
                                             HCLGEVF_OPC_RSS_GENERIC_CONFIG,
                                             true);
                req->hash_config |= (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET);

                status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
                if (status) {
                        dev_err(&hdev->pdev->dev,
                                "failed to get hardware RSS cfg, status = %d\n",
                                status);
                        return status;
                }

                if (key_offset == 2)
                        key_size =
                        HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
                else
                        key_size = HCLGEVF_RSS_HASH_KEY_NUM;

                if (key)
                        memcpy(key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM,
                               req->hash_key,
                               key_size);
        }

        if (hash) {
                if ((req->hash_config & 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ)
                        *hash = ETH_RSS_HASH_TOP;
                else
                        *hash = ETH_RSS_HASH_UNKNOWN;
        }

        return 0;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
                           u8 *hfunc)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
        int i;

        if (indir)
                for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
                        indir[i] = rss_cfg->rss_indirection_tbl[i];

        return hclgevf_get_rss_hw_cfg(handle, hfunc, key);
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
                           const u8 *key, const u8 hfunc)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
        int i;

        /* update the shadow RSS table with user specified qids */
        for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
                rss_cfg->rss_indirection_tbl[i] = indir[i];

        /* update the hardware */
        return hclgevf_set_rss_indir_table(hdev);
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

        return rss_cfg->rss_size;
}

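/* hclgevf_bind_ring_to_vector - (un)map a chain of rings onto a vector via
 * the PF mailbox. Ring nodes are packed into req->msg[] in fixed-size
 * slots; when the message fills up (or the chain ends) it is flushed with
 * the running node count in msg[2], and a fresh request is started.
 */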
static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
                                       int vector,
                                       struct hnae3_ring_chain_node *ring_chain)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct hnae3_ring_chain_node *node;
        struct hclge_mbx_vf_to_pf_cmd *req;
        struct hclgevf_desc desc;
        int i = 0, vector_id;
        int status;
        u8 type;

        req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
        vector_id = hclgevf_get_vector_index(hdev, vector);
        if (vector_id < 0) {
                dev_err(&handle->pdev->dev,
                        "Get vector index fail. ret = %d\n", vector_id);
                return vector_id;
        }

        for (node = ring_chain; node; node = node->next) {
                int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
                                        HCLGE_MBX_RING_NODE_VARIABLE_NUM * i;

                if (i == 0) {
                        hclgevf_cmd_setup_basic_desc(&desc,
                                                     HCLGEVF_OPC_MBX_VF_TO_PF,
                                                     false);
                        type = en ?
                                HCLGE_MBX_MAP_RING_TO_VECTOR :
                                HCLGE_MBX_UNMAP_RING_TO_VECTOR;
                        req->msg[0] = type;
                        req->msg[1] = vector_id;
                }

                req->msg[idx_offset] =
                                hnae_get_bit(node->flag, HNAE3_RING_TYPE_B);
                req->msg[idx_offset + 1] = node->tqp_index;
                req->msg[idx_offset + 2] = hnae_get_field(node->int_gl_idx,
                                                          HNAE3_RING_GL_IDX_M,
                                                          HNAE3_RING_GL_IDX_S);

                i++;
                if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
                     HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
                     HCLGE_MBX_RING_NODE_VARIABLE_NUM) ||
                    !node->next) {
                        req->msg[2] = i;

                        status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
                        if (status) {
                                dev_err(&hdev->pdev->dev,
                                        "Map TQP fail, status is %d.\n",
                                        status);
                                return status;
                        }
                        i = 0;
                        hclgevf_cmd_setup_basic_desc(&desc,
                                                     HCLGEVF_OPC_MBX_VF_TO_PF,
                                                     false);
                        req->msg[0] = type;
                        req->msg[1] = vector_id;
                }
        }

        return 0;
}

static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
                                      struct hnae3_ring_chain_node *ring_chain)
{
        return hclgevf_bind_ring_to_vector(handle, true, vector, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(
                                struct hnae3_handle *handle,
                                int vector,
                                struct hnae3_ring_chain_node *ring_chain)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        int ret, vector_id;

        vector_id = hclgevf_get_vector_index(hdev, vector);
        if (vector_id < 0) {
                dev_err(&handle->pdev->dev,
                        "Get vector index fail. ret = %d\n", vector_id);
                return vector_id;
        }

        ret = hclgevf_bind_ring_to_vector(handle, false, vector, ring_chain);
        if (ret)
                dev_err(&handle->pdev->dev,
                        "Unmap ring from vector fail. vector=%d, ret = %d\n",
                        vector_id,
                        ret);

        return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        hclgevf_free_vector(hdev, vector);

        return 0;
}

static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
                                        bool en_uc_pmc, bool en_mc_pmc)
{
        struct hclge_mbx_vf_to_pf_cmd *req;
        struct hclgevf_desc desc;
        int status;

        req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

        hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
        req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
        req->msg[1] = en_uc_pmc ? 1 : 0;
        req->msg[2] = en_mc_pmc ? 1 : 0;

        status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Set promisc mode fail, status is %d.\n", status);

        return status;
}

static void hclgevf_set_promisc_mode(struct hnae3_handle *handle,
                                     bool en_uc_pmc, bool en_mc_pmc)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc);
}

static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
                              int stream_id, bool enable)
{
        struct hclgevf_cfg_com_tqp_queue_cmd *req;
        struct hclgevf_desc desc;
        int status;

        req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

        hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
                                     false);
        req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
        req->stream_id = cpu_to_le16(stream_id);
        req->enable |= enable << HCLGEVF_TQP_ENABLE_B;

        status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "TQP enable fail, status = %d.\n", status);

        return status;
}

static int hclgevf_get_queue_id(struct hnae3_queue *queue)
{
        struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q);

        return tqp->index;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
        struct hnae3_queue *queue;
        struct hclgevf_tqp *tqp;
        int i;

        for (i = 0; i < handle->kinfo.num_tqps; i++) {
                queue = handle->kinfo.tqp[i];
                tqp = container_of(queue, struct hclgevf_tqp, q);
                memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
        }
}

static int hclgevf_cfg_func_mta_type(struct hclgevf_dev *hdev)
{
        u8 resp_msg = HCLGEVF_MTA_TYPE_SEL_MAX;
        int ret;

        ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
                                   HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ,
                                   NULL, 0, true, &resp_msg, sizeof(u8));

        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Read mta type fail, ret=%d.\n", ret);
                return ret;
        }

        if (resp_msg > HCLGEVF_MTA_TYPE_SEL_MAX) {
                dev_err(&hdev->pdev->dev,
                        "Read mta type invalid, resp=%d.\n", resp_msg);
                return -EINVAL;
        }

        hdev->mta_mac_sel_type = resp_msg;

        return 0;
}

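/* Derive the multicast table (MTA) index from a MAC address: the first two
 * address bytes are combined and right-shifted according to the PF-selected
 * MTA type, keeping 12 significant bits as the table index.
 */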
static u16 hclgevf_get_mac_addr_to_mta_index(struct hclgevf_dev *hdev,
                                             const u8 *addr)
{
        u32 rsh = HCLGEVF_MTA_TYPE_SEL_MAX - hdev->mta_mac_sel_type;
        u16 high_val = addr[1] | (addr[0] << 8);

        return (high_val >> rsh) & 0xfff;
}

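/* Push the local MTA status bitmap to the PF in 13-byte chunks. Byte 0 of
 * each mailbox message carries the chunk index in its low 7 bits and an
 * end-of-table flag in bit 7; the remaining bytes are the bitmap payload.
 */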
static int hclgevf_do_update_mta_status(struct hclgevf_dev *hdev,
                                        unsigned long *status)
{
#define HCLGEVF_MTA_STATUS_MSG_SIZE 13
#define HCLGEVF_MTA_STATUS_MSG_BITS \
                        (HCLGEVF_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE)
#define HCLGEVF_MTA_STATUS_MSG_END_BITS \
                        (HCLGEVF_MTA_TBL_SIZE % HCLGEVF_MTA_STATUS_MSG_BITS)
        u16 tbl_cnt;
        u16 tbl_idx;
        u8 msg_cnt;
        u8 msg_idx;
        int ret;

        msg_cnt = DIV_ROUND_UP(HCLGEVF_MTA_TBL_SIZE,
                               HCLGEVF_MTA_STATUS_MSG_BITS);
        tbl_idx = 0;
        msg_idx = 0;
        while (msg_cnt--) {
                u8 msg[HCLGEVF_MTA_STATUS_MSG_SIZE + 1];
                u8 *p = &msg[1];
                u8 msg_ofs;
                u8 msg_bit;

                memset(msg, 0, sizeof(msg));

                /* set index field */
                msg[0] = 0x7F & msg_idx;

                /* set end flag field */
                if (msg_cnt == 0) {
                        msg[0] |= 0x80;
                        tbl_cnt = HCLGEVF_MTA_STATUS_MSG_END_BITS;
                } else {
                        tbl_cnt = HCLGEVF_MTA_STATUS_MSG_BITS;
                }

                /* set status field */
                msg_ofs = 0;
                msg_bit = 0;
                while (tbl_cnt--) {
                        if (test_bit(tbl_idx, status))
                                p[msg_ofs] |= BIT(msg_bit);

                        tbl_idx++;

                        msg_bit++;
                        if (msg_bit == BITS_PER_BYTE) {
                                msg_bit = 0;
                                msg_ofs++;
                        }
                }

                ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
                                           HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE,
                                           msg, sizeof(msg), false, NULL, 0);
                if (ret)
                        break;

                msg_idx++;
        }

        return ret;
}

static int hclgevf_update_mta_status(struct hnae3_handle *handle)
{
        unsigned long mta_status[BITS_TO_LONGS(HCLGEVF_MTA_TBL_SIZE)];
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        struct net_device *netdev = hdev->nic.kinfo.netdev;
        struct netdev_hw_addr *ha;
        u16 tbl_idx;

        /* clear status */
        memset(mta_status, 0, sizeof(mta_status));

        /* update status from mc addr list */
        netdev_for_each_mc_addr(ha, netdev) {
                tbl_idx = hclgevf_get_mac_addr_to_mta_index(hdev, ha->addr);
                set_bit(tbl_idx, mta_status);
        }

        return hclgevf_do_update_mta_status(hdev, mta_status);
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
                                bool is_first)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
        u8 *new_mac_addr = (u8 *)p;
        u8 msg_data[ETH_ALEN * 2];
        u16 subcode;
        int status;

        ether_addr_copy(msg_data, new_mac_addr);
        ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);

        subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
                        HCLGE_MBX_MAC_VLAN_UC_MODIFY;

        status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
                                      subcode, msg_data, ETH_ALEN * 2,
                                      true, NULL, 0);
        if (!status)
                ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

        return status;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
                               const unsigned char *addr)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
                                    HCLGE_MBX_MAC_VLAN_UC_ADD,
                                    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
                              const unsigned char *addr)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
                                    HCLGE_MBX_MAC_VLAN_UC_REMOVE,
                                    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
                               const unsigned char *addr)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
                                    HCLGE_MBX_MAC_VLAN_MC_ADD,
                                    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
                              const unsigned char *addr)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
                                    HCLGE_MBX_MAC_VLAN_MC_REMOVE,
                                    addr, ETH_ALEN, false, NULL, 0);
}

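/* hclgevf_set_vlan_filter - forward a VLAN filter request to the PF.
 * The 5-byte message carries the kill flag in byte 0, the VLAN ID in
 * bytes 1-2 and the 802.1Q protocol value in bytes 3-4.
 */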
static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
                                   __be16 proto, u16 vlan_id,
                                   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_MSG_LEN 5
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];

        if (vlan_id > 4095)
                return -EINVAL;

        if (proto != htons(ETH_P_8021Q))
                return -EPROTONOSUPPORT;

        msg_data[0] = is_kill;
        memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
        memcpy(&msg_data[3], &proto, sizeof(proto));
        return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
                                    HCLGE_MBX_VLAN_FILTER, msg_data,
                                    HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
}

static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        u8 msg_data;

        msg_data = enable ? 1 : 0;
        return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
                                    HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data,
                                    1, false, NULL, 0);
}

static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        u8 msg_data[2];
        int ret;

        memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

        /* disable the VF queue before sending the queue reset msg to the PF */
        ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
        if (ret)
                return;

        hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
                             2, true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
                                 enum hnae3_reset_notify_type type)
{
        struct hnae3_client *client = hdev->nic_client;
        struct hnae3_handle *handle = &hdev->nic;

        if (!client->ops->reset_notify)
                return -EOPNOTSUPP;

        return client->ops->reset_notify(handle, type);
}

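/* hclgevf_reset_wait - poll the function-reset status register until the
 * hardware clears the reset-in-progress bit, giving up after
 * HCLGEVF_RESET_WAIT_CNT polls of HCLGEVF_RESET_WAIT_MS each.
 */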
static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_MS   500
#define HCLGEVF_RESET_WAIT_CNT  20
        u32 val, cnt = 0;

        /* wait to check the hardware reset completion status */
        val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
        while (hnae_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
                            (cnt < HCLGEVF_RESET_WAIT_CNT)) {
                msleep(HCLGEVF_RESET_WAIT_MS);
                val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
                cnt++;
        }

        /* hardware completion status should be available by this time */
        if (cnt >= HCLGEVF_RESET_WAIT_CNT) {
                dev_warn(&hdev->pdev->dev,
                         "couldn't get reset done status from h/w, timeout!\n");
                return -EBUSY;
        }

        /* wait a bit longer to let the stack-level reset complete. This can
         * be needed when the reset was asserted by the PF; it also means we
         * may wait longer than necessary even for a pure VF reset.
         */
        msleep(5000);

        return 0;
}

static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
        int ret;

        /* uninitialize the nic client */
        hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);

        /* re-initialize the hclge device */
        ret = hclgevf_init_hdev(hdev);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "hclge device re-init failed, VF is disabled!\n");
                return ret;
        }

        /* bring up the nic client again */
        hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);

        return 0;
}

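/* hclgevf_reset - full VF reset path: quiesce the NIC client, wait for the
 * hardware to finish its reset, then rebuild the hclgevf device and bring
 * the client back up. Runs under rtnl_lock to serialize with the stack.
 */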
static int hclgevf_reset(struct hclgevf_dev *hdev)
{
        int ret;

        rtnl_lock();

        /* bring down the nic to stop any ongoing TX/RX */
        hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);

        /* check if VF could successfully fetch the hardware reset completion
         * status from the hardware
         */
        ret = hclgevf_reset_wait(hdev);
        if (ret) {
                /* can't do much in this situation, will disable VF */
                dev_err(&hdev->pdev->dev,
                        "VF failed(=%d) to fetch H/W reset completion status\n",
                        ret);

                dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n");
                hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);

                rtnl_unlock();
                return ret;
        }

        /* now, re-initialize the nic client and ae device */
        ret = hclgevf_reset_stack(hdev);
        if (ret)
                dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");

        /* bring up the nic to enable TX/RX again */
        hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);

        rtnl_unlock();

        return ret;
}

static int hclgevf_do_reset(struct hclgevf_dev *hdev)
{
        int status;
        u8 respmsg;

        status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
                                      0, false, &respmsg, sizeof(u8));
        if (status)
                dev_err(&hdev->pdev->dev,
                        "VF reset request to PF failed(=%d)\n", status);

        return status;
}

static void hclgevf_reset_event(struct hnae3_handle *handle)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");

        handle->reset_level = HNAE3_VF_RESET;

        /* reset of this VF requested */
        set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
        hclgevf_reset_task_schedule(hdev);

        handle->last_reset_time = jiffies;
}

static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

        return hdev->fw_version;
}

static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
        struct hclgevf_misc_vector *vector = &hdev->misc_vector;

        vector->vector_irq = pci_irq_vector(hdev->pdev,
                                            HCLGEVF_MISC_VECTOR_NUM);
        vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
        /* vector status always valid for Vector 0 */
        hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
        hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

        hdev->num_msi_left -= 1;
        hdev->num_msi_used += 1;
}

void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
        if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
            !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) {
                set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
                schedule_work(&hdev->rst_service_task);
        }
}

void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
        if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) &&
            !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) {
                set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
                schedule_work(&hdev->mbx_service_task);
        }
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
{
        if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
            !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
                schedule_work(&hdev->service_task);
}

static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
{
        /* if we have any pending mailbox event then schedule the mbx task */
        if (hdev->mbx_event_pending)
                hclgevf_mbx_task_schedule(hdev);

        if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
                hclgevf_reset_task_schedule(hdev);
}

static void hclgevf_service_timer(struct timer_list *t)
{
        struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);

        mod_timer(&hdev->service_timer, jiffies + 5 * HZ);

        hclgevf_task_schedule(hdev);
}

static void hclgevf_reset_service_task(struct work_struct *work)
{
        struct hclgevf_dev *hdev =
                container_of(work, struct hclgevf_dev, rst_service_task);
        int ret;

        if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
                return;

        clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);

        if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
                               &hdev->reset_state)) {
                /* PF has intimated that it is about to reset the hardware.
                 * We now have to poll & check if hardware has actually
                 * completed the reset sequence. On hardware reset completion,
                 * VF needs to reset the client and ae device.
                 */
                hdev->reset_attempts = 0;

                ret = hclgevf_reset(hdev);
                if (ret)
                        dev_err(&hdev->pdev->dev, "VF stack reset failed.\n");
        } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
                                      &hdev->reset_state)) {
                /* we could be here when either of below happens:
                 * 1. reset was initiated due to watchdog timeout caused by
                 *    a. IMP was earlier reset and our TX got choked down,
                 *       which resulted in the watchdog reacting and inducing
                 *       VF reset. This also means our cmdq would be
                 *       unreliable.
                 *    b. problem in TX due to another lower layer (for example
                 *       the link layer not functioning properly etc.)
                 * 2. VF reset might have been initiated due to some config
                 *    change.
                 *
                 * NOTE: There is no clearer way to detect the above cases
                 * than to react to the response of the PF for this reset
                 * request. PF will ack the 1b and 2 cases, but we will not
                 * get any intimation about 1a from the PF as the cmdq would
                 * be in an unreliable state, i.e. mailbox communication
                 * between PF and VF would be broken.
                 */

                /* if we are never getting into pending state it means either:
                 * 1. PF is not receiving our request which could be due to IMP
                 *    reset
                 * 2. PF is screwed
                 * We cannot do much for 2. but to check first we can try reset
                 * our PCIe + stack and see if it alleviates the problem.
                 */
                if (hdev->reset_attempts > 3) {
                        /* prepare for full reset of stack + pcie interface */
                        hdev->nic.reset_level = HNAE3_VF_FULL_RESET;

                        /* "defer" schedule the reset task again */
                        set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
                } else {
                        hdev->reset_attempts++;

                        /* request PF for resetting this VF via mailbox */
                        ret = hclgevf_do_reset(hdev);
                        if (ret)
                                dev_warn(&hdev->pdev->dev,
                                         "VF rst fail, stack will call\n");
                }
        }

        clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}

static void hclgevf_mailbox_service_task(struct work_struct *work)
{
        struct hclgevf_dev *hdev;

        hdev = container_of(work, struct hclgevf_dev, mbx_service_task);

        if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
                return;

        clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);

        hclgevf_mbx_async_handler(hdev);

        clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_service_task(struct work_struct *work)
{
        struct hclgevf_dev *hdev;

        hdev = container_of(work, struct hclgevf_dev, service_task);

        /* request the link status from the PF. PF would be able to tell VF
         * about such updates in future so we might remove this later
         */
        hclgevf_request_link_info(hdev);

        hclgevf_deferred_task_schedule(hdev);

        clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
}

static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
        hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

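/* hclgevf_check_event_cause - check vector 0 for a mailbox (CMDQ RX) event.
 * Returns true if one is pending and passes back, via clearval, the value
 * that hclgevf_clear_event_cause() should write to acknowledge it.
 */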
1277static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval)
1278{
1279        u32 cmdq_src_reg;
1280
1281        /* fetch the events from their corresponding regs */
1282        cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
1283                                        HCLGEVF_VECTOR0_CMDQ_SRC_REG);
1284
1285        /* check for vector0 mailbox(=CMDQ RX) event source */
1286        if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
1287                cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
1288                *clearval = cmdq_src_reg;
1289                return true;
1290        }
1291
1292        dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");
1293
1294        return false;
1295}
1296
1297static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
1298{
1299        writel(en ? 1 : 0, vector->addr);
1300}
1301
1302static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
1303{
1304        struct hclgevf_dev *hdev = data;
1305        u32 clearval;
1306
1307        hclgevf_enable_vector(&hdev->misc_vector, false);
1308        if (!hclgevf_check_event_cause(hdev, &clearval))
1309                goto skip_sched;
1310
1311        hclgevf_mbx_handler(hdev);
1312
1313        hclgevf_clear_event_cause(hdev, clearval);
1314
1315skip_sched:
1316        hclgevf_enable_vector(&hdev->misc_vector, true);
1317
1318        return IRQ_HANDLED;
1319}
1320
1321static int hclgevf_configure(struct hclgevf_dev *hdev)
1322{
1323        int ret;
1324
1325        /* get queue configuration from PF */
1326        ret = hclge_get_queue_info(hdev);
1327        if (ret)
1328                return ret;
1329        /* get tc configuration from PF */
1330        return hclgevf_get_tc_info(hdev);
1331}
1332
1333static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
1334{
1335        struct pci_dev *pdev = ae_dev->pdev;
1336        struct hclgevf_dev *hdev = ae_dev->priv;
1337
1338        hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
1339        if (!hdev)
1340                return -ENOMEM;
1341
1342        hdev->pdev = pdev;
1343        hdev->ae_dev = ae_dev;
1344        ae_dev->priv = hdev;
1345
1346        return 0;
1347}
1348
1349static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
1350{
1351        struct hnae3_handle *roce = &hdev->roce;
1352        struct hnae3_handle *nic = &hdev->nic;
1353
1354        roce->rinfo.num_vectors = HCLGEVF_ROCEE_VECTOR_NUM;
1355
1356        if (hdev->num_msi_left < roce->rinfo.num_vectors ||
1357            hdev->num_msi_left == 0)
1358                return -EINVAL;
1359
1360        roce->rinfo.base_vector =
1361                hdev->vector_status[hdev->num_msi_used];
1362
1363        roce->rinfo.netdev = nic->kinfo.netdev;
1364        roce->rinfo.roce_io_base = hdev->hw.io_base;
1365
1366        roce->pdev = nic->pdev;
1367        roce->ae_algo = nic->ae_algo;
1368        roce->numa_node_mask = nic->numa_node_mask;
1369
1370        return 0;
1371}
1372
1373static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
1374{
1375        struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
1376        int i, ret;
1377
1378        rss_cfg->rss_size = hdev->rss_size_max;
1379
1380        /* Initialize RSS indirect table for each vport */
1381        for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
1382                rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;
1383
1384        ret = hclgevf_set_rss_indir_table(hdev);
1385        if (ret)
1386                return ret;
1387
1388        return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
1389}
1390
1391static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
1392{
1393        /* other vlan config(like, VLAN TX/RX offload) would also be added
1394         * here later
1395         */
1396        return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
1397                                       false);
1398}
1399
1400static int hclgevf_ae_start(struct hnae3_handle *handle)
1401{
1402        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1403        int i, queue_id;
1404
1405        for (i = 0; i < handle->kinfo.num_tqps; i++) {
1406                /* ring enable */
1407                queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
1408                if (queue_id < 0) {
1409                        dev_warn(&hdev->pdev->dev,
1410                                 "Get invalid queue id, ignore it\n");
1411                        continue;
1412                }
1413
1414                hclgevf_tqp_enable(hdev, queue_id, 0, true);
1415        }
1416
1417        /* reset tqp stats */
1418        hclgevf_reset_tqp_stats(handle);
1419
1420        hclgevf_request_link_info(hdev);
1421
1422        clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
1423        mod_timer(&hdev->service_timer, jiffies + HZ);
1424
1425        return 0;
1426}
1427
1428static void hclgevf_ae_stop(struct hnae3_handle *handle)
1429{
1430        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1431        int i, queue_id;
1432
1433        for (i = 0; i < hdev->num_tqps; i++) {
1434                /* Ring disable */
1435                queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
1436                if (queue_id < 0) {
1437                        dev_warn(&hdev->pdev->dev,
1438                                 "Get invalid queue id, ignore it\n");
1439                        continue;
1440                }
1441
1442                hclgevf_tqp_enable(hdev, queue_id, 0, false);
1443        }
1444
1445        /* reset tqp stats */
1446        hclgevf_reset_tqp_stats(handle);
1447        del_timer_sync(&hdev->service_timer);
1448        cancel_work_sync(&hdev->service_task);
1449        clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
1450        hclgevf_update_link_status(hdev, 0);
1451}
1452
1453static void hclgevf_state_init(struct hclgevf_dev *hdev)
1454{
1455        /* if this is on going reset then skip this initialization */
1456        if (hclgevf_dev_ongoing_reset(hdev))
1457                return;
1458
1459        /* setup tasks for the MBX */
1460        INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
1461        clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
1462        clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
1463
1464        /* setup tasks for service timer */
1465        timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);
1466
1467        INIT_WORK(&hdev->service_task, hclgevf_service_task);
1468        clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
1469
1470        INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task);
1471
1472        mutex_init(&hdev->mbx_resp.mbx_mutex);
1473
1474        /* bring the device down */
1475        set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
1476}
1477
1478static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
1479{
1480        set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
1481
1482        if (hdev->service_timer.function)
1483                del_timer_sync(&hdev->service_timer);
1484        if (hdev->service_task.func)
1485                cancel_work_sync(&hdev->service_task);
1486        if (hdev->mbx_service_task.func)
1487                cancel_work_sync(&hdev->mbx_service_task);
1488        if (hdev->rst_service_task.func)
1489                cancel_work_sync(&hdev->rst_service_task);
1490
1491        mutex_destroy(&hdev->mbx_resp.mbx_mutex);
1492}
1493
1494static int hclgevf_init_msi(struct hclgevf_dev *hdev)
1495{
1496        struct pci_dev *pdev = hdev->pdev;
1497        int vectors;
1498        int i;
1499
1500        /* if a reset is ongoing, skip this initialization */
1501        if (hclgevf_dev_ongoing_reset(hdev))
1502                return 0;
1503
1504        hdev->num_msi = HCLGEVF_MAX_VF_VECTOR_NUM;
1505
1506        vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
1507                                        PCI_IRQ_MSI | PCI_IRQ_MSIX);
1508        if (vectors < 0) {
1509                dev_err(&pdev->dev,
1510                        "failed(%d) to allocate MSI/MSI-X vectors\n",
1511                        vectors);
1512                return vectors;
1513        }
1514        if (vectors < hdev->num_msi)
1515                dev_warn(&hdev->pdev->dev,
1516                         "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
1517                         hdev->num_msi, vectors);
1518
1519        hdev->num_msi = vectors;
1520        hdev->num_msi_left = vectors;
1521        hdev->base_msi_vector = pdev->irq;
1522
1523        hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1524                                           sizeof(u16), GFP_KERNEL);
1525        if (!hdev->vector_status) {
1526                pci_free_irq_vectors(pdev);
1527                return -ENOMEM;
1528        }
1529
1530        for (i = 0; i < hdev->num_msi; i++)
1531                hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;
1532
1533        hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
1534                                        sizeof(int), GFP_KERNEL);
1535        if (!hdev->vector_irq) {
1536                pci_free_irq_vectors(pdev);
1537                return -ENOMEM;
1538        }
1539
1540        return 0;
1541}
1542
1543static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
1544{
1545        struct pci_dev *pdev = hdev->pdev;
1546
1547        pci_free_irq_vectors(pdev);
1548}
1549
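    /* Set up the miscellaneous interrupt (vector 0), registered as
     * "hclgevf_cmd": clear any stale event cause, then enable the
     * vector so command-queue/mailbox events can be serviced.
     */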
1550static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
1551{
1552        int ret;
1553
1554        /* if a reset is ongoing, skip this initialization */
1555        if (hclgevf_dev_ongoing_reset(hdev))
1556                return 0;
1557
1558        hclgevf_get_misc_vector(hdev);
1559
1560        ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
1561                          0, "hclgevf_cmd", hdev);
1562        if (ret) {
1563                dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
1564                        hdev->misc_vector.vector_irq);
1565                return ret;
1566        }
1567
1568        hclgevf_clear_event_cause(hdev, 0);
1569
1570        /* enable the misc vector (vector 0) */
1571        hclgevf_enable_vector(&hdev->misc_vector, true);
1572
1573        return 0;
1574}
1575
1576static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
1577{
1578        /* disable the misc vector (vector 0) */
1579        hclgevf_enable_vector(&hdev->misc_vector, false);
1580        synchronize_irq(hdev->misc_vector.vector_irq);
1581        free_irq(hdev->misc_vector.vector_irq, hdev);
1582        hclgevf_free_vector(hdev, 0);
1583}
1584
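    /* Attach an hnae3 client to this VF. KNIC/UNIC clients become the
     * nic client; a RoCE client is accepted only on RoCE-capable
     * hardware, and the RoCE instance is initialized once both the nic
     * and RoCE clients are present, from whichever side registers
     * second.
     */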
1585static int hclgevf_init_instance(struct hclgevf_dev *hdev,
1586                                 struct hnae3_client *client)
1587{
1588        int ret;
1589
1590        switch (client->type) {
1591        case HNAE3_CLIENT_KNIC:
1592                hdev->nic_client = client;
1593                hdev->nic.client = client;
1594
1595                ret = client->ops->init_instance(&hdev->nic);
1596                if (ret)
1597                        return ret;
1598
1599                if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
1600                        struct hnae3_client *rc = hdev->roce_client;
1601
1602                        ret = hclgevf_init_roce_base_info(hdev);
1603                        if (ret)
1604                                return ret;
1605                        ret = rc->ops->init_instance(&hdev->roce);
1606                        if (ret)
1607                                return ret;
1608                }
1609                break;
1610        case HNAE3_CLIENT_UNIC:
1611                hdev->nic_client = client;
1612                hdev->nic.client = client;
1613
1614                ret = client->ops->init_instance(&hdev->nic);
1615                if (ret)
1616                        return ret;
1617                break;
1618        case HNAE3_CLIENT_ROCE:
1619                if (hnae3_dev_roce_supported(hdev)) {
1620                        hdev->roce_client = client;
1621                        hdev->roce.client = client;
1622                }
1623
1624                if (hdev->roce_client && hdev->nic_client) {
1625                        ret = hclgevf_init_roce_base_info(hdev);
1626                        if (ret)
1627                                return ret;
1628
1629                        ret = client->ops->init_instance(&hdev->roce);
1630                        if (ret)
1631                                return ret;
1632                }
1633        }
1634
1635        return 0;
1636}
1637
1638static void hclgevf_uninit_instance(struct hclgevf_dev *hdev,
1639                                    struct hnae3_client *client)
1640{
1641        /* un-init roce, if it exists */
1642        if (hdev->roce_client)
1643                hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
1644
1645        /* un-init nic/unic, if this was not called by roce client */
1646        if (client->ops->uninit_instance &&
1647            client->type != HNAE3_CLIENT_ROCE)
1648                client->ops->uninit_instance(&hdev->nic, 0);
1649}
1650
1651static int hclgevf_register_client(struct hnae3_client *client,
1652                                   struct hnae3_ae_dev *ae_dev)
1653{
1654        struct hclgevf_dev *hdev = ae_dev->priv;
1655
1656        return hclgevf_init_instance(hdev, client);
1657}
1658
1659static void hclgevf_unregister_client(struct hnae3_client *client,
1660                                      struct hnae3_ae_dev *ae_dev)
1661{
1662        struct hclgevf_dev *hdev = ae_dev->priv;
1663
1664        hclgevf_uninit_instance(hdev, client);
1665}
1666
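    /* Bring up the PCI device: enable it, set a 64-bit DMA mask, claim
     * the regions and map BAR2, which exposes the VF's register space.
     */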
1667static int hclgevf_pci_init(struct hclgevf_dev *hdev)
1668{
1669        struct pci_dev *pdev = hdev->pdev;
1670        struct hclgevf_hw *hw;
1671        int ret;
1672
1673        /* Check whether PCI initialization can be skipped. It can while the
1674         * device is undergoing a VF reset, since the PCI interface stays up
1675         * across that reset; it must be (re-)done when the device is not in
1676         * any reset or is recovering from a full reset.
1677         */
1678        if (hclgevf_dev_ongoing_reset(hdev))
1679                return 0;
1680
1681        ret = pci_enable_device(pdev);
1682        if (ret) {
1683                dev_err(&pdev->dev, "failed to enable PCI device\n");
1684                return ret;
1685        }
1686
1687        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1688        if (ret) {
1689                dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting\n");
1690                goto err_disable_device;
1691        }
1692
1693        ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
1694        if (ret) {
1695                dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
1696                goto err_disable_device;
1697        }
1698
1699        pci_set_master(pdev);
1700        hw = &hdev->hw;
1701        hw->hdev = hdev;
1702        hw->io_base = pci_iomap(pdev, 2, 0);
1703        if (!hw->io_base) {
1704                dev_err(&pdev->dev, "can't map configuration register space\n");
1705                ret = -ENOMEM;
1706                goto err_clr_master;
1707        }
1708
1709        return 0;
1710
1711err_clr_master:
1712        pci_clear_master(pdev);
1713        pci_release_regions(pdev);
1714err_disable_device:
1715        pci_disable_device(pdev);
1716
1717        return ret;
1718}
1719
1720static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
1721{
1722        struct pci_dev *pdev = hdev->pdev;
1723
1724        pci_iounmap(pdev, hdev->hw.io_base);
1725        pci_clear_master(pdev);
1726        pci_release_regions(pdev);
1727        pci_disable_device(pdev);
1728}
1729
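    /* Full bring-up of the VF device: PCI and MSI setup, software
     * state, command queue and misc IRQ, then the configuration
     * obtained from the PF (TQPs, MTA type, RSS, VLAN). Failures are
     * unwound in reverse through the err_* labels.
     */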
1730static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
1731{
1732        struct pci_dev *pdev = hdev->pdev;
1733        int ret;
1734
1735        /* check if the device is undergoing a full reset (i.e. PCIe as well) */
1736        if (hclgevf_dev_ongoing_full_reset(hdev)) {
1737                dev_warn(&pdev->dev, "device is undergoing a full reset\n");
1738                hclgevf_uninit_hdev(hdev);
1739        }
1740
1741        ret = hclgevf_pci_init(hdev);
1742        if (ret) {
1743                dev_err(&pdev->dev, "PCI initialization failed\n");
1744                return ret;
1745        }
1746
1747        ret = hclgevf_init_msi(hdev);
1748        if (ret) {
1749                dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
1750                goto err_irq_init;
1751        }
1752
1753        hclgevf_state_init(hdev);
1754
1755        ret = hclgevf_cmd_init(hdev);
1756        if (ret)
1757                goto err_cmd_init;
1758
1759        ret = hclgevf_misc_irq_init(hdev);
1760        if (ret) {
1761                dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
1762                        ret);
1763                goto err_misc_irq_init;
1764        }
1765
1766        ret = hclgevf_configure(hdev);
1767        if (ret) {
1768                dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
1769                goto err_config;
1770        }
1771
1772        ret = hclgevf_alloc_tqps(hdev);
1773        if (ret) {
1774                dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
1775                goto err_config;
1776        }
1777
1778        ret = hclgevf_set_handle_info(hdev);
1779        if (ret) {
1780                dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
1781                goto err_config;
1782        }
1783
1784        /* Initialize the MTA type for this VF */
1785        ret = hclgevf_cfg_func_mta_type(hdev);
1786        if (ret) {
1787                dev_err(&hdev->pdev->dev,
1788                        "failed(%d) to initialize MTA type\n", ret);
1789                goto err_config;
1790        }
1791
1792        /* Initialize RSS for this VF */
1793        ret = hclgevf_rss_init_hw(hdev);
1794        if (ret) {
1795                dev_err(&hdev->pdev->dev,
1796                        "failed(%d) to initialize RSS\n", ret);
1797                goto err_config;
1798        }
1799
1800        ret = hclgevf_init_vlan_config(hdev);
1801        if (ret) {
1802                dev_err(&hdev->pdev->dev,
1803                        "failed(%d) to initialize VLAN config\n", ret);
1804                goto err_config;
1805        }
1806
1807        dev_info(&pdev->dev, "finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);
1808
1809        return 0;
1810
1811err_config:
1812        hclgevf_misc_irq_uninit(hdev);
1813err_misc_irq_init:
1814        hclgevf_cmd_uninit(hdev);
1815err_cmd_init:
1816        hclgevf_state_uninit(hdev);
1817        hclgevf_uninit_msi(hdev);
1818err_irq_init:
1819        hclgevf_pci_uninit(hdev);
1820        return ret;
1821}
1822
1823static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
1824{
1825        hclgevf_state_uninit(hdev);
1826        hclgevf_misc_irq_uninit(hdev);
1827        hclgevf_cmd_uninit(hdev);
1828        hclgevf_uninit_msi(hdev);
1829        hclgevf_pci_uninit(hdev);
1830}
1831
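    /* hnae3 framework entry point: allocate the VF device structure
     * for this ae_dev and run the full initialization above.
     */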
1832static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
1833{
1834        struct pci_dev *pdev = ae_dev->pdev;
1835        int ret;
1836
1837        ret = hclgevf_alloc_hdev(ae_dev);
1838        if (ret) {
1839                dev_err(&pdev->dev, "hclge device allocation failed\n");
1840                return ret;
1841        }
1842
1843        ret = hclgevf_init_hdev(ae_dev->priv);
1844        if (ret)
1845                dev_err(&pdev->dev, "hclge device initialization failed\n");
1846
1847        return ret;
1848}
1849
1850static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
1851{
1852        struct hclgevf_dev *hdev = ae_dev->priv;
1853
1854        hclgevf_uninit_hdev(hdev);
1855        ae_dev->priv = NULL;
1856}
1857
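    /* A VF can use at most rss_size_max queue pairs per TC, capped at
     * the number of TQPs actually assigned to it.
     */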
1858static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
1859{
1860        struct hnae3_handle *nic = &hdev->nic;
1861        struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1862
1863        return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
1864}
1865
1866/**
1867 * hclgevf_get_channels - Get the current channels enabled and max supported.
1868 * @handle: hardware information for network interface
1869 * @ch: ethtool channels structure
1870 *
1871 * We don't support separate tx and rx queues as channels. The other count
1872 * represents how many queues are being used for control. max_combined counts
1873 * how many queue pairs we can support. They may not be mapped 1 to 1 with
1874 * q_vectors since we support many more queue pairs than q_vectors.
1875 **/
1876static void hclgevf_get_channels(struct hnae3_handle *handle,
1877                                 struct ethtool_channels *ch)
1878{
1879        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1880
1881        ch->max_combined = hclgevf_get_max_channels(hdev);
1882        ch->other_count = 0;
1883        ch->max_other = 0;
1884        ch->combined_count = hdev->num_tqps;
1885}
1886
1887static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
1888                                          u16 *free_tqps, u16 *max_rss_size)
1889{
1890        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1891
1892        *free_tqps = 0;
1893        *max_rss_size = hdev->rss_size_max;
1894}
1895
1896static int hclgevf_get_status(struct hnae3_handle *handle)
1897{
1898        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1899
1900        return hdev->hw.mac.link;
1901}
1902
1903static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
1904                                            u8 *auto_neg, u32 *speed,
1905                                            u8 *duplex)
1906{
1907        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1908
1909        if (speed)
1910                *speed = hdev->hw.mac.speed;
1911        if (duplex)
1912                *duplex = hdev->hw.mac.duplex;
1913        if (auto_neg)
1914                *auto_neg = AUTONEG_DISABLE;
1915}
1916
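    /* Cache the link speed and duplex for this VF; non-static since
     * the values are pushed down by the PF and updated from the
     * mailbox handling code.
     */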
1917void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
1918                                 u8 duplex)
1919{
1920        hdev->hw.mac.speed = speed;
1921        hdev->hw.mac.duplex = duplex;
1922}
1923
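    /* This ops table plugs the VF-side implementations above into the
     * hnae3 framework; the hclge PF driver supplies the corresponding
     * table for physical functions.
     */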
1924static const struct hnae3_ae_ops hclgevf_ops = {
1925        .init_ae_dev = hclgevf_init_ae_dev,
1926        .uninit_ae_dev = hclgevf_uninit_ae_dev,
1927        .init_client_instance = hclgevf_register_client,
1928        .uninit_client_instance = hclgevf_unregister_client,
1929        .start = hclgevf_ae_start,
1930        .stop = hclgevf_ae_stop,
1931        .map_ring_to_vector = hclgevf_map_ring_to_vector,
1932        .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
1933        .get_vector = hclgevf_get_vector,
1934        .put_vector = hclgevf_put_vector,
1935        .reset_queue = hclgevf_reset_tqp,
1936        .set_promisc_mode = hclgevf_set_promisc_mode,
1937        .get_mac_addr = hclgevf_get_mac_addr,
1938        .set_mac_addr = hclgevf_set_mac_addr,
1939        .add_uc_addr = hclgevf_add_uc_addr,
1940        .rm_uc_addr = hclgevf_rm_uc_addr,
1941        .add_mc_addr = hclgevf_add_mc_addr,
1942        .rm_mc_addr = hclgevf_rm_mc_addr,
1943        .update_mta_status = hclgevf_update_mta_status,
1944        .get_stats = hclgevf_get_stats,
1945        .update_stats = hclgevf_update_stats,
1946        .get_strings = hclgevf_get_strings,
1947        .get_sset_count = hclgevf_get_sset_count,
1948        .get_rss_key_size = hclgevf_get_rss_key_size,
1949        .get_rss_indir_size = hclgevf_get_rss_indir_size,
1950        .get_rss = hclgevf_get_rss,
1951        .set_rss = hclgevf_set_rss,
1952        .get_tc_size = hclgevf_get_tc_size,
1953        .get_fw_version = hclgevf_get_fw_version,
1954        .set_vlan_filter = hclgevf_set_vlan_filter,
1955        .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
1956        .reset_event = hclgevf_reset_event,
1957        .get_channels = hclgevf_get_channels,
1958        .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
1959        .get_status = hclgevf_get_status,
1960        .get_ksettings_an_result = hclgevf_get_ksettings_an_result,
1961};
1962
1963static struct hnae3_ae_algo ae_algovf = {
1964        .ops = &hclgevf_ops,
1965        .name = HCLGEVF_NAME,
1966        .pdev_id_table = ae_algovf_pci_tbl,
1967};
1968
1969static int hclgevf_init(void)
1970{
1971        pr_info("%s is initializing\n", HCLGEVF_NAME);
1972
1973        hnae3_register_ae_algo(&ae_algovf);
1974
1975        return 0;
1976}
1977
1978static void hclgevf_exit(void)
1979{
1980        hnae3_unregister_ae_algo(&ae_algovf);
1981}
1982module_init(hclgevf_init);
1983module_exit(hclgevf_exit);
1984
1985MODULE_LICENSE("GPL");
1986MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
1987MODULE_DESCRIPTION("HCLGEVF Driver");
1988MODULE_VERSION(HCLGEVF_MOD_VERSION);
1989