linux/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
   1/*
   2 * Copyright (c) 2016~2017 Hisilicon Limited.
   3 *
   4 * This program is free software; you can redistribute it and/or modify
   5 * it under the terms of the GNU General Public License as published by
   6 * the Free Software Foundation; either version 2 of the License, or
   7 * (at your option) any later version.
   8 */
   9
  10#include <linux/dma-mapping.h>
  11#include <linux/etherdevice.h>
  12#include <linux/interrupt.h>
  13#include <linux/if_vlan.h>
  14#include <linux/ip.h>
  15#include <linux/ipv6.h>
  16#include <linux/module.h>
  17#include <linux/pci.h>
  18#include <linux/skbuff.h>
  19#include <linux/sctp.h>
  20#include <linux/vermagic.h>
  21#include <net/gre.h>
  22#include <net/pkt_cls.h>
  23#include <net/vxlan.h>
  24
  25#include "hnae3.h"
  26#include "hns3_enet.h"
  27
  28static void hns3_clear_all_ring(struct hnae3_handle *h);
  29static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
  30
  31static const char hns3_driver_name[] = "hns3";
  32const char hns3_driver_version[] = VERMAGIC_STRING;
  33static const char hns3_driver_string[] =
  34                        "Hisilicon Ethernet Network Driver for Hip08 Family";
  35static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
  36static struct hnae3_client client;
  37
  38/* hns3_pci_tbl - PCI Device ID Table
  39 *
  40 * Last entry must be all 0s
  41 *
  42 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
  43 *   Class, Class Mask, private data (not used) }
  44 */
  45static const struct pci_device_id hns3_pci_tbl[] = {
  46        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
  47        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
  48        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
  49         HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
  50        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
  51         HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
  52        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
  53         HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
  54        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
  55         HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
  56        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
  57         HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
  58        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
  59        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF),
  60         HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
  61        /* required last entry */
  62        {0, }
  63};
  64MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
  65
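/* Per-vector interrupt handler: all real work is deferred to NAPI polling. */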
  66static irqreturn_t hns3_irq_handle(int irq, void *dev)
  67{
  68        struct hns3_enet_tqp_vector *tqp_vector = dev;
  69
  70        napi_schedule(&tqp_vector->napi);
  71
  72        return IRQ_HANDLED;
  73}
  74
  75static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
  76{
  77        struct hns3_enet_tqp_vector *tqp_vectors;
  78        unsigned int i;
  79
  80        for (i = 0; i < priv->vector_num; i++) {
  81                tqp_vectors = &priv->tqp_vector[i];
  82
  83                if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
  84                        continue;
  85
  86                /* release the irq resource */
  87                free_irq(tqp_vectors->vector_irq, tqp_vectors);
  88                tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
  89        }
  90}
  91
  92static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
  93{
  94        struct hns3_enet_tqp_vector *tqp_vectors;
  95        int txrx_int_idx = 0;
  96        int rx_int_idx = 0;
  97        int tx_int_idx = 0;
  98        unsigned int i;
  99        int ret;
 100
 101        for (i = 0; i < priv->vector_num; i++) {
 102                tqp_vectors = &priv->tqp_vector[i];
 103
 104                if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
 105                        continue;
 106
 107                if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
 108                        snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
 109                                 "%s-%s-%d", priv->netdev->name, "TxRx",
 110                                 txrx_int_idx++);
 111                        txrx_int_idx++;
 112                } else if (tqp_vectors->rx_group.ring) {
 113                        snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
 114                                 "%s-%s-%d", priv->netdev->name, "Rx",
 115                                 rx_int_idx++);
 116                } else if (tqp_vectors->tx_group.ring) {
 117                        snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
 118                                 "%s-%s-%d", priv->netdev->name, "Tx",
 119                                 tx_int_idx++);
 120                } else {
 121                        /* Skip this unused q_vector */
 122                        continue;
 123                }
 124
 125                tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
 126
 127                ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
 128                                  tqp_vectors->name,
 129                                       tqp_vectors);
 130                if (ret) {
 131                        netdev_err(priv->netdev, "request irq(%d) fail\n",
 132                                   tqp_vectors->vector_irq);
 133                        return ret;
 134                }
 135
 136                tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
 137        }
 138
 139        return 0;
 140}
 141
 142static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
 143                                 u32 mask_en)
 144{
 145        writel(mask_en, tqp_vector->mask_addr);
 146}
 147
 148static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
 149{
 150        napi_enable(&tqp_vector->napi);
 151
 152        /* enable vector */
 153        hns3_mask_vector_irq(tqp_vector, 1);
 154}
 155
 156static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
 157{
 158        /* disable vector */
 159        hns3_mask_vector_irq(tqp_vector, 0);
 160
 161        disable_irq(tqp_vector->vector_irq);
 162        napi_disable(&tqp_vector->napi);
 163}
 164
 165void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
 166                                 u32 rl_value)
 167{
 168        u32 rl_reg = hns3_rl_usec_to_reg(rl_value);
 169
  170        /* This defines the configuration for RL (Interrupt Rate Limiter).
  171         * RL limits the interrupt rate, i.e. the number of interrupts per
  172         * second. GL and RL are two ways to achieve interrupt coalescing.
  173         */
 174
 175        if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable &&
 176            !tqp_vector->rx_group.coal.gl_adapt_enable)
 177                /* According to the hardware, the range of rl_reg is
 178                 * 0-59 and the unit is 4.
 179                 */
 180                rl_reg |=  HNS3_INT_RL_ENABLE_MASK;
 181
 182        writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
 183}
 184
 185void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
 186                                    u32 gl_value)
 187{
 188        u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);
 189
 190        writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
 191}
 192
 193void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
 194                                    u32 gl_value)
 195{
 196        u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);
 197
 198        writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
 199}
 200
 201static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
 202                                   struct hns3_nic_priv *priv)
 203{
 204        /* initialize the configuration for interrupt coalescing.
 205         * 1. GL (Interrupt Gap Limiter)
 206         * 2. RL (Interrupt Rate Limiter)
 207         */
 208
 209        /* Default: enable interrupt coalescing self-adaptive and GL */
 210        tqp_vector->tx_group.coal.gl_adapt_enable = 1;
 211        tqp_vector->rx_group.coal.gl_adapt_enable = 1;
 212
 213        tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
 214        tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;
 215
 216        tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
 217        tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
 218        tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
 219}
 220
 221static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
 222                                      struct hns3_nic_priv *priv)
 223{
 224        struct hnae3_handle *h = priv->ae_handle;
 225
 226        hns3_set_vector_coalesce_tx_gl(tqp_vector,
 227                                       tqp_vector->tx_group.coal.int_gl);
 228        hns3_set_vector_coalesce_rx_gl(tqp_vector,
 229                                       tqp_vector->rx_group.coal.int_gl);
 230        hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
 231}
 232
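/* Propagate the current TC/RSS layout to the networking core: program the
 * per-TC queue ranges (or reset them when only one TC is in use) and set
 * the real number of Tx/Rx queues to rss_size * num_tc.
 */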
 233static int hns3_nic_set_real_num_queue(struct net_device *netdev)
 234{
 235        struct hnae3_handle *h = hns3_get_handle(netdev);
 236        struct hnae3_knic_private_info *kinfo = &h->kinfo;
 237        unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
 238        int i, ret;
 239
 240        if (kinfo->num_tc <= 1) {
 241                netdev_reset_tc(netdev);
 242        } else {
 243                ret = netdev_set_num_tc(netdev, kinfo->num_tc);
 244                if (ret) {
 245                        netdev_err(netdev,
 246                                   "netdev_set_num_tc fail, ret=%d!\n", ret);
 247                        return ret;
 248                }
 249
 250                for (i = 0; i < HNAE3_MAX_TC; i++) {
 251                        if (!kinfo->tc_info[i].enable)
 252                                continue;
 253
 254                        netdev_set_tc_queue(netdev,
 255                                            kinfo->tc_info[i].tc,
 256                                            kinfo->tc_info[i].tqp_count,
 257                                            kinfo->tc_info[i].tqp_offset);
 258                }
 259        }
 260
 261        ret = netif_set_real_num_tx_queues(netdev, queue_size);
 262        if (ret) {
 263                netdev_err(netdev,
 264                           "netif_set_real_num_tx_queues fail, ret=%d!\n",
 265                           ret);
 266                return ret;
 267        }
 268
 269        ret = netif_set_real_num_rx_queues(netdev, queue_size);
 270        if (ret) {
 271                netdev_err(netdev,
 272                           "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
 273                return ret;
 274        }
 275
 276        return 0;
 277}
 278
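/* Largest per-TC channel count the hardware can back: the allocated TQPs
 * divided among the TCs, capped by the maximum RSS size.
 */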
 279static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
 280{
 281        u16 alloc_tqps, max_rss_size, rss_size;
 282
 283        h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size);
 284        rss_size = alloc_tqps / h->kinfo.num_tc;
 285
 286        return min_t(u16, rss_size, max_rss_size);
 287}
 288
 289static int hns3_nic_net_up(struct net_device *netdev)
 290{
 291        struct hns3_nic_priv *priv = netdev_priv(netdev);
 292        struct hnae3_handle *h = priv->ae_handle;
 293        int i, j;
 294        int ret;
 295
 296        ret = hns3_nic_reset_all_ring(h);
 297        if (ret)
 298                return ret;
 299
 300        /* get irq resource for all vectors */
 301        ret = hns3_nic_init_irq(priv);
 302        if (ret) {
 303                netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
 304                return ret;
 305        }
 306
 307        /* enable the vectors */
 308        for (i = 0; i < priv->vector_num; i++)
 309                hns3_vector_enable(&priv->tqp_vector[i]);
 310
 311        /* start the ae_dev */
 312        ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
 313        if (ret)
 314                goto out_start_err;
 315
 316        clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
 317
 318        return 0;
 319
 320out_start_err:
 321        for (j = i - 1; j >= 0; j--)
 322                hns3_vector_disable(&priv->tqp_vector[j]);
 323
 324        hns3_nic_uninit_irq(priv);
 325
 326        return ret;
 327}
 328
 329static int hns3_nic_net_open(struct net_device *netdev)
 330{
 331        struct hns3_nic_priv *priv = netdev_priv(netdev);
 332        struct hnae3_handle *h = hns3_get_handle(netdev);
 333        struct hnae3_knic_private_info *kinfo;
 334        int i, ret;
 335
 336        netif_carrier_off(netdev);
 337
 338        ret = hns3_nic_set_real_num_queue(netdev);
 339        if (ret)
 340                return ret;
 341
 342        ret = hns3_nic_net_up(netdev);
 343        if (ret) {
 344                netdev_err(netdev,
 345                           "hns net up fail, ret=%d!\n", ret);
 346                return ret;
 347        }
 348
 349        kinfo = &h->kinfo;
 350        for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
 351                netdev_set_prio_tc_map(netdev, i,
 352                                       kinfo->prio_tc[i]);
 353        }
 354
 355        priv->ae_handle->last_reset_time = jiffies;
 356        return 0;
 357}
 358
 359static void hns3_nic_net_down(struct net_device *netdev)
 360{
 361        struct hns3_nic_priv *priv = netdev_priv(netdev);
 362        const struct hnae3_ae_ops *ops;
 363        int i;
 364
 365        if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
 366                return;
 367
 368        /* disable vectors */
 369        for (i = 0; i < priv->vector_num; i++)
 370                hns3_vector_disable(&priv->tqp_vector[i]);
 371
 372        /* stop ae_dev */
 373        ops = priv->ae_handle->ae_algo->ops;
 374        if (ops->stop)
 375                ops->stop(priv->ae_handle);
 376
 377        /* free irq resources */
 378        hns3_nic_uninit_irq(priv);
 379
 380        hns3_clear_all_ring(priv->ae_handle);
 381}
 382
 383static int hns3_nic_net_stop(struct net_device *netdev)
 384{
 385        netif_tx_stop_all_queues(netdev);
 386        netif_carrier_off(netdev);
 387
 388        hns3_nic_net_down(netdev);
 389
 390        return 0;
 391}
 392
 393static int hns3_nic_uc_sync(struct net_device *netdev,
 394                            const unsigned char *addr)
 395{
 396        struct hnae3_handle *h = hns3_get_handle(netdev);
 397
 398        if (h->ae_algo->ops->add_uc_addr)
 399                return h->ae_algo->ops->add_uc_addr(h, addr);
 400
 401        return 0;
 402}
 403
 404static int hns3_nic_uc_unsync(struct net_device *netdev,
 405                              const unsigned char *addr)
 406{
 407        struct hnae3_handle *h = hns3_get_handle(netdev);
 408
 409        if (h->ae_algo->ops->rm_uc_addr)
 410                return h->ae_algo->ops->rm_uc_addr(h, addr);
 411
 412        return 0;
 413}
 414
 415static int hns3_nic_mc_sync(struct net_device *netdev,
 416                            const unsigned char *addr)
 417{
 418        struct hnae3_handle *h = hns3_get_handle(netdev);
 419
 420        if (h->ae_algo->ops->add_mc_addr)
 421                return h->ae_algo->ops->add_mc_addr(h, addr);
 422
 423        return 0;
 424}
 425
 426static int hns3_nic_mc_unsync(struct net_device *netdev,
 427                              const unsigned char *addr)
 428{
 429        struct hnae3_handle *h = hns3_get_handle(netdev);
 430
 431        if (h->ae_algo->ops->rm_mc_addr)
 432                return h->ae_algo->ops->rm_mc_addr(h, addr);
 433
 434        return 0;
 435}
 436
 437static u8 hns3_get_netdev_flags(struct net_device *netdev)
 438{
 439        u8 flags = 0;
 440
 441        if (netdev->flags & IFF_PROMISC) {
 442                flags = HNAE3_USER_UPE | HNAE3_USER_MPE;
 443        } else {
 444                flags |= HNAE3_VLAN_FLTR;
 445                if (netdev->flags & IFF_ALLMULTI)
 446                        flags |= HNAE3_USER_MPE;
 447        }
 448
 449        return flags;
 450}
 451
 452static void hns3_nic_set_rx_mode(struct net_device *netdev)
 453{
 454        struct hnae3_handle *h = hns3_get_handle(netdev);
 455        u8 new_flags;
 456        int ret;
 457
 458        new_flags = hns3_get_netdev_flags(netdev);
 459
 460        ret = __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
 461        if (ret) {
 462                netdev_err(netdev, "sync uc address fail\n");
 463                if (ret == -ENOSPC)
 464                        new_flags |= HNAE3_OVERFLOW_UPE;
 465        }
 466
 467        if (netdev->flags & IFF_MULTICAST) {
 468                ret = __dev_mc_sync(netdev, hns3_nic_mc_sync,
 469                                    hns3_nic_mc_unsync);
 470                if (ret) {
 471                        netdev_err(netdev, "sync mc address fail\n");
 472                        if (ret == -ENOSPC)
 473                                new_flags |= HNAE3_OVERFLOW_MPE;
 474                }
 475        }
 476
 477        hns3_update_promisc_mode(netdev, new_flags);
  478        /* With user-requested promiscuous mode, vlan filtering is disabled
  479         * to let all packets in. With promiscuous mode forced by MAC-VLAN
  480         * table overflow, vlan filtering stays enabled.
  481         */
 482        hns3_enable_vlan_filter(netdev, new_flags & HNAE3_VLAN_FLTR);
 483        h->netdev_flags = new_flags;
 484}
 485
 486void hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags)
 487{
 488        struct hns3_nic_priv *priv = netdev_priv(netdev);
 489        struct hnae3_handle *h = priv->ae_handle;
 490
 491        if (h->ae_algo->ops->set_promisc_mode) {
 492                h->ae_algo->ops->set_promisc_mode(h,
 493                                                  promisc_flags & HNAE3_UPE,
 494                                                  promisc_flags & HNAE3_MPE);
 495        }
 496}
 497
 498void hns3_enable_vlan_filter(struct net_device *netdev, bool enable)
 499{
 500        struct hns3_nic_priv *priv = netdev_priv(netdev);
 501        struct hnae3_handle *h = priv->ae_handle;
 502        bool last_state;
 503
 504        if (h->pdev->revision >= 0x21 && h->ae_algo->ops->enable_vlan_filter) {
 505                last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? true : false;
 506                if (enable != last_state) {
 507                        netdev_info(netdev,
 508                                    "%s vlan filter\n",
 509                                    enable ? "enable" : "disable");
 510                        h->ae_algo->ops->enable_vlan_filter(h, enable);
 511                }
 512
 513                if (h->ae_algo->ops->update_mta_status)
 514                        h->ae_algo->ops->update_mta_status(h);
 515        }
 516}
 517
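/* Prepare a GSO skb for hardware TSO: clear the IPv4/UDP checksum fields the
 * hardware regenerates, remove the payload length from the TCP pseudo
 * checksum, and report the payload length and MSS for the Tx BD.
 */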
 518static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
 519                        u16 *mss, u32 *type_cs_vlan_tso)
 520{
 521        u32 l4_offset, hdr_len;
 522        union l3_hdr_info l3;
 523        union l4_hdr_info l4;
 524        u32 l4_paylen;
 525        int ret;
 526
 527        if (!skb_is_gso(skb))
 528                return 0;
 529
 530        ret = skb_cow_head(skb, 0);
 531        if (ret)
 532                return ret;
 533
 534        l3.hdr = skb_network_header(skb);
 535        l4.hdr = skb_transport_header(skb);
 536
 537        /* Software should clear the IPv4's checksum field when tso is
 538         * needed.
 539         */
 540        if (l3.v4->version == 4)
 541                l3.v4->check = 0;
 542
 543        /* tunnel packet.*/
 544        if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
 545                                         SKB_GSO_GRE_CSUM |
 546                                         SKB_GSO_UDP_TUNNEL |
 547                                         SKB_GSO_UDP_TUNNEL_CSUM)) {
 548                if ((!(skb_shinfo(skb)->gso_type &
 549                    SKB_GSO_PARTIAL)) &&
 550                    (skb_shinfo(skb)->gso_type &
 551                    SKB_GSO_UDP_TUNNEL_CSUM)) {
 552                        /* Software should clear the udp's checksum
 553                         * field when tso is needed.
 554                         */
 555                        l4.udp->check = 0;
 556                }
 557                /* reset l3&l4 pointers from outer to inner headers */
 558                l3.hdr = skb_inner_network_header(skb);
 559                l4.hdr = skb_inner_transport_header(skb);
 560
 561                /* Software should clear the IPv4's checksum field when
 562                 * tso is needed.
 563                 */
 564                if (l3.v4->version == 4)
 565                        l3.v4->check = 0;
 566        }
 567
 568        /* normal or tunnel packet*/
 569        l4_offset = l4.hdr - skb->data;
 570        hdr_len = (l4.tcp->doff * 4) + l4_offset;
 571
 572        /* remove payload length from inner pseudo checksum when tso*/
 573        l4_paylen = skb->len - l4_offset;
 574        csum_replace_by_diff(&l4.tcp->check,
 575                             (__force __wsum)htonl(l4_paylen));
 576
 577        /* find the txbd field values */
 578        *paylen = skb->len - hdr_len;
 579        hnae3_set_bit(*type_cs_vlan_tso,
 580                      HNS3_TXD_TSO_B, 1);
 581
 582        /* get MSS for TSO */
 583        *mss = skb_shinfo(skb)->gso_size;
 584
 585        return 0;
 586}
 587
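/* Extract the outer and inner L4 protocol numbers from the skb headers,
 * walking IPv6 extension headers when necessary. For non-encapsulated
 * packets the inner protocol is reported as 0.
 */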
 588static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
 589                                u8 *il4_proto)
 590{
 591        union {
 592                struct iphdr *v4;
 593                struct ipv6hdr *v6;
 594                unsigned char *hdr;
 595        } l3;
 596        unsigned char *l4_hdr;
 597        unsigned char *exthdr;
 598        u8 l4_proto_tmp;
 599        __be16 frag_off;
 600
  601        /* find the outer header pointers */
 602        l3.hdr = skb_network_header(skb);
 603        l4_hdr = skb_transport_header(skb);
 604
 605        if (skb->protocol == htons(ETH_P_IPV6)) {
 606                exthdr = l3.hdr + sizeof(*l3.v6);
 607                l4_proto_tmp = l3.v6->nexthdr;
 608                if (l4_hdr != exthdr)
 609                        ipv6_skip_exthdr(skb, exthdr - skb->data,
 610                                         &l4_proto_tmp, &frag_off);
 611        } else if (skb->protocol == htons(ETH_P_IP)) {
 612                l4_proto_tmp = l3.v4->protocol;
 613        } else {
 614                return -EINVAL;
 615        }
 616
 617        *ol4_proto = l4_proto_tmp;
 618
 619        /* tunnel packet */
 620        if (!skb->encapsulation) {
 621                *il4_proto = 0;
 622                return 0;
 623        }
 624
  625        /* find the inner header pointers */
 626        l3.hdr = skb_inner_network_header(skb);
 627        l4_hdr = skb_inner_transport_header(skb);
 628
 629        if (l3.v6->version == 6) {
 630                exthdr = l3.hdr + sizeof(*l3.v6);
 631                l4_proto_tmp = l3.v6->nexthdr;
 632                if (l4_hdr != exthdr)
 633                        ipv6_skip_exthdr(skb, exthdr - skb->data,
 634                                         &l4_proto_tmp, &frag_off);
 635        } else if (l3.v4->version == 4) {
 636                l4_proto_tmp = l3.v4->protocol;
 637        }
 638
 639        *il4_proto = l4_proto_tmp;
 640
 641        return 0;
 642}
 643
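/* Fill the L2/L3/L4 (and, for tunnels, outer L2/L3/L4) header length fields
 * of the Tx BD. Lengths are encoded in units of 2 bytes for L2 and 4 bytes
 * for L3/L4, as the hardware expects.
 */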
 644static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
 645                                u8 il4_proto, u32 *type_cs_vlan_tso,
 646                                u32 *ol_type_vlan_len_msec)
 647{
 648        union {
 649                struct iphdr *v4;
 650                struct ipv6hdr *v6;
 651                unsigned char *hdr;
 652        } l3;
 653        union {
 654                struct tcphdr *tcp;
 655                struct udphdr *udp;
 656                struct gre_base_hdr *gre;
 657                unsigned char *hdr;
 658        } l4;
 659        unsigned char *l2_hdr;
 660        u8 l4_proto = ol4_proto;
 661        u32 ol2_len;
 662        u32 ol3_len;
 663        u32 ol4_len;
 664        u32 l2_len;
 665        u32 l3_len;
 666
 667        l3.hdr = skb_network_header(skb);
 668        l4.hdr = skb_transport_header(skb);
 669
 670        /* compute L2 header size for normal packet, defined in 2 Bytes */
 671        l2_len = l3.hdr - skb->data;
 672        hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
 673                        HNS3_TXD_L2LEN_S, l2_len >> 1);
 674
 675        /* tunnel packet*/
 676        if (skb->encapsulation) {
 677                /* compute OL2 header size, defined in 2 Bytes */
 678                ol2_len = l2_len;
 679                hnae3_set_field(*ol_type_vlan_len_msec,
 680                                HNS3_TXD_L2LEN_M,
 681                                HNS3_TXD_L2LEN_S, ol2_len >> 1);
 682
 683                /* compute OL3 header size, defined in 4 Bytes */
 684                ol3_len = l4.hdr - l3.hdr;
 685                hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
 686                                HNS3_TXD_L3LEN_S, ol3_len >> 2);
 687
 688                /* MAC in UDP, MAC in GRE (0x6558)*/
 689                if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
 690                        /* switch MAC header ptr from outer to inner header.*/
 691                        l2_hdr = skb_inner_mac_header(skb);
 692
 693                        /* compute OL4 header size, defined in 4 Bytes. */
 694                        ol4_len = l2_hdr - l4.hdr;
 695                        hnae3_set_field(*ol_type_vlan_len_msec,
 696                                        HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
 697                                        ol4_len >> 2);
 698
 699                        /* switch IP header ptr from outer to inner header */
 700                        l3.hdr = skb_inner_network_header(skb);
 701
 702                        /* compute inner l2 header size, defined in 2 Bytes. */
 703                        l2_len = l3.hdr - l2_hdr;
 704                        hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
 705                                        HNS3_TXD_L2LEN_S, l2_len >> 1);
 706                } else {
  707                        /* skb packet type not supported by hardware;
  708                         * the txbd len fields are left unfilled.
  709                         */
 710                        return;
 711                }
 712
 713                /* switch L4 header pointer from outer to inner */
 714                l4.hdr = skb_inner_transport_header(skb);
 715
 716                l4_proto = il4_proto;
 717        }
 718
 719        /* compute inner(/normal) L3 header size, defined in 4 Bytes */
 720        l3_len = l4.hdr - l3.hdr;
 721        hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
 722                        HNS3_TXD_L3LEN_S, l3_len >> 2);
 723
 724        /* compute inner(/normal) L4 header size, defined in 4 Bytes */
 725        switch (l4_proto) {
 726        case IPPROTO_TCP:
 727                hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
 728                                HNS3_TXD_L4LEN_S, l4.tcp->doff);
 729                break;
 730        case IPPROTO_SCTP:
 731                hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
 732                                HNS3_TXD_L4LEN_S,
 733                                (sizeof(struct sctphdr) >> 2));
 734                break;
 735        case IPPROTO_UDP:
 736                hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
 737                                HNS3_TXD_L4LEN_S,
 738                                (sizeof(struct udphdr) >> 2));
 739                break;
 740        default:
  741                /* skb packet type not supported by hardware;
  742                 * the txbd len fields are left unfilled.
  743                 */
 744                return;
 745        }
 746}
 747
  748/* When skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL and the
  749 * packet is a UDP packet whose destination port is the IANA-assigned VXLAN
  750 * port (4789), the hardware is expected to do the checksum offload but will
  751 * not do so. Fall back to software checksumming (skb_checksum_help()) for
  752 * such packets.
  753 */
 754static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
 755{
 756        union {
 757                struct tcphdr *tcp;
 758                struct udphdr *udp;
 759                struct gre_base_hdr *gre;
 760                unsigned char *hdr;
 761        } l4;
 762
 763        l4.hdr = skb_transport_header(skb);
 764
 765        if (!(!skb->encapsulation &&
 766              l4.udp->dest == htons(IANA_VXLAN_UDP_PORT)))
 767                return false;
 768
 769        skb_checksum_help(skb);
 770
 771        return true;
 772}
 773
 774static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
 775                                   u8 il4_proto, u32 *type_cs_vlan_tso,
 776                                   u32 *ol_type_vlan_len_msec)
 777{
 778        union {
 779                struct iphdr *v4;
 780                struct ipv6hdr *v6;
 781                unsigned char *hdr;
 782        } l3;
 783        u32 l4_proto = ol4_proto;
 784
 785        l3.hdr = skb_network_header(skb);
 786
 787        /* define OL3 type and tunnel type(OL4).*/
 788        if (skb->encapsulation) {
 789                /* define outer network header type.*/
 790                if (skb->protocol == htons(ETH_P_IP)) {
 791                        if (skb_is_gso(skb))
 792                                hnae3_set_field(*ol_type_vlan_len_msec,
 793                                                HNS3_TXD_OL3T_M,
 794                                                HNS3_TXD_OL3T_S,
 795                                                HNS3_OL3T_IPV4_CSUM);
 796                        else
 797                                hnae3_set_field(*ol_type_vlan_len_msec,
 798                                                HNS3_TXD_OL3T_M,
 799                                                HNS3_TXD_OL3T_S,
 800                                                HNS3_OL3T_IPV4_NO_CSUM);
 801
 802                } else if (skb->protocol == htons(ETH_P_IPV6)) {
 803                        hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
 804                                        HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
 805                }
 806
 807                /* define tunnel type(OL4).*/
 808                switch (l4_proto) {
 809                case IPPROTO_UDP:
 810                        hnae3_set_field(*ol_type_vlan_len_msec,
 811                                        HNS3_TXD_TUNTYPE_M,
 812                                        HNS3_TXD_TUNTYPE_S,
 813                                        HNS3_TUN_MAC_IN_UDP);
 814                        break;
 815                case IPPROTO_GRE:
 816                        hnae3_set_field(*ol_type_vlan_len_msec,
 817                                        HNS3_TXD_TUNTYPE_M,
 818                                        HNS3_TXD_TUNTYPE_S,
 819                                        HNS3_TUN_NVGRE);
 820                        break;
 821                default:
  822                        /* Drop the tunnel packet if hardware doesn't
  823                         * support it: it can't calculate the csum when TSO.
  824                         */
 825                        if (skb_is_gso(skb))
 826                                return -EDOM;
 827
  828                        /* The stack has already computed the IP checksum;
  829                         * compute the l4 checksum in software when not TSO.
  830                         */
 831                        skb_checksum_help(skb);
 832                        return 0;
 833                }
 834
 835                l3.hdr = skb_inner_network_header(skb);
 836                l4_proto = il4_proto;
 837        }
 838
 839        if (l3.v4->version == 4) {
 840                hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
 841                                HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
 842
 843                /* the stack computes the IP header already, the only time we
 844                 * need the hardware to recompute it is in the case of TSO.
 845                 */
 846                if (skb_is_gso(skb))
 847                        hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
 848        } else if (l3.v6->version == 6) {
 849                hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
 850                                HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
 851        }
 852
 853        switch (l4_proto) {
 854        case IPPROTO_TCP:
 855                hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
 856                hnae3_set_field(*type_cs_vlan_tso,
 857                                HNS3_TXD_L4T_M,
 858                                HNS3_TXD_L4T_S,
 859                                HNS3_L4T_TCP);
 860                break;
 861        case IPPROTO_UDP:
 862                if (hns3_tunnel_csum_bug(skb))
 863                        break;
 864
 865                hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
 866                hnae3_set_field(*type_cs_vlan_tso,
 867                                HNS3_TXD_L4T_M,
 868                                HNS3_TXD_L4T_S,
 869                                HNS3_L4T_UDP);
 870                break;
 871        case IPPROTO_SCTP:
 872                hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
 873                hnae3_set_field(*type_cs_vlan_tso,
 874                                HNS3_TXD_L4T_M,
 875                                HNS3_TXD_L4T_S,
 876                                HNS3_L4T_SCTP);
 877                break;
 878        default:
  879                /* Drop the tunnel packet if hardware doesn't support it:
  880                 * it can't calculate the csum when TSO.
  881                 */
 882                if (skb_is_gso(skb))
 883                        return -EDOM;
 884
  885                /* The stack has already computed the IP checksum;
  886                 * compute the l4 checksum in software when not TSO.
  887                 */
 888                skb_checksum_help(skb);
 889                return 0;
 890        }
 891
 892        return 0;
 893}
 894
 895static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
 896{
 897        /* Config bd buffer end */
 898        hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
 899                        HNS3_TXD_BDTYPE_S, 0);
 900        hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
 901        hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
 902        hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
 903}
 904
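/* Work out the VLAN tags for the Tx BD: an offloaded tag goes into the inner
 * VLAN field and a second (QinQ) tag into the outer field. An 802.1Q header
 * already present in the frame only has its priority bits refreshed, and when
 * HW VLAN insertion is disabled the driver just fixes up skb->protocol.
 */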
 905static int hns3_fill_desc_vtags(struct sk_buff *skb,
 906                                struct hns3_enet_ring *tx_ring,
 907                                u32 *inner_vlan_flag,
 908                                u32 *out_vlan_flag,
 909                                u16 *inner_vtag,
 910                                u16 *out_vtag)
 911{
 912#define HNS3_TX_VLAN_PRIO_SHIFT 13
 913
 914        if (skb->protocol == htons(ETH_P_8021Q) &&
 915            !(tx_ring->tqp->handle->kinfo.netdev->features &
 916            NETIF_F_HW_VLAN_CTAG_TX)) {
  917                /* When HW VLAN acceleration is turned off and the stack
  918                 * sets the protocol to 802.1Q, the driver just needs to
  919                 * set the protocol to the encapsulated ethertype.
  920                 */
 921                skb->protocol = vlan_get_protocol(skb);
 922                return 0;
 923        }
 924
 925        if (skb_vlan_tag_present(skb)) {
 926                u16 vlan_tag;
 927
 928                vlan_tag = skb_vlan_tag_get(skb);
 929                vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;
 930
  931                /* Based on the hw strategy, use out_vtag in the two-layer
  932                 * tag case and inner_vtag in the single-tag case.
  933                 */
 934                if (skb->protocol == htons(ETH_P_8021Q)) {
 935                        hnae3_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
 936                        *out_vtag = vlan_tag;
 937                } else {
 938                        hnae3_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
 939                        *inner_vtag = vlan_tag;
 940                }
 941        } else if (skb->protocol == htons(ETH_P_8021Q)) {
 942                struct vlan_ethhdr *vhdr;
 943                int rc;
 944
 945                rc = skb_cow_head(skb, 0);
 946                if (rc < 0)
 947                        return rc;
 948                vhdr = (struct vlan_ethhdr *)skb->data;
 949                vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
 950                                        << HNS3_TX_VLAN_PRIO_SHIFT);
 951        }
 952
 953        skb->protocol = vlan_get_protocol(skb);
 954        return 0;
 955}
 956
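/* Map one piece of the packet (linear head or page fragment) for DMA and fill
 * the corresponding Tx BDs, splitting the buffer across several BDs when it
 * exceeds HNS3_MAX_BD_SIZE. For the head BD this also programs the
 * checksum/TSO/VLAN fields derived from the skb.
 */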
 957static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 958                          int size, int frag_end, enum hns_desc_type type)
 959{
 960        struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
 961        struct hns3_desc *desc = &ring->desc[ring->next_to_use];
 962        struct device *dev = ring_to_dev(ring);
 963        u32 ol_type_vlan_len_msec = 0;
 964        u16 bdtp_fe_sc_vld_ra_ri = 0;
 965        struct skb_frag_struct *frag;
 966        unsigned int frag_buf_num;
 967        u32 type_cs_vlan_tso = 0;
 968        struct sk_buff *skb;
 969        u16 inner_vtag = 0;
 970        u16 out_vtag = 0;
 971        unsigned int k;
 972        int sizeoflast;
 973        u32 paylen = 0;
 974        dma_addr_t dma;
 975        u16 mss = 0;
 976        __be16 protocol;
 977        u8 ol4_proto;
 978        u8 il4_proto;
 979        int ret;
 980
 981        if (type == DESC_TYPE_SKB) {
 982                skb = (struct sk_buff *)priv;
 983                paylen = skb->len;
 984
 985                ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
 986                                           &ol_type_vlan_len_msec,
 987                                           &inner_vtag, &out_vtag);
 988                if (unlikely(ret))
 989                        return ret;
 990
 991                if (skb->ip_summed == CHECKSUM_PARTIAL) {
 992                        skb_reset_mac_len(skb);
 993                        protocol = skb->protocol;
 994
 995                        ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
 996                        if (ret)
 997                                return ret;
 998                        hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
 999                                            &type_cs_vlan_tso,
1000                                            &ol_type_vlan_len_msec);
1001                        ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
1002                                                      &type_cs_vlan_tso,
1003                                                      &ol_type_vlan_len_msec);
1004                        if (ret)
1005                                return ret;
1006
1007                        ret = hns3_set_tso(skb, &paylen, &mss,
1008                                           &type_cs_vlan_tso);
1009                        if (ret)
1010                                return ret;
1011                }
1012
1013                /* Set txbd */
1014                desc->tx.ol_type_vlan_len_msec =
1015                        cpu_to_le32(ol_type_vlan_len_msec);
1016                desc->tx.type_cs_vlan_tso_len =
1017                        cpu_to_le32(type_cs_vlan_tso);
1018                desc->tx.paylen = cpu_to_le32(paylen);
1019                desc->tx.mss = cpu_to_le16(mss);
1020                desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
1021                desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
1022
1023                dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
1024        } else {
1025                frag = (struct skb_frag_struct *)priv;
1026                dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
1027        }
1028
1029        if (dma_mapping_error(ring->dev, dma)) {
1030                ring->stats.sw_err_cnt++;
1031                return -ENOMEM;
1032        }
1033
1034        desc_cb->length = size;
1035
1036        frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
1037        sizeoflast = size % HNS3_MAX_BD_SIZE;
1038        sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
1039
1040        /* When frag size is bigger than hardware limit, split this frag */
1041        for (k = 0; k < frag_buf_num; k++) {
1042                /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
1043                desc_cb->priv = priv;
1044                desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
1045                desc_cb->type = (type == DESC_TYPE_SKB && !k) ?
1046                                        DESC_TYPE_SKB : DESC_TYPE_PAGE;
1047
1048                /* now, fill the descriptor */
1049                desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
1050                desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
1051                                (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
1052                hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri,
1053                                       frag_end && (k == frag_buf_num - 1) ?
1054                                                1 : 0);
1055                desc->tx.bdtp_fe_sc_vld_ra_ri =
1056                                cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
1057
1058                /* move ring pointer to next.*/
1059                ring_ptr_move_fw(ring, next_to_use);
1060
1061                desc_cb = &ring->desc_cb[ring->next_to_use];
1062                desc = &ring->desc[ring->next_to_use];
1063        }
1064
1065        return 0;
1066}
1067
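/* TSO variant of the ring-space check: count the BDs needed for the linear
 * part and every fragment, linearising the skb with skb_copy() when the
 * total exceeds the per-packet BD limit. Returns -EBUSY when the ring lacks
 * space and -ENOMEM when the packet cannot be handled.
 */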
1068static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
1069                                   struct hns3_enet_ring *ring)
1070{
1071        struct sk_buff *skb = *out_skb;
1072        struct sk_buff *new_skb = NULL;
1073        struct skb_frag_struct *frag;
1074        int bdnum_for_frag;
1075        int frag_num;
1076        int buf_num;
1077        int size;
1078        int i;
1079
1080        size = skb_headlen(skb);
1081        buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
1082
1083        frag_num = skb_shinfo(skb)->nr_frags;
1084        for (i = 0; i < frag_num; i++) {
1085                frag = &skb_shinfo(skb)->frags[i];
1086                size = skb_frag_size(frag);
1087                bdnum_for_frag =
1088                        (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
1089                if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)
1090                        return -ENOMEM;
1091
1092                buf_num += bdnum_for_frag;
1093        }
1094
1095        if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
1096                buf_num = (skb->len + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
1097                if (ring_space(ring) < buf_num)
1098                        return -EBUSY;
 1099                /* manually split the send packet */
1100                new_skb = skb_copy(skb, GFP_ATOMIC);
1101                if (!new_skb)
1102                        return -ENOMEM;
1103                dev_kfree_skb_any(skb);
1104                *out_skb = new_skb;
1105        }
1106
1107        if (unlikely(ring_space(ring) < buf_num))
1108                return -EBUSY;
1109
1110        *bnum = buf_num;
1111        return 0;
1112}
1113
1114static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
1115                                  struct hns3_enet_ring *ring)
1116{
1117        struct sk_buff *skb = *out_skb;
1118        struct sk_buff *new_skb = NULL;
1119        int buf_num;
1120
1121        /* No. of segments (plus a header) */
1122        buf_num = skb_shinfo(skb)->nr_frags + 1;
1123
1124        if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
1125                buf_num = (skb->len + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
1126                if (ring_space(ring) < buf_num)
1127                        return -EBUSY;
 1128                /* manually split the send packet */
1129                new_skb = skb_copy(skb, GFP_ATOMIC);
1130                if (!new_skb)
1131                        return -ENOMEM;
1132                dev_kfree_skb_any(skb);
1133                *out_skb = new_skb;
1134        }
1135
1136        if (buf_num > ring_space(ring))
1137                return -EBUSY;
1138
1139        *bnum = buf_num;
1140
1141        return 0;
1142}
1143
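/* Error unwind for the Tx path: walk next_to_use back to the given position,
 * unmapping any DMA buffers that were already set up.
 */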
1144static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
1145{
1146        struct device *dev = ring_to_dev(ring);
1147        unsigned int i;
1148
1149        for (i = 0; i < ring->desc_num; i++) {
1150                /* check if this is where we started */
1151                if (ring->next_to_use == next_to_use_orig)
1152                        break;
1153
1154                /* unmap the descriptor dma address */
1155                if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
1156                        dma_unmap_single(dev,
1157                                         ring->desc_cb[ring->next_to_use].dma,
1158                                        ring->desc_cb[ring->next_to_use].length,
1159                                        DMA_TO_DEVICE);
1160                else if (ring->desc_cb[ring->next_to_use].length)
1161                        dma_unmap_page(dev,
1162                                       ring->desc_cb[ring->next_to_use].dma,
1163                                       ring->desc_cb[ring->next_to_use].length,
1164                                       DMA_TO_DEVICE);
1165
1166                ring->desc_cb[ring->next_to_use].length = 0;
1167
1168                /* rollback one */
1169                ring_ptr_move_bw(ring, next_to_use);
1170        }
1171}
1172
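/* Main transmit entry point: reserve ring space (possibly linearising the
 * skb), fill a BD chain for the linear head and each fragment, then ring the
 * doorbell with the number of BDs used.
 */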
1173netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
1174{
1175        struct hns3_nic_priv *priv = netdev_priv(netdev);
1176        struct hns3_nic_ring_data *ring_data =
1177                &tx_ring_data(priv, skb->queue_mapping);
1178        struct hns3_enet_ring *ring = ring_data->ring;
1179        struct netdev_queue *dev_queue;
1180        struct skb_frag_struct *frag;
1181        int next_to_use_head;
1182        int next_to_use_frag;
1183        int buf_num;
1184        int seg_num;
1185        int size;
1186        int ret;
1187        int i;
1188
1189        /* Prefetch the data used later */
1190        prefetch(skb->data);
1191
1192        switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
1193        case -EBUSY:
1194                u64_stats_update_begin(&ring->syncp);
1195                ring->stats.tx_busy++;
1196                u64_stats_update_end(&ring->syncp);
1197
1198                goto out_net_tx_busy;
1199        case -ENOMEM:
1200                u64_stats_update_begin(&ring->syncp);
1201                ring->stats.sw_err_cnt++;
1202                u64_stats_update_end(&ring->syncp);
1203                netdev_err(netdev, "no memory to xmit!\n");
1204
1205                goto out_err_tx_ok;
1206        default:
1207                break;
1208        }
1209
1210        /* No. of segments (plus a header) */
1211        seg_num = skb_shinfo(skb)->nr_frags + 1;
1212        /* Fill the first part */
1213        size = skb_headlen(skb);
1214
1215        next_to_use_head = ring->next_to_use;
1216
1217        ret = priv->ops.fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0,
1218                                  DESC_TYPE_SKB);
1219        if (ret)
1220                goto head_fill_err;
1221
1222        next_to_use_frag = ring->next_to_use;
1223        /* Fill the fragments */
1224        for (i = 1; i < seg_num; i++) {
1225                frag = &skb_shinfo(skb)->frags[i - 1];
1226                size = skb_frag_size(frag);
1227
1228                ret = priv->ops.fill_desc(ring, frag, size,
1229                                          seg_num - 1 == i ? 1 : 0,
1230                                          DESC_TYPE_PAGE);
1231
1232                if (ret)
1233                        goto frag_fill_err;
1234        }
1235
 1236        /* All fragments have been translated into BDs */
1237        dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
1238        netdev_tx_sent_queue(dev_queue, skb->len);
1239
1240        wmb(); /* Commit all data before submit */
1241
1242        hnae3_queue_xmit(ring->tqp, buf_num);
1243
1244        return NETDEV_TX_OK;
1245
1246frag_fill_err:
1247        hns3_clear_desc(ring, next_to_use_frag);
1248
1249head_fill_err:
1250        hns3_clear_desc(ring, next_to_use_head);
1251
1252out_err_tx_ok:
1253        dev_kfree_skb_any(skb);
1254        return NETDEV_TX_OK;
1255
1256out_net_tx_busy:
1257        netif_stop_subqueue(netdev, ring_data->queue_index);
1258        smp_mb(); /* Commit all data before submit */
1259
1260        return NETDEV_TX_BUSY;
1261}
1262
1263static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
1264{
1265        struct hnae3_handle *h = hns3_get_handle(netdev);
1266        struct sockaddr *mac_addr = p;
1267        int ret;
1268
1269        if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
1270                return -EADDRNOTAVAIL;
1271
1272        if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
1273                netdev_info(netdev, "already using mac address %pM\n",
1274                            mac_addr->sa_data);
1275                return 0;
1276        }
1277
1278        ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
1279        if (ret) {
1280                netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
1281                return ret;
1282        }
1283
1284        ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);
1285
1286        return 0;
1287}
1288
1289static int hns3_nic_set_features(struct net_device *netdev,
1290                                 netdev_features_t features)
1291{
1292        netdev_features_t changed = netdev->features ^ features;
1293        struct hns3_nic_priv *priv = netdev_priv(netdev);
1294        struct hnae3_handle *h = priv->ae_handle;
1295        int ret;
1296
1297        if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
1298                if (features & (NETIF_F_TSO | NETIF_F_TSO6))
1299                        priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
1300                else
1301                        priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
1302        }
1303
1304        if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
1305            h->ae_algo->ops->enable_vlan_filter) {
1306                if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
1307                        h->ae_algo->ops->enable_vlan_filter(h, true);
1308                else
1309                        h->ae_algo->ops->enable_vlan_filter(h, false);
1310        }
1311
1312        if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
1313            h->ae_algo->ops->enable_hw_strip_rxvtag) {
1314                if (features & NETIF_F_HW_VLAN_CTAG_RX)
1315                        ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true);
1316                else
1317                        ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, false);
1318
1319                if (ret)
1320                        return ret;
1321        }
1322
1323        netdev->features = features;
1324        return 0;
1325}
1326
1327static void hns3_nic_get_stats64(struct net_device *netdev,
1328                                 struct rtnl_link_stats64 *stats)
1329{
1330        struct hns3_nic_priv *priv = netdev_priv(netdev);
1331        int queue_num = priv->ae_handle->kinfo.num_tqps;
1332        struct hnae3_handle *handle = priv->ae_handle;
1333        struct hns3_enet_ring *ring;
1334        unsigned int start;
1335        unsigned int idx;
1336        u64 tx_bytes = 0;
1337        u64 rx_bytes = 0;
1338        u64 tx_pkts = 0;
1339        u64 rx_pkts = 0;
1340        u64 tx_drop = 0;
1341        u64 rx_drop = 0;
1342
1343        if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
1344                return;
1345
1346        handle->ae_algo->ops->update_stats(handle, &netdev->stats);
1347
1348        for (idx = 0; idx < queue_num; idx++) {
1349                /* fetch the tx stats */
1350                ring = priv->ring_data[idx].ring;
1351                do {
1352                        start = u64_stats_fetch_begin_irq(&ring->syncp);
1353                        tx_bytes += ring->stats.tx_bytes;
1354                        tx_pkts += ring->stats.tx_pkts;
1355                        tx_drop += ring->stats.tx_busy;
1356                        tx_drop += ring->stats.sw_err_cnt;
1357                } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1358
1359                /* fetch the rx stats */
1360                ring = priv->ring_data[idx + queue_num].ring;
1361                do {
1362                        start = u64_stats_fetch_begin_irq(&ring->syncp);
1363                        rx_bytes += ring->stats.rx_bytes;
1364                        rx_pkts += ring->stats.rx_pkts;
1365                        rx_drop += ring->stats.non_vld_descs;
1366                        rx_drop += ring->stats.err_pkt_len;
1367                        rx_drop += ring->stats.l2_err;
1368                } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1369        }
1370
1371        stats->tx_bytes = tx_bytes;
1372        stats->tx_packets = tx_pkts;
1373        stats->rx_bytes = rx_bytes;
1374        stats->rx_packets = rx_pkts;
1375
1376        stats->rx_errors = netdev->stats.rx_errors;
1377        stats->multicast = netdev->stats.multicast;
1378        stats->rx_length_errors = netdev->stats.rx_length_errors;
1379        stats->rx_crc_errors = netdev->stats.rx_crc_errors;
1380        stats->rx_missed_errors = netdev->stats.rx_missed_errors;
1381
1382        stats->tx_errors = netdev->stats.tx_errors;
1383        stats->rx_dropped = rx_drop + netdev->stats.rx_dropped;
1384        stats->tx_dropped = tx_drop + netdev->stats.tx_dropped;
1385        stats->collisions = netdev->stats.collisions;
1386        stats->rx_over_errors = netdev->stats.rx_over_errors;
1387        stats->rx_frame_errors = netdev->stats.rx_frame_errors;
1388        stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
1389        stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
1390        stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
1391        stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
1392        stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
1393        stats->tx_window_errors = netdev->stats.tx_window_errors;
1394        stats->rx_compressed = netdev->stats.rx_compressed;
1395        stats->tx_compressed = netdev->stats.tx_compressed;
1396}
1397
1398static int hns3_setup_tc(struct net_device *netdev, void *type_data)
1399{
1400        struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
1401        struct hnae3_handle *h = hns3_get_handle(netdev);
1402        struct hnae3_knic_private_info *kinfo = &h->kinfo;
1403        u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
1404        u8 tc = mqprio_qopt->qopt.num_tc;
1405        u16 mode = mqprio_qopt->mode;
1406        u8 hw = mqprio_qopt->qopt.hw;
1407        bool if_running;
1408        int ret;
1409
1410        if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
1411               mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
1412                return -EOPNOTSUPP;
1413
1414        if (tc > HNAE3_MAX_TC)
1415                return -EINVAL;
1416
1417        if (!netdev)
1418                return -EINVAL;
1419
1420        if_running = netif_running(netdev);
1421        if (if_running) {
1422                hns3_nic_net_stop(netdev);
1423                msleep(100);
1424        }
1425
1426        ret = (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
1427                kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
1428        if (ret)
1429                goto out;
1430
1431        ret = hns3_nic_set_real_num_queue(netdev);
1432
1433out:
1434        if (if_running)
1435                hns3_nic_net_open(netdev);
1436
1437        return ret;
1438}
1439
1440static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
1441                             void *type_data)
1442{
1443        if (type != TC_SETUP_QDISC_MQPRIO)
1444                return -EOPNOTSUPP;
1445
1446        return hns3_setup_tc(dev, type_data);
1447}
1448
1449static int hns3_vlan_rx_add_vid(struct net_device *netdev,
1450                                __be16 proto, u16 vid)
1451{
1452        struct hnae3_handle *h = hns3_get_handle(netdev);
1453        struct hns3_nic_priv *priv = netdev_priv(netdev);
1454        int ret = -EIO;
1455
1456        if (h->ae_algo->ops->set_vlan_filter)
1457                ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
1458
1459        if (!ret)
1460                set_bit(vid, priv->active_vlans);
1461
1462        return ret;
1463}
1464
1465static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
1466                                 __be16 proto, u16 vid)
1467{
1468        struct hnae3_handle *h = hns3_get_handle(netdev);
1469        struct hns3_nic_priv *priv = netdev_priv(netdev);
1470        int ret = -EIO;
1471
1472        if (h->ae_algo->ops->set_vlan_filter)
1473                ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
1474
1475        if (!ret)
1476                clear_bit(vid, priv->active_vlans);
1477
1478        return ret;
1479}
1480
1481static void hns3_restore_vlan(struct net_device *netdev)
1482{
1483        struct hns3_nic_priv *priv = netdev_priv(netdev);
1484        u16 vid;
1485        int ret;
1486
1487        for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
1488                ret = hns3_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
1489                if (ret)
1490                        netdev_warn(netdev, "failed to restore vlan %d filter, ret = %d\n",
1491                                    vid, ret);
1492        }
1493}
1494
1495static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1496                                u8 qos, __be16 vlan_proto)
1497{
1498        struct hnae3_handle *h = hns3_get_handle(netdev);
1499        int ret = -EIO;
1500
1501        if (h->ae_algo->ops->set_vf_vlan_filter)
1502                ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
1503                                                   qos, vlan_proto);
1504
1505        return ret;
1506}
1507
1508static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
1509{
1510        struct hnae3_handle *h = hns3_get_handle(netdev);
1511        bool if_running = netif_running(netdev);
1512        int ret;
1513
1514        if (!h->ae_algo->ops->set_mtu)
1515                return -EOPNOTSUPP;
1516
1517        /* if this was called with netdev up then bring netdevice down */
1518        if (if_running) {
1519                (void)hns3_nic_net_stop(netdev);
1520                msleep(100);
1521        }
1522
1523        ret = h->ae_algo->ops->set_mtu(h, new_mtu);
1524        if (ret)
1525                netdev_err(netdev, "failed to change MTU in hardware %d\n",
1526                           ret);
1527        else
1528                netdev->mtu = new_mtu;
1529
1530        /* if the netdev was running earlier, bring it up again */
1531        if (if_running && hns3_nic_net_open(netdev))
1532                ret = -EINVAL;
1533
1534        return ret;
1535}
1536
1537static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
1538{
1539        struct hns3_nic_priv *priv = netdev_priv(ndev);
1540        struct hns3_enet_ring *tx_ring = NULL;
1541        int timeout_queue = 0;
1542        int hw_head, hw_tail;
1543        int i;
1544
1545        /* Find the stopped queue the same way the stack does */
1546        for (i = 0; i < ndev->real_num_tx_queues; i++) {
1547                struct netdev_queue *q;
1548                unsigned long trans_start;
1549
1550                q = netdev_get_tx_queue(ndev, i);
1551                trans_start = q->trans_start;
1552                if (netif_xmit_stopped(q) &&
1553                    time_after(jiffies,
1554                               (trans_start + ndev->watchdog_timeo))) {
1555                        timeout_queue = i;
1556                        break;
1557                }
1558        }
1559
1560        if (i == ndev->real_num_tx_queues) {
1561                netdev_info(ndev,
1562                            "no netdev TX timeout queue found, timeout count: %llu\n",
1563                            priv->tx_timeout_count);
1564                return false;
1565        }
1566
1567        tx_ring = priv->ring_data[timeout_queue].ring;
1568
1569        hw_head = readl_relaxed(tx_ring->tqp->io_base +
1570                                HNS3_RING_TX_RING_HEAD_REG);
1571        hw_tail = readl_relaxed(tx_ring->tqp->io_base +
1572                                HNS3_RING_TX_RING_TAIL_REG);
1573        netdev_info(ndev,
1574                    "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n",
1575                    priv->tx_timeout_count,
1576                    timeout_queue,
1577                    tx_ring->next_to_use,
1578                    tx_ring->next_to_clean,
1579                    hw_head,
1580                    hw_tail,
1581                    readl(tx_ring->tqp_vector->mask_addr));
1582
1583        return true;
1584}
1585
1586static void hns3_nic_net_timeout(struct net_device *ndev, unsigned int txqueue)
1587{
1588        struct hns3_nic_priv *priv = netdev_priv(ndev);
1589        struct hnae3_handle *h = priv->ae_handle;
1590
1591        if (!hns3_get_tx_timeo_queue_info(ndev))
1592                return;
1593
1594        priv->tx_timeout_count++;
1595
1596        if (time_before(jiffies, (h->last_reset_time + ndev->watchdog_timeo)))
1597                return;
1598
1599        /* request the reset */
1600        if (h->ae_algo->ops->reset_event)
1601                h->ae_algo->ops->reset_event(h);
1602}
1603
1604static const struct net_device_ops hns3_nic_netdev_ops = {
1605        .ndo_open               = hns3_nic_net_open,
1606        .ndo_stop               = hns3_nic_net_stop,
1607        .ndo_start_xmit         = hns3_nic_net_xmit,
1608        .ndo_tx_timeout         = hns3_nic_net_timeout,
1609        .ndo_set_mac_address    = hns3_nic_net_set_mac_address,
1610        .ndo_change_mtu         = hns3_nic_change_mtu,
1611        .ndo_set_features       = hns3_nic_set_features,
1612        .ndo_get_stats64        = hns3_nic_get_stats64,
1613        .ndo_setup_tc           = hns3_nic_setup_tc,
1614        .ndo_set_rx_mode        = hns3_nic_set_rx_mode,
1615        .ndo_vlan_rx_add_vid    = hns3_vlan_rx_add_vid,
1616        .ndo_vlan_rx_kill_vid   = hns3_vlan_rx_kill_vid,
1617        .ndo_set_vf_vlan        = hns3_ndo_set_vf_vlan,
1618};
1619
1620static bool hns3_is_phys_func(struct pci_dev *pdev)
1621{
1622        u32 dev_id = pdev->device;
1623
1624        switch (dev_id) {
1625        case HNAE3_DEV_ID_GE:
1626        case HNAE3_DEV_ID_25GE:
1627        case HNAE3_DEV_ID_25GE_RDMA:
1628        case HNAE3_DEV_ID_25GE_RDMA_MACSEC:
1629        case HNAE3_DEV_ID_50GE_RDMA:
1630        case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
1631        case HNAE3_DEV_ID_100G_RDMA_MACSEC:
1632                return true;
1633        case HNAE3_DEV_ID_100G_VF:
1634        case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF:
1635                return false;
1636        default:
1637                dev_warn(&pdev->dev, "unrecognized pci device-id %u\n",
1638                         dev_id);
1639        }
1640
1641        return false;
1642}
1643
1644static void hns3_disable_sriov(struct pci_dev *pdev)
1645{
1646        /* If our VFs are assigned we cannot shut down SR-IOV
1647         * without causing issues, so just leave the hardware
1648         * available but disabled
1649         */
1650        if (pci_vfs_assigned(pdev)) {
1651                dev_warn(&pdev->dev,
1652                         "disabling driver while VFs are assigned\n");
1653                return;
1654        }
1655
1656        pci_disable_sriov(pdev);
1657}
1658
1659/* hns3_probe - Device initialization routine
1660 * @pdev: PCI device information struct
1661 * @ent: entry in hns3_pci_tbl
1662 *
1663 * hns3_probe initializes a PF identified by a pci_dev structure.
1664 * The OS initialization, configuring of the PF private structure,
1665 * and a hardware reset occur.
1666 *
1667 * Returns 0 on success, negative on failure
1668 */
1669static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1670{
1671        struct hnae3_ae_dev *ae_dev;
1672        int ret;
1673
1674        ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
1675                              GFP_KERNEL);
1676        if (!ae_dev) {
1677                ret = -ENOMEM;
1678                return ret;
1679        }
1680
1681        ae_dev->pdev = pdev;
1682        ae_dev->flag = ent->driver_data;
1683        ae_dev->dev_type = HNAE3_DEV_KNIC;
1684        pci_set_drvdata(pdev, ae_dev);
1685
1686        hnae3_register_ae_dev(ae_dev);
1687
1688        return 0;
1689}
1690
1691/* hns3_remove - Device removal routine
1692 * @pdev: PCI device information struct
1693 */
1694static void hns3_remove(struct pci_dev *pdev)
1695{
1696        struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1697
1698        if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))
1699                hns3_disable_sriov(pdev);
1700
1701        hnae3_unregister_ae_dev(ae_dev);
1702}
1703
1704/**
1705 * hns3_pci_sriov_configure
1706 * @pdev: pointer to a pci_dev structure
1707 * @num_vfs: number of VFs to allocate
1708 *
1709 * Enable or change the number of VFs. Called when the user updates the number
1710 * of VFs in sysfs.
1711 **/
1712static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
1713{
1714        int ret;
1715
1716        if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) {
1717                dev_warn(&pdev->dev, "cannot configure SRIOV\n");
1718                return -EINVAL;
1719        }
1720
1721        if (num_vfs) {
1722                ret = pci_enable_sriov(pdev, num_vfs);
1723                if (ret)
1724                        dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret);
1725                else
1726                        return num_vfs;
1727        } else if (!pci_vfs_assigned(pdev)) {
1728                pci_disable_sriov(pdev);
1729        } else {
1730                dev_warn(&pdev->dev,
1731                         "Unable to free VFs because some are assigned to VMs.\n");
1732        }
1733
1734        return 0;
1735}
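
/* Usage sketch for the sriov_configure callback above (illustrative only,
 * not driver code): the PCI core invokes it through the standard sysfs
 * knob, e.g. something like
 *
 *   echo 4 > /sys/bus/pci/devices/<PF BDF>/sriov_numvfs   (enable 4 VFs)
 *   echo 0 > /sys/bus/pci/devices/<PF BDF>/sriov_numvfs   (disable VFs)
 *
 * where <PF BDF> is a placeholder for the PF's bus/device/function.
 * Returning num_vfs on success tells the PCI core how many VFs were
 * actually enabled.
 */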
1736
1737static struct pci_driver hns3_driver = {
1738        .name     = hns3_driver_name,
1739        .id_table = hns3_pci_tbl,
1740        .probe    = hns3_probe,
1741        .remove   = hns3_remove,
1742        .sriov_configure = hns3_pci_sriov_configure,
1743};
1744
1745/* set the default features for the hns3 netdev */
1746static void hns3_set_default_feature(struct net_device *netdev)
1747{
1748        struct hnae3_handle *h = hns3_get_handle(netdev);
1749        struct pci_dev *pdev = h->pdev;
1750
1751        netdev->priv_flags |= IFF_UNICAST_FLT;
1752
1753        netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1754                NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1755                NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1756                NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1757                NETIF_F_GSO_UDP_TUNNEL_CSUM;
1758
1759        netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
1760
1761        netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
1762
1763        netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1764                NETIF_F_HW_VLAN_CTAG_FILTER |
1765                NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
1766                NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1767                NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1768                NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1769                NETIF_F_GSO_UDP_TUNNEL_CSUM;
1770
1771        netdev->vlan_features |=
1772                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
1773                NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
1774                NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1775                NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1776                NETIF_F_GSO_UDP_TUNNEL_CSUM;
1777
1778        netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1779                NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
1780                NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1781                NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1782                NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1783                NETIF_F_GSO_UDP_TUNNEL_CSUM;
1784
1785        if (pdev->revision != 0x20)
1786                netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1787}
1788
1789static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
1790                             struct hns3_desc_cb *cb)
1791{
1792        unsigned int order = hnae3_page_order(ring);
1793        struct page *p;
1794
1795        p = dev_alloc_pages(order);
1796        if (!p)
1797                return -ENOMEM;
1798
1799        cb->priv = p;
1800        cb->page_offset = 0;
1801        cb->reuse_flag = 0;
1802        cb->buf  = page_address(p);
1803        cb->length = hnae3_page_size(ring);
1804        cb->type = DESC_TYPE_PAGE;
1805
1806        return 0;
1807}
1808
1809static void hns3_free_buffer(struct hns3_enet_ring *ring,
1810                             struct hns3_desc_cb *cb)
1811{
1812        if (cb->type == DESC_TYPE_SKB)
1813                dev_kfree_skb_any((struct sk_buff *)cb->priv);
1814        else if (!HNAE3_IS_TX_RING(ring))
1815                put_page((struct page *)cb->priv);
1816        memset(cb, 0, sizeof(*cb));
1817}
1818
1819static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
1820{
1821        cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
1822                               cb->length, ring_to_dma_dir(ring));
1823
1824        if (dma_mapping_error(ring_to_dev(ring), cb->dma))
1825                return -EIO;
1826
1827        return 0;
1828}
1829
1830static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
1831                              struct hns3_desc_cb *cb)
1832{
1833        if (cb->type == DESC_TYPE_SKB)
1834                dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
1835                                 ring_to_dma_dir(ring));
1836        else if (cb->length)
1837                dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
1838                               ring_to_dma_dir(ring));
1839}
1840
1841static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
1842{
1843        hns3_unmap_buffer(ring, &ring->desc_cb[i]);
1844        ring->desc[i].addr = 0;
1845}
1846
1847static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
1848{
1849        struct hns3_desc_cb *cb = &ring->desc_cb[i];
1850
1851        if (!ring->desc_cb[i].dma)
1852                return;
1853
1854        hns3_buffer_detach(ring, i);
1855        hns3_free_buffer(ring, cb);
1856}
1857
1858static void hns3_free_buffers(struct hns3_enet_ring *ring)
1859{
1860        int i;
1861
1862        for (i = 0; i < ring->desc_num; i++)
1863                hns3_free_buffer_detach(ring, i);
1864}
1865
1866/* free desc along with its attached buffer */
1867static void hns3_free_desc(struct hns3_enet_ring *ring)
1868{
1869        hns3_free_buffers(ring);
1870
1871        dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
1872                         ring->desc_num * sizeof(ring->desc[0]),
1873                         DMA_BIDIRECTIONAL);
1874        ring->desc_dma_addr = 0;
1875        kfree(ring->desc);
1876        ring->desc = NULL;
1877}
1878
1879static int hns3_alloc_desc(struct hns3_enet_ring *ring)
1880{
1881        int size = ring->desc_num * sizeof(ring->desc[0]);
1882
1883        ring->desc = kzalloc(size, GFP_KERNEL);
1884        if (!ring->desc)
1885                return -ENOMEM;
1886
1887        ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc,
1888                                             size, DMA_BIDIRECTIONAL);
1889        if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
1890                ring->desc_dma_addr = 0;
1891                kfree(ring->desc);
1892                ring->desc = NULL;
1893                return -ENOMEM;
1894        }
1895
1896        return 0;
1897}
1898
1899static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
1900                                   struct hns3_desc_cb *cb)
1901{
1902        int ret;
1903
1904        ret = hns3_alloc_buffer(ring, cb);
1905        if (ret)
1906                goto out;
1907
1908        ret = hns3_map_buffer(ring, cb);
1909        if (ret)
1910                goto out_with_buf;
1911
1912        return 0;
1913
1914out_with_buf:
1915        hns3_free_buffer(ring, cb);
1916out:
1917        return ret;
1918}
1919
1920static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
1921{
1922        int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);
1923
1924        if (ret)
1925                return ret;
1926
1927        ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1928
1929        return 0;
1930}
1931
1932/* Allocate rx buffer memory for all descriptors and map it for DMA */
1933static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
1934{
1935        int i, j, ret;
1936
1937        for (i = 0; i < ring->desc_num; i++) {
1938                ret = hns3_alloc_buffer_attach(ring, i);
1939                if (ret)
1940                        goto out_buffer_fail;
1941        }
1942
1943        return 0;
1944
1945out_buffer_fail:
1946        for (j = i - 1; j >= 0; j--)
1947                hns3_free_buffer_detach(ring, j);
1948        return ret;
1949}
1950
1951/* detach an in-use buffer and replace it with a reserved one */
1952static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
1953                                struct hns3_desc_cb *res_cb)
1954{
1955        hns3_unmap_buffer(ring, &ring->desc_cb[i]);
1956        ring->desc_cb[i] = *res_cb;
1957        ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1958        ring->desc[i].rx.bd_base_info = 0;
1959}
1960
1961static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
1962{
1963        ring->desc_cb[i].reuse_flag = 0;
1964        ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
1965                + ring->desc_cb[i].page_offset);
1966        ring->desc[i].rx.bd_base_info = 0;
1967}
1968
1969static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
1970                                      int *pkts)
1971{
1972        struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
1973
1974        (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
1975        (*bytes) += desc_cb->length;
1976        /* desc_cb will be cleaned by hns3_free_buffer_detach() below */
1977        hns3_free_buffer_detach(ring, ring->next_to_clean);
1978
1979        ring_ptr_move_fw(ring, next_to_clean);
1980}
1981
1982static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
1983{
1984        int u = ring->next_to_use;
1985        int c = ring->next_to_clean;
1986
1987        if (unlikely(h > ring->desc_num))
1988                return 0;
1989
1990        return u > c ? (h > c && h <= u) : (h > c || h <= u);
1991}
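
/* Worked example for the wrap-around check above (illustrative only):
 * with desc_num = 512, next_to_clean c = 500 and next_to_use u = 10 the
 * ring has wrapped, so a hardware head h is accepted when h > 500 or
 * h <= 10; without wrapping (u > c) it must satisfy c < h <= u. Any h
 * greater than desc_num is always rejected.
 */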
1992
1993void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
1994{
1995        struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1996        struct hns3_nic_priv *priv = netdev_priv(netdev);
1997        struct netdev_queue *dev_queue;
1998        int bytes, pkts;
1999        int head;
2000
2001        head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
2002        rmb(); /* Make sure head is read before any other ring data is touched */
2003
2004        if (is_ring_empty(ring) || head == ring->next_to_clean)
2005                return; /* no data to poll */
2006
2007        if (unlikely(!is_valid_clean_head(ring, head))) {
2008                netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
2009                           ring->next_to_use, ring->next_to_clean);
2010
2011                u64_stats_update_begin(&ring->syncp);
2012                ring->stats.io_err_cnt++;
2013                u64_stats_update_end(&ring->syncp);
2014                return;
2015        }
2016
2017        bytes = 0;
2018        pkts = 0;
2019        while (head != ring->next_to_clean) {
2020                hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
2021                /* Issue prefetch for next Tx descriptor */
2022                prefetch(&ring->desc_cb[ring->next_to_clean]);
2023        }
2024
2025        ring->tqp_vector->tx_group.total_bytes += bytes;
2026        ring->tqp_vector->tx_group.total_packets += pkts;
2027
2028        u64_stats_update_begin(&ring->syncp);
2029        ring->stats.tx_bytes += bytes;
2030        ring->stats.tx_pkts += pkts;
2031        u64_stats_update_end(&ring->syncp);
2032
2033        dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
2034        netdev_tx_completed_queue(dev_queue, pkts, bytes);
2035
2036        if (unlikely(pkts && netif_carrier_ok(netdev) &&
2037                     (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
2038                /* Make sure that anybody stopping the queue after this
2039                 * sees the new next_to_clean.
2040                 */
2041                smp_mb();
2042                if (netif_tx_queue_stopped(dev_queue) &&
2043                    !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
2044                        netif_tx_wake_queue(dev_queue);
2045                        ring->stats.restart_queue++;
2046                }
2047        }
2048}
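
/* Note on the smp_mb() in the wake-up path above (explanatory, based on
 * the usual netdev stop/wake pattern): it is assumed to pair with a
 * barrier on the transmit side so that a queue stopped right after the
 * space check still observes the updated next_to_clean, avoiding a lost
 * wake-up when ring space is re-evaluated.
 */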
2049
2050static int hns3_desc_unused(struct hns3_enet_ring *ring)
2051{
2052        int ntc = ring->next_to_clean;
2053        int ntu = ring->next_to_use;
2054
2055        return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
2056}
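
/* Worked example for the index arithmetic above (illustrative only):
 * with desc_num = 512, next_to_clean = 10 and next_to_use = 500 the
 * function returns ((10 >= 500) ? 0 : 512) + 10 - 500 = 22, i.e. 22
 * descriptors are free to be refilled by hns3_nic_alloc_rx_buffers().
 */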
2057
2058static void
2059hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleaned_count)
2060{
2061        struct hns3_desc_cb *desc_cb;
2062        struct hns3_desc_cb res_cbs;
2063        int i, ret;
2064
2065        for (i = 0; i < cleaned_count; i++) {
2066                desc_cb = &ring->desc_cb[ring->next_to_use];
2067                if (desc_cb->reuse_flag) {
2068                        u64_stats_update_begin(&ring->syncp);
2069                        ring->stats.reuse_pg_cnt++;
2070                        u64_stats_update_end(&ring->syncp);
2071
2072                        hns3_reuse_buffer(ring, ring->next_to_use);
2073                } else {
2074                        ret = hns3_reserve_buffer_map(ring, &res_cbs);
2075                        if (ret) {
2076                                u64_stats_update_begin(&ring->syncp);
2077                                ring->stats.sw_err_cnt++;
2078                                u64_stats_update_end(&ring->syncp);
2079
2080                                netdev_err(ring->tqp->handle->kinfo.netdev,
2081                                           "failed to reserve and map a new rx buffer\n");
2082                                break;
2083                        }
2084                        hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
2085                }
2086
2087                ring_ptr_move_fw(ring, next_to_use);
2088        }
2089
2090        wmb(); /* Make sure all buffer writes are visible before ringing the doorbell */
2091        writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
2092}
2093
2094static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
2095                                struct hns3_enet_ring *ring, int pull_len,
2096                                struct hns3_desc_cb *desc_cb)
2097{
2098        struct hns3_desc *desc;
2099        u32 truesize;
2100        int size;
2101        int last_offset;
2102        bool twobufs;
2103
2104        twobufs = ((PAGE_SIZE < 8192) &&
2105                hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
2106
2107        desc = &ring->desc[ring->next_to_clean];
2108        size = le16_to_cpu(desc->rx.size);
2109
2110        truesize = hnae3_buf_size(ring);
2111
2112        if (!twobufs)
2113                last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring);
2114
2115        skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
2116                        size - pull_len, truesize);
2117
2118        /* Avoid re-using pages from a remote NUMA node; default to no reuse */
2119        if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
2120                return;
2121
2122        if (twobufs) {
2123                /* If we are only owner of page we can reuse it */
2124                if (likely(page_count(desc_cb->priv) == 1)) {
2125                        /* Flip page offset to other buffer */
2126                        desc_cb->page_offset ^= truesize;
2127
2128                        desc_cb->reuse_flag = 1;
2129                        /* bump ref count on page before it is given to the stack */
2130                        get_page(desc_cb->priv);
2131                }
2132                return;
2133        }
2134
2135        /* Move offset up to the next buffer in the page */
2136        desc_cb->page_offset += truesize;
2137
2138        if (desc_cb->page_offset <= last_offset) {
2139                desc_cb->reuse_flag = 1;
2140                /* Bump ref count on page before it is given to the stack */
2141                get_page(desc_cb->priv);
2142        }
2143}
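
/* Sketch of the page reuse scheme above (illustrative numbers): with a
 * 4K page split into two 2K buffers, "page_offset ^= truesize" toggles
 * page_offset between 0 and 2048, so the two halves are handed to the
 * stack alternately and the page is reused as long as the driver holds
 * the only reference. With larger buffers, page_offset instead advances
 * in truesize steps and reuse stops once it passes last_offset.
 */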
2144
2145static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
2146                             struct hns3_desc *desc)
2147{
2148        struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2149        int l3_type, l4_type;
2150        u32 bd_base_info;
2151        int ol4_type;
2152        u32 l234info;
2153
2154        bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2155        l234info = le32_to_cpu(desc->rx.l234_info);
2156
2157        skb->ip_summed = CHECKSUM_NONE;
2158
2159        skb_checksum_none_assert(skb);
2160
2161        if (!(netdev->features & NETIF_F_RXCSUM))
2162                return;
2163
2164        /* check if hardware has done checksum */
2165        if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
2166                return;
2167
2168        if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L3E_B) ||
2169                     hnae3_get_bit(l234info, HNS3_RXD_L4E_B) ||
2170                     hnae3_get_bit(l234info, HNS3_RXD_OL3E_B) ||
2171                     hnae3_get_bit(l234info, HNS3_RXD_OL4E_B))) {
2172                u64_stats_update_begin(&ring->syncp);
2173                ring->stats.l3l4_csum_err++;
2174                u64_stats_update_end(&ring->syncp);
2175
2176                return;
2177        }
2178
2179        l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
2180                                  HNS3_RXD_L3ID_S);
2181        l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
2182                                  HNS3_RXD_L4ID_S);
2183
2184        ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M,
2185                                   HNS3_RXD_OL4ID_S);
2186        switch (ol4_type) {
2187        case HNS3_OL4_TYPE_MAC_IN_UDP:
2188        case HNS3_OL4_TYPE_NVGRE:
2189                skb->csum_level = 1;
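                /* fall through */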
2190        case HNS3_OL4_TYPE_NO_TUN:
2191                /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
2192                if ((l3_type == HNS3_L3_TYPE_IPV4 ||
2193                     l3_type == HNS3_L3_TYPE_IPV6) &&
2194                    (l4_type == HNS3_L4_TYPE_UDP ||
2195                     l4_type == HNS3_L4_TYPE_TCP ||
2196                     l4_type == HNS3_L4_TYPE_SCTP))
2197                        skb->ip_summed = CHECKSUM_UNNECESSARY;
2198                break;
2199        default:
2200                break;
2201        }
2202}
2203
2204static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
2205{
2206        napi_gro_receive(&ring->tqp_vector->napi, skb);
2207}
2208
2209static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
2210                                struct hns3_desc *desc, u32 l234info,
2211                                u16 *vlan_tag)
2212{
2213        struct pci_dev *pdev = ring->tqp->handle->pdev;
2214
2215        if (pdev->revision == 0x20) {
2216                *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2217                if (!(*vlan_tag & VLAN_VID_MASK))
2218                        *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2219
2220                return (*vlan_tag != 0);
2221        }
2222
2223#define HNS3_STRP_OUTER_VLAN    0x1
2224#define HNS3_STRP_INNER_VLAN    0x2
2225
2226        switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
2227                                HNS3_RXD_STRP_TAGP_S)) {
2228        case HNS3_STRP_OUTER_VLAN:
2229                *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2230                return true;
2231        case HNS3_STRP_INNER_VLAN:
2232                *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2233                return true;
2234        default:
2235                return false;
2236        }
2237}
2238
2239static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
2240                             struct sk_buff **out_skb, int *out_bnum)
2241{
2242        struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2243        struct hns3_desc_cb *desc_cb;
2244        struct hns3_desc *desc;
2245        struct sk_buff *skb;
2246        unsigned char *va;
2247        u32 bd_base_info;
2248        int pull_len;
2249        u32 l234info;
2250        int length;
2251        int bnum;
2252
2253        desc = &ring->desc[ring->next_to_clean];
2254        desc_cb = &ring->desc_cb[ring->next_to_clean];
2255
2256        prefetch(desc);
2257
2258        length = le16_to_cpu(desc->rx.size);
2259        bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2260
2261        /* Check valid BD */
2262        if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
2263                return -EFAULT;
2264
2265        va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
2266
2267        /* Prefetch the first cache line of the first page.
2268         * The idea is to cache a few bytes of the packet header. With a
2269         * 64B L1 cache line, caching 128B takes two prefetches; the
2270         * net_prefetch() call below issues the second prefetch only when
2271         * the L1 cache line is smaller than 128B, so a single call is
2272         * enough on either cache geometry.
2273         */
2274        net_prefetch(va);
2275
2276        skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
2277                                        HNS3_RX_HEAD_SIZE);
2278        if (unlikely(!skb)) {
2279                netdev_err(netdev, "alloc rx skb fail\n");
2280
2281                u64_stats_update_begin(&ring->syncp);
2282                ring->stats.sw_err_cnt++;
2283                u64_stats_update_end(&ring->syncp);
2284
2285                return -ENOMEM;
2286        }
2287
2288        prefetchw(skb->data);
2289
2290        bnum = 1;
2291        if (length <= HNS3_RX_HEAD_SIZE) {
2292                memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
2293
2294                /* We can reuse buffer as-is, just make sure it is local */
2295                if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
2296                        desc_cb->reuse_flag = 1;
2297                else /* This page cannot be reused so discard it */
2298                        put_page(desc_cb->priv);
2299
2300                ring_ptr_move_fw(ring, next_to_clean);
2301        } else {
2302                u64_stats_update_begin(&ring->syncp);
2303                ring->stats.seg_pkt_cnt++;
2304                u64_stats_update_end(&ring->syncp);
2305
2306                pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE);
2307
2308                memcpy(__skb_put(skb, pull_len), va,
2309                       ALIGN(pull_len, sizeof(long)));
2310
2311                hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
2312                ring_ptr_move_fw(ring, next_to_clean);
2313
2314                while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
2315                        desc = &ring->desc[ring->next_to_clean];
2316                        desc_cb = &ring->desc_cb[ring->next_to_clean];
2317                        bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2318                        hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
2319                        ring_ptr_move_fw(ring, next_to_clean);
2320                        bnum++;
2321                }
2322        }
2323
2324        *out_bnum = bnum;
2325
2326        l234info = le32_to_cpu(desc->rx.l234_info);
2327
2328        /* Based on the hardware strategy, the stripped tag is stored in
2329         * ot_vlan_tag in the double-tag case and in vlan_tag in the
2330         * single-tag case.
2331         */
2332        if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
2333                u16 vlan_tag;
2334
2335                if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
2336                        __vlan_hwaccel_put_tag(skb,
2337                                               htons(ETH_P_8021Q),
2338                                               vlan_tag);
2339        }
2340
2341        if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
2342                u64_stats_update_begin(&ring->syncp);
2343                ring->stats.non_vld_descs++;
2344                u64_stats_update_end(&ring->syncp);
2345
2346                dev_kfree_skb_any(skb);
2347                return -EINVAL;
2348        }
2349
2350        if (unlikely((!desc->rx.pkt_len) ||
2351                     hnae3_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
2352                u64_stats_update_begin(&ring->syncp);
2353                ring->stats.err_pkt_len++;
2354                u64_stats_update_end(&ring->syncp);
2355
2356                dev_kfree_skb_any(skb);
2357                return -EFAULT;
2358        }
2359
2360        if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L2E_B))) {
2361                u64_stats_update_begin(&ring->syncp);
2362                ring->stats.l2_err++;
2363                u64_stats_update_end(&ring->syncp);
2364
2365                dev_kfree_skb_any(skb);
2366                return -EFAULT;
2367        }
2368
2369        u64_stats_update_begin(&ring->syncp);
2370        ring->stats.rx_pkts++;
2371        ring->stats.rx_bytes += skb->len;
2372        u64_stats_update_end(&ring->syncp);
2373
2374        ring->tqp_vector->rx_group.total_bytes += skb->len;
2375
2376        hns3_rx_checksum(ring, skb, desc);
2377        return 0;
2378}
2379
2380int hns3_clean_rx_ring(
2381                struct hns3_enet_ring *ring, int budget,
2382                void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
2383{
2384#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
2385        struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2386        int recv_pkts, recv_bds, clean_count, err;
2387        int unused_count = hns3_desc_unused(ring);
2388        struct sk_buff *skb = NULL;
2389        int num, bnum = 0;
2390
2391        num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
2392        rmb(); /* Make sure num is read before any other ring data is touched */
2393
2394        recv_pkts = 0, recv_bds = 0, clean_count = 0;
2395        num -= unused_count;
2396
2397        while (recv_pkts < budget && recv_bds < num) {
2398                /* Reuse or realloc buffers */
2399                if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
2400                        hns3_nic_alloc_rx_buffers(ring,
2401                                                  clean_count + unused_count);
2402                        clean_count = 0;
2403                        unused_count = hns3_desc_unused(ring);
2404                }
2405
2406                /* Poll one pkt */
2407                err = hns3_handle_rx_bd(ring, &skb, &bnum);
2408                if (unlikely(!skb)) /* This fault cannot be repaired */
2409                        goto out;
2410
2411                recv_bds += bnum;
2412                clean_count += bnum;
2413                if (unlikely(err)) {  /* Skip this erroneous packet */
2414                        recv_pkts++;
2415                        continue;
2416                }
2417
2418                /* Hand the packet up to the network stack */
2419                skb->protocol = eth_type_trans(skb, netdev);
2420                rx_fn(ring, skb);
2421
2422                recv_pkts++;
2423        }
2424
2425out:
2426        /* Refill the rx ring with any buffers consumed or left unused above */
2427        if (clean_count + unused_count > 0)
2428                hns3_nic_alloc_rx_buffers(ring,
2429                                          clean_count + unused_count);
2430
2431        return recv_pkts;
2432}
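
/* Refill strategy note for the loop above (illustrative only): buffers
 * are recycled in batches of RCB_NOF_ALLOC_RX_BUFF_ONCE (16), so a
 * budget of 64 single-BD packets triggers roughly four refills inside
 * the loop plus one final refill for the remainder, amortising the
 * doorbell write performed in hns3_nic_alloc_rx_buffers().
 */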
2433
2434static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
2435{
2436        struct hns3_enet_tqp_vector *tqp_vector =
2437                                        ring_group->ring->tqp_vector;
2438        enum hns3_flow_level_range new_flow_level;
2439        int packets_per_msecs;
2440        int bytes_per_msecs;
2441        u32 time_passed_ms;
2442        u16 new_int_gl;
2443
2444        if (!ring_group->coal.int_gl || !tqp_vector->last_jiffies)
2445                return false;
2446
2447        if (ring_group->total_packets == 0) {
2448                ring_group->coal.int_gl = HNS3_INT_GL_50K;
2449                ring_group->coal.flow_level = HNS3_FLOW_LOW;
2450                return true;
2451        }
2452
2453        /* Simple throttle rate management
2454         *  0-10 MB/s    low    (50000 ints/s)
2455         * 10-20 MB/s    mid    (20000 ints/s)
2456         * 20-1249 MB/s  high   (18000 ints/s)
2457         * > 40000 pps   ultra  (8000 ints/s)
2458         */
2459        new_flow_level = ring_group->coal.flow_level;
2460        new_int_gl = ring_group->coal.int_gl;
2461        time_passed_ms =
2462                jiffies_to_msecs(jiffies - tqp_vector->last_jiffies);
2463
2464        if (!time_passed_ms)
2465                return false;
2466
2467        do_div(ring_group->total_packets, time_passed_ms);
2468        packets_per_msecs = ring_group->total_packets;
2469
2470        do_div(ring_group->total_bytes, time_passed_ms);
2471        bytes_per_msecs = ring_group->total_bytes;
2472
2473#define HNS3_RX_LOW_BYTE_RATE 10000
2474#define HNS3_RX_MID_BYTE_RATE 20000
2475
2476        switch (new_flow_level) {
2477        case HNS3_FLOW_LOW:
2478                if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE)
2479                        new_flow_level = HNS3_FLOW_MID;
2480                break;
2481        case HNS3_FLOW_MID:
2482                if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE)
2483                        new_flow_level = HNS3_FLOW_HIGH;
2484                else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE)
2485                        new_flow_level = HNS3_FLOW_LOW;
2486                break;
2487        case HNS3_FLOW_HIGH:
2488        case HNS3_FLOW_ULTRA:
2489        default:
2490                if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE)
2491                        new_flow_level = HNS3_FLOW_MID;
2492                break;
2493        }
2494
2495#define HNS3_RX_ULTRA_PACKET_RATE 40
2496
2497        if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE &&
2498            &tqp_vector->rx_group == ring_group)
2499                new_flow_level = HNS3_FLOW_ULTRA;
2500
2501        switch (new_flow_level) {
2502        case HNS3_FLOW_LOW:
2503                new_int_gl = HNS3_INT_GL_50K;
2504                break;
2505        case HNS3_FLOW_MID:
2506                new_int_gl = HNS3_INT_GL_20K;
2507                break;
2508        case HNS3_FLOW_HIGH:
2509                new_int_gl = HNS3_INT_GL_18K;
2510                break;
2511        case HNS3_FLOW_ULTRA:
2512                new_int_gl = HNS3_INT_GL_8K;
2513                break;
2514        default:
2515                break;
2516        }
2517
2518        ring_group->total_bytes = 0;
2519        ring_group->total_packets = 0;
2520        ring_group->coal.flow_level = new_flow_level;
2521        if (new_int_gl != ring_group->coal.int_gl) {
2522                ring_group->coal.int_gl = new_int_gl;
2523                return true;
2524        }
2525        return false;
2526}
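
/* Worked example of the adaptive coalescing above (illustrative only):
 * a vector that moved about 30000 bytes/ms in the last interval climbs
 * from HNS3_FLOW_MID to HNS3_FLOW_HIGH and int_gl becomes
 * HNS3_INT_GL_18K; once the rate falls to 20000 bytes/ms or less it
 * drops back to HNS3_FLOW_MID / HNS3_INT_GL_20K. The caller reprograms
 * the GL register only when this function reports a change.
 */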
2527
2528static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
2529{
2530        struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
2531        struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
2532        bool rx_update, tx_update;
2533
2534        if (tqp_vector->int_adapt_down > 0) {
2535                tqp_vector->int_adapt_down--;
2536                return;
2537        }
2538
2539        if (rx_group->coal.gl_adapt_enable) {
2540                rx_update = hns3_get_new_int_gl(rx_group);
2541                if (rx_update)
2542                        hns3_set_vector_coalesce_rx_gl(tqp_vector,
2543                                                       rx_group->coal.int_gl);
2544        }
2545
2546        if (tx_group->coal.gl_adapt_enable) {
2547                tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group);
2548                if (tx_update)
2549                        hns3_set_vector_coalesce_tx_gl(tqp_vector,
2550                                                       tx_group->coal.int_gl);
2551        }
2552
2553        tqp_vector->last_jiffies = jiffies;
2554        tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
2555}
2556
2557static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
2558{
2559        struct hns3_enet_ring *ring;
2560        int rx_pkt_total = 0;
2561
2562        struct hns3_enet_tqp_vector *tqp_vector =
2563                container_of(napi, struct hns3_enet_tqp_vector, napi);
2564        bool clean_complete = true;
2565        int rx_budget;
2566
2567        /* Since the actual Tx work is minimal, we can give the Tx a larger
2568         * budget and be more aggressive about cleaning up the Tx descriptors.
2569         */
2570        hns3_for_each_ring(ring, tqp_vector->tx_group)
2571                hns3_clean_tx_ring(ring);
2572
2573        /* make sure the rx ring budget is at least 1 */
2574        rx_budget = max(budget / tqp_vector->num_tqps, 1);
2575
2576        hns3_for_each_ring(ring, tqp_vector->rx_group) {
2577                int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
2578                                                    hns3_rx_skb);
2579
2580                if (rx_cleaned >= rx_budget)
2581                        clean_complete = false;
2582
2583                rx_pkt_total += rx_cleaned;
2584        }
2585
2586        tqp_vector->rx_group.total_packets += rx_pkt_total;
2587
2588        if (!clean_complete)
2589                return budget;
2590
2591        napi_complete(napi);
2592        hns3_update_new_int_gl(tqp_vector);
2593        hns3_mask_vector_irq(tqp_vector, 1);
2594
2595        return rx_pkt_total;
2596}
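
/* Budget split example for the poll routine above (illustrative only):
 * with a NAPI budget of 64 and num_tqps = 4, each rx ring is polled
 * with rx_budget = max(64 / 4, 1) = 16; if any ring consumes its full
 * share, clean_complete stays false and the whole budget is returned so
 * NAPI keeps polling instead of re-enabling the vector's interrupt.
 */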
2597
2598static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2599                                      struct hnae3_ring_chain_node *head)
2600{
2601        struct pci_dev *pdev = tqp_vector->handle->pdev;
2602        struct hnae3_ring_chain_node *cur_chain = head;
2603        struct hnae3_ring_chain_node *chain;
2604        struct hns3_enet_ring *tx_ring;
2605        struct hns3_enet_ring *rx_ring;
2606
2607        tx_ring = tqp_vector->tx_group.ring;
2608        if (tx_ring) {
2609                cur_chain->tqp_index = tx_ring->tqp->tqp_index;
2610                hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2611                              HNAE3_RING_TYPE_TX);
2612                hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
2613                                HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
2614
2615                cur_chain->next = NULL;
2616
2617                while (tx_ring->next) {
2618                        tx_ring = tx_ring->next;
2619
2620                        chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
2621                                             GFP_KERNEL);
2622                        if (!chain)
2623                                return -ENOMEM;
2624
2625                        cur_chain->next = chain;
2626                        chain->tqp_index = tx_ring->tqp->tqp_index;
2627                        hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2628                                      HNAE3_RING_TYPE_TX);
2629                        hnae3_set_field(chain->int_gl_idx,
2630                                        HNAE3_RING_GL_IDX_M,
2631                                        HNAE3_RING_GL_IDX_S,
2632                                        HNAE3_RING_GL_TX);
2633
2634                        cur_chain = chain;
2635                }
2636        }
2637
2638        rx_ring = tqp_vector->rx_group.ring;
2639        if (!tx_ring && rx_ring) {
2640                cur_chain->next = NULL;
2641                cur_chain->tqp_index = rx_ring->tqp->tqp_index;
2642                hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2643                              HNAE3_RING_TYPE_RX);
2644                hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
2645                                HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
2646
2647                rx_ring = rx_ring->next;
2648        }
2649
2650        while (rx_ring) {
2651                chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
2652                if (!chain)
2653                        return -ENOMEM;
2654
2655                cur_chain->next = chain;
2656                chain->tqp_index = rx_ring->tqp->tqp_index;
2657                hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2658                              HNAE3_RING_TYPE_RX);
2659                hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
2660                                HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
2661
2662                cur_chain = chain;
2663
2664                rx_ring = rx_ring->next;
2665        }
2666
2667        return 0;
2668}
2669
2670static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2671                                        struct hnae3_ring_chain_node *head)
2672{
2673        struct pci_dev *pdev = tqp_vector->handle->pdev;
2674        struct hnae3_ring_chain_node *chain_tmp, *chain;
2675
2676        chain = head->next;
2677
2678        while (chain) {
2679                chain_tmp = chain->next;
2680                devm_kfree(&pdev->dev, chain);
2681                chain = chain_tmp;
2682        }
2683}
2684
2685static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
2686                                   struct hns3_enet_ring *ring)
2687{
2688        ring->next = group->ring;
2689        group->ring = ring;
2690
2691        group->count++;
2692}
2693
2694static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
2695{
2696        struct hnae3_ring_chain_node vector_ring_chain;
2697        struct hnae3_handle *h = priv->ae_handle;
2698        struct hns3_enet_tqp_vector *tqp_vector;
2699        int ret = 0;
2700        u16 i;
2701
2702        for (i = 0; i < priv->vector_num; i++) {
2703                tqp_vector = &priv->tqp_vector[i];
2704                hns3_vector_gl_rl_init_hw(tqp_vector, priv);
2705                tqp_vector->num_tqps = 0;
2706        }
2707
2708        for (i = 0; i < h->kinfo.num_tqps; i++) {
2709                u16 vector_i = i % priv->vector_num;
2710                u16 tqp_num = h->kinfo.num_tqps;
2711
2712                tqp_vector = &priv->tqp_vector[vector_i];
2713
2714                hns3_add_ring_to_group(&tqp_vector->tx_group,
2715                                       priv->ring_data[i].ring);
2716
2717                hns3_add_ring_to_group(&tqp_vector->rx_group,
2718                                       priv->ring_data[i + tqp_num].ring);
2719
2720                priv->ring_data[i].ring->tqp_vector = tqp_vector;
2721                priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
2722                tqp_vector->num_tqps++;
2723        }
2724
2725        for (i = 0; i < priv->vector_num; i++) {
2726                tqp_vector = &priv->tqp_vector[i];
2727
2728                tqp_vector->rx_group.total_bytes = 0;
2729                tqp_vector->rx_group.total_packets = 0;
2730                tqp_vector->tx_group.total_bytes = 0;
2731                tqp_vector->tx_group.total_packets = 0;
2732                tqp_vector->handle = h;
2733
2734                ret = hns3_get_vector_ring_chain(tqp_vector,
2735                                                 &vector_ring_chain);
2736                if (ret)
2737                        return ret;
2738
2739                ret = h->ae_algo->ops->map_ring_to_vector(h,
2740                        tqp_vector->vector_irq, &vector_ring_chain);
2741
2742                hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2743
2744                if (ret)
2745                        return ret;
2746
2747                netif_napi_add(priv->netdev, &tqp_vector->napi,
2748                               hns3_nic_common_poll, NAPI_POLL_WEIGHT);
2749        }
2750
2751        return 0;
2752}
2753
2754static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
2755{
2756        struct hnae3_handle *h = priv->ae_handle;
2757        struct hns3_enet_tqp_vector *tqp_vector;
2758        struct hnae3_vector_info *vector;
2759        struct pci_dev *pdev = h->pdev;
2760        u16 tqp_num = h->kinfo.num_tqps;
2761        u16 vector_num;
2762        int ret = 0;
2763        u16 i;
2764
2765        /* RSS size, online CPU count and vector_num should be the same */
2766        /* 2P/4P systems may need special handling later */
2767        vector_num = min_t(u16, num_online_cpus(), tqp_num);
2768        vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
2769                              GFP_KERNEL);
2770        if (!vector)
2771                return -ENOMEM;
2772
2773        vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
2774
2775        priv->vector_num = vector_num;
2776        priv->tqp_vector = (struct hns3_enet_tqp_vector *)
2777                devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
2778                             GFP_KERNEL);
2779        if (!priv->tqp_vector) {
2780                ret = -ENOMEM;
2781                goto out;
2782        }
2783
2784        for (i = 0; i < priv->vector_num; i++) {
2785                tqp_vector = &priv->tqp_vector[i];
2786                tqp_vector->idx = i;
2787                tqp_vector->mask_addr = vector[i].io_addr;
2788                tqp_vector->vector_irq = vector[i].vector;
2789                hns3_vector_gl_rl_init(tqp_vector, priv);
2790        }
2791
2792out:
2793        devm_kfree(&pdev->dev, vector);
2794        return ret;
2795}
2796
2797static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
2798{
2799        group->ring = NULL;
2800        group->count = 0;
2801}
2802
2803static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
2804{
2805        struct hnae3_ring_chain_node vector_ring_chain;
2806        struct hnae3_handle *h = priv->ae_handle;
2807        struct hns3_enet_tqp_vector *tqp_vector;
2808        int i;
2809
2810        for (i = 0; i < priv->vector_num; i++) {
2811                tqp_vector = &priv->tqp_vector[i];
2812
2813                hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain);
2814
2815                h->ae_algo->ops->unmap_ring_from_vector(h,
2816                        tqp_vector->vector_irq, &vector_ring_chain);
2817
2818                hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2819
2820                if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
2821                        (void)irq_set_affinity_hint(
2822                                priv->tqp_vector[i].vector_irq,
2823                                NULL);
2824                        free_irq(priv->tqp_vector[i].vector_irq,
2825                                 &priv->tqp_vector[i]);
2826                }
2827
2828                priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
2829                hns3_clear_ring_group(&tqp_vector->rx_group);
2830                hns3_clear_ring_group(&tqp_vector->tx_group);
2831                netif_napi_del(&priv->tqp_vector[i].napi);
2832        }
2833}
2834
2835static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
2836{
2837        struct hnae3_handle *h = priv->ae_handle;
2838        struct pci_dev *pdev = h->pdev;
2839        int i, ret;
2840
2841        for (i = 0; i < priv->vector_num; i++) {
2842                struct hns3_enet_tqp_vector *tqp_vector;
2843
2844                tqp_vector = &priv->tqp_vector[i];
2845                ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
2846                if (ret)
2847                        return ret;
2848        }
2849
2850        devm_kfree(&pdev->dev, priv->tqp_vector);
2851        return 0;
2852}
2853
2854static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
2855                             int ring_type)
2856{
2857        struct hns3_nic_ring_data *ring_data = priv->ring_data;
2858        int queue_num = priv->ae_handle->kinfo.num_tqps;
2859        struct pci_dev *pdev = priv->ae_handle->pdev;
2860        struct hns3_enet_ring *ring;
2861
2862        ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
2863        if (!ring)
2864                return -ENOMEM;
2865
2866        if (ring_type == HNAE3_RING_TYPE_TX) {
2867                ring_data[q->tqp_index].ring = ring;
2868                ring_data[q->tqp_index].queue_index = q->tqp_index;
2869                ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
2870        } else {
2871                ring_data[q->tqp_index + queue_num].ring = ring;
2872                ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
2873                ring->io_base = q->io_base;
2874        }
2875
2876        hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
2877
2878        ring->tqp = q;
2879        ring->desc = NULL;
2880        ring->desc_cb = NULL;
2881        ring->dev = priv->dev;
2882        ring->desc_dma_addr = 0;
2883        ring->buf_size = q->buf_size;
2884        ring->desc_num = q->desc_num;
2885        ring->next_to_use = 0;
2886        ring->next_to_clean = 0;
2887
2888        return 0;
2889}
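
/* Layout note for ring_data as configured above (the tx/rx locals below
 * are just illustrative): entries [0, num_tqps) hold the tx rings and
 * entries [num_tqps, 2 * num_tqps) hold the rx rings, so tqp i maps to
 *
 *   tx = priv->ring_data[i].ring;
 *   rx = priv->ring_data[i + h->kinfo.num_tqps].ring;
 *
 * which matches the indexing used by hns3_nic_init_vector_data() and
 * hns3_put_ring_config().
 */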
2890
2891static int hns3_queue_to_ring(struct hnae3_queue *tqp,
2892                              struct hns3_nic_priv *priv)
2893{
2894        int ret;
2895
2896        ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
2897        if (ret)
2898                return ret;
2899
2900        ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
2901        if (ret)
2902                return ret;
2903
2904        return 0;
2905}
2906
2907static int hns3_get_ring_config(struct hns3_nic_priv *priv)
2908{
2909        struct hnae3_handle *h = priv->ae_handle;
2910        struct pci_dev *pdev = h->pdev;
2911        int i, ret;
2912
2913        priv->ring_data = devm_kzalloc(&pdev->dev,
2914                                        array3_size(h->kinfo.num_tqps,
2915                                                    sizeof(*priv->ring_data),
2916                                                    2),
2917                                        GFP_KERNEL);
2918        if (!priv->ring_data)
2919                return -ENOMEM;
2920
2921        for (i = 0; i < h->kinfo.num_tqps; i++) {
2922                ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
2923                if (ret)
2924                        goto err;
2925        }
2926
2927        return 0;
2928err:
2929        devm_kfree(&pdev->dev, priv->ring_data);
2930        return ret;
2931}
2932
2933static void hns3_put_ring_config(struct hns3_nic_priv *priv)
2934{
2935        struct hnae3_handle *h = priv->ae_handle;
2936        int i;
2937
2938        for (i = 0; i < h->kinfo.num_tqps; i++) {
2939                devm_kfree(priv->dev, priv->ring_data[i].ring);
2940                devm_kfree(priv->dev,
2941                           priv->ring_data[i + h->kinfo.num_tqps].ring);
2942        }
2943        devm_kfree(priv->dev, priv->ring_data);
2944}
2945
2946static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
2947{
2948        int ret;
2949
2950        if (ring->desc_num <= 0 || ring->buf_size <= 0)
2951                return -EINVAL;
2952
2953        ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
2954                                GFP_KERNEL);
2955        if (!ring->desc_cb) {
2956                ret = -ENOMEM;
2957                goto out;
2958        }
2959
2960        ret = hns3_alloc_desc(ring);
2961        if (ret)
2962                goto out_with_desc_cb;
2963
2964        if (!HNAE3_IS_TX_RING(ring)) {
2965                ret = hns3_alloc_ring_buffers(ring);
2966                if (ret)
2967                        goto out_with_desc;
2968        }
2969
2970        return 0;
2971
2972out_with_desc:
2973        hns3_free_desc(ring);
2974out_with_desc_cb:
2975        kfree(ring->desc_cb);
2976        ring->desc_cb = NULL;
2977out:
2978        return ret;
2979}
2980
2981static void hns3_fini_ring(struct hns3_enet_ring *ring)
2982{
2983        hns3_free_desc(ring);
2984        kfree(ring->desc_cb);
2985        ring->desc_cb = NULL;
2986        ring->next_to_clean = 0;
2987        ring->next_to_use = 0;
2988}
2989
2990static int hns3_buf_size2type(u32 buf_size)
2991{
2992        int bd_size_type;
2993
2994        switch (buf_size) {
2995        case 512:
2996                bd_size_type = HNS3_BD_SIZE_512_TYPE;
2997                break;
2998        case 1024:
2999                bd_size_type = HNS3_BD_SIZE_1024_TYPE;
3000                break;
3001        case 2048:
3002                bd_size_type = HNS3_BD_SIZE_2048_TYPE;
3003                break;
3004        case 4096:
3005                bd_size_type = HNS3_BD_SIZE_4096_TYPE;
3006                break;
3007        default:
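                    /* unsupported buffer sizes fall back to the 2048-byte type */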
3008                bd_size_type = HNS3_BD_SIZE_2048_TYPE;
3009        }
3010
3011        return bd_size_type;
3012}
3013
3014static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
3015{
3016        dma_addr_t dma = ring->desc_dma_addr;
3017        struct hnae3_queue *q = ring->tqp;
3018
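            /* The ring base address is programmed as two 32-bit halves;
             * (dma >> 31) >> 1 extracts the high half without ever shifting
             * by 32, which would be undefined when dma_addr_t is only
             * 32 bits wide.
             */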
3019        if (!HNAE3_IS_TX_RING(ring)) {
3020                hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
3021                               (u32)dma);
3022                hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
3023                               (u32)((dma >> 31) >> 1));
3024
3025                hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
3026                               hns3_buf_size2type(ring->buf_size));
3027                hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
3028                               ring->desc_num / 8 - 1);
3029
3030        } else {
3031                hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
3032                               (u32)dma);
3033                hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
3034                               (u32)((dma >> 31) >> 1));
3035
3036                hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG,
3037                               hns3_buf_size2type(ring->buf_size));
3038                hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
3039                               ring->desc_num / 8 - 1);
3040        }
3041}
3042
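    /* Write each enabled TC's index into HNS3_RING_TX_RING_TC_REG of its
     * TX queues, so the hardware queue to traffic-class mapping is
     * programmed again, e.g. from hns3_nic_reset_all_ring() after a reset.
     */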
3043static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
3044{
3045        struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
3046        int i;
3047
3048        for (i = 0; i < HNAE3_MAX_TC; i++) {
3049                struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
3050                int j;
3051
3052                if (!tc_info->enable)
3053                        continue;
3054
3055                for (j = 0; j < tc_info->tqp_count; j++) {
3056                        struct hnae3_queue *q;
3057
3058                        q = priv->ring_data[tc_info->tqp_offset + j].ring->tqp;
3059                        hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG,
3060                                       tc_info->tc);
3061                }
3062        }
3063}
3064
3065int hns3_init_all_ring(struct hns3_nic_priv *priv)
3066{
3067        struct hnae3_handle *h = priv->ae_handle;
3068        int ring_num = h->kinfo.num_tqps * 2;
3069        int i, j;
3070        int ret;
3071
3072        for (i = 0; i < ring_num; i++) {
3073                ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
3074                if (ret) {
3075                        dev_err(priv->dev,
3076                                "failed to alloc ring memory, ret = %d\n", ret);
3077                        goto out_when_alloc_ring_memory;
3078                }
3079
3080                u64_stats_init(&priv->ring_data[i].ring->syncp);
3081        }
3082
3083        return 0;
3084
3085out_when_alloc_ring_memory:
3086        for (j = i - 1; j >= 0; j--)
3087                hns3_fini_ring(priv->ring_data[j].ring);
3088
3089        return -ENOMEM;
3090}
3091
3092int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
3093{
3094        struct hnae3_handle *h = priv->ae_handle;
3095        int i;
3096
3097        for (i = 0; i < h->kinfo.num_tqps; i++) {
3098                if (h->ae_algo->ops->reset_queue)
3099                        h->ae_algo->ops->reset_queue(h, i);
3100
3101                hns3_fini_ring(priv->ring_data[i].ring);
3102                hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
3103        }
3104        return 0;
3105}
3106
3107/* Set the MAC address if it is configured, otherwise leave it to the AE driver */
3108static void hns3_init_mac_addr(struct net_device *netdev, bool init)
3109{
3110        struct hns3_nic_priv *priv = netdev_priv(netdev);
3111        struct hnae3_handle *h = priv->ae_handle;
3112        u8 mac_addr_temp[ETH_ALEN];
3113
3114        if (h->ae_algo->ops->get_mac_addr && init) {
3115                h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
3116                ether_addr_copy(netdev->dev_addr, mac_addr_temp);
3117        }
3118
3119        /* Check if the MAC address is valid; if not, use a random one */
3120        if (!is_valid_ether_addr(netdev->dev_addr)) {
3121                eth_hw_addr_random(netdev);
3122                dev_warn(priv->dev, "using random MAC address %pM\n",
3123                         netdev->dev_addr);
3124        }
3125
3126        if (h->ae_algo->ops->set_mac_addr)
3127                h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);
3129}
3130
3131static void hns3_uninit_mac_addr(struct net_device *netdev)
3132{
3133        struct hns3_nic_priv *priv = netdev_priv(netdev);
3134        struct hnae3_handle *h = priv->ae_handle;
3135
3136        if (h->ae_algo->ops->rm_uc_addr)
3137                h->ae_algo->ops->rm_uc_addr(h, netdev->dev_addr);
3138}
3139
3140static int hns3_init_phy(struct net_device *netdev)
3141{
3142        struct hnae3_handle *h = hns3_get_handle(netdev);
3143        int ret = 0;
3144
3145        if (h->ae_algo->ops->mac_connect_phy)
3146                ret = h->ae_algo->ops->mac_connect_phy(h);
3147
3148        return ret;
3149}
3150
3151static void hns3_uninit_phy(struct net_device *netdev)
3152{
3153        struct hnae3_handle *h = hns3_get_handle(netdev);
3154
3155        if (h->ae_algo->ops->mac_disconnect_phy)
3156                h->ae_algo->ops->mac_disconnect_phy(h);
3157}
3158
3159static void hns3_nic_set_priv_ops(struct net_device *netdev)
3160{
3161        struct hns3_nic_priv *priv = netdev_priv(netdev);
3162
3163        priv->ops.fill_desc = hns3_fill_desc;
3164        if (netdev->features & (NETIF_F_TSO | NETIF_F_TSO6))
3166                priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
3167        else
3168                priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
3169}
3170
3171static int hns3_client_init(struct hnae3_handle *handle)
3172{
3173        struct pci_dev *pdev = handle->pdev;
3174        u16 alloc_tqps, max_rss_size;
3175        struct hns3_nic_priv *priv;
3176        struct net_device *netdev;
3177        int ret;
3178
3179        handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps,
3180                                                    &max_rss_size);
3181        netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps);
3182        if (!netdev)
3183                return -ENOMEM;
3184
3185        priv = netdev_priv(netdev);
3186        priv->dev = &pdev->dev;
3187        priv->netdev = netdev;
3188        priv->ae_handle = handle;
3189        priv->ae_handle->last_reset_time = jiffies;
3190        priv->tx_timeout_count = 0;
3191
3192        handle->kinfo.netdev = netdev;
3193        handle->priv = (void *)priv;
3194
3195        hns3_init_mac_addr(netdev, true);
3196
3197        hns3_set_default_feature(netdev);
3198
3199        netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
3200        netdev->priv_flags |= IFF_UNICAST_FLT;
3201        netdev->netdev_ops = &hns3_nic_netdev_ops;
3202        SET_NETDEV_DEV(netdev, &pdev->dev);
3203        hns3_ethtool_set_ops(netdev);
3204        hns3_nic_set_priv_ops(netdev);
3205
3206        /* Carrier off reporting is important to ethtool even BEFORE open */
3207        netif_carrier_off(netdev);
3208
3209        if (handle->flags & HNAE3_SUPPORT_VF)
3210                handle->reset_level = HNAE3_VF_RESET;
3211        else
3212                handle->reset_level = HNAE3_FUNC_RESET;
3213
3214        ret = hns3_get_ring_config(priv);
3215        if (ret) {
3216                ret = -ENOMEM;
3217                goto out_get_ring_cfg;
3218        }
3219
3220        ret = hns3_nic_alloc_vector_data(priv);
3221        if (ret) {
3222                ret = -ENOMEM;
3223                goto out_alloc_vector_data;
3224        }
3225
3226        ret = hns3_nic_init_vector_data(priv);
3227        if (ret) {
3228                ret = -ENOMEM;
3229                goto out_init_vector_data;
3230        }
3231
3232        ret = hns3_init_all_ring(priv);
3233        if (ret) {
3234                ret = -ENOMEM;
3235                goto out_init_ring_data;
3236        }
3237
3238        ret = hns3_init_phy(netdev);
3239        if (ret)
3240                goto out_init_phy;
3241
3242        ret = register_netdev(netdev);
3243        if (ret) {
3244                dev_err(priv->dev, "failed to register netdev during probe!\n");
3245                goto out_reg_netdev_fail;
3246        }
3247
3248        hns3_dcbnl_setup(handle);
3249
3250        /* MTU range: ETH_MIN_MTU (kernel default) to 9706 */
3251        netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
3252
3253        return ret;
3254
3255out_reg_netdev_fail:
3256        hns3_uninit_phy(netdev);
3257out_init_phy:
3258        hns3_uninit_all_ring(priv);
3259out_init_ring_data:
3260        hns3_nic_uninit_vector_data(priv);
3261out_init_vector_data:
3262        hns3_nic_dealloc_vector_data(priv);
3263out_alloc_vector_data:
3264        priv->ring_data = NULL;
3265out_get_ring_cfg:
3266        priv->ae_handle = NULL;
3267        free_netdev(netdev);
3268        return ret;
3269}
3270
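    /* hns3_client_uninit() undoes hns3_client_init() largely in reverse
     * order: unregister the netdev, drain the RX rings, disconnect the PHY,
     * then release the vectors, the rings and the ring config before
     * freeing the netdev.
     */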
3271static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
3272{
3273        struct net_device *netdev = handle->kinfo.netdev;
3274        struct hns3_nic_priv *priv = netdev_priv(netdev);
3275        int ret;
3276
3277        if (netdev->reg_state != NETREG_UNINITIALIZED)
3278                unregister_netdev(netdev);
3279
3280        hns3_force_clear_all_rx_ring(handle);
3281
3282        hns3_uninit_phy(netdev);
3283
3284        hns3_nic_uninit_vector_data(priv);
3285
3286        ret = hns3_nic_dealloc_vector_data(priv);
3287        if (ret)
3288                netdev_err(netdev, "dealloc vector error\n");
3289
3290        ret = hns3_uninit_all_ring(priv);
3291        if (ret)
3292                netdev_err(netdev, "uninit ring error\n");
3293
3294        hns3_put_ring_config(priv);
3295
3296        priv->ring_data = NULL;
3297
3298        hns3_uninit_mac_addr(netdev);
3299
3300        free_netdev(netdev);
3301}
3302
3303static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
3304{
3305        struct net_device *netdev = handle->kinfo.netdev;
3306
3307        if (!netdev)
3308                return;
3309
3310        if (linkup) {
3311                netif_carrier_on(netdev);
3312                netif_tx_wake_all_queues(netdev);
3313                netdev_info(netdev, "link up\n");
3314        } else {
3315                netif_carrier_off(netdev);
3316                netif_tx_stop_all_queues(netdev);
3317                netdev_info(netdev, "link down\n");
3318        }
3319}
3320
3321static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
3322{
3323        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3324        struct net_device *ndev = kinfo->netdev;
3325        bool if_running;
3326        int ret;
3327
3328        if (tc > HNAE3_MAX_TC)
3329                return -EINVAL;
3330
3331        if (!ndev)
3332                return -ENODEV;
3333
3334        if_running = netif_running(ndev);
3335
3336        if (if_running) {
3337                (void)hns3_nic_net_stop(ndev);
3338                msleep(100);
3339        }
3340
3341        ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ?
3342                kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP;
3343        if (ret)
3344                goto err_out;
3345
3346        ret = hns3_nic_set_real_num_queue(ndev);
3347
3348err_out:
3349        if (if_running)
3350                (void)hns3_nic_net_open(ndev);
3351
3352        return ret;
3353}
3354
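    /* Re-sync the netdev's unicast and multicast address lists into the
     * hardware; called from hns3_reset_notify_init_enet() so the MAC
     * tables are repopulated after a reset.
     */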
3355static void hns3_recover_hw_addr(struct net_device *ndev)
3356{
3357        struct netdev_hw_addr_list *list;
3358        struct netdev_hw_addr *ha, *tmp;
3359
3360        /* go through and sync uc_addr entries to the device */
3361        list = &ndev->uc;
3362        list_for_each_entry_safe(ha, tmp, &list->list, list)
3363                hns3_nic_uc_sync(ndev, ha->addr);
3364
3365        /* go through and sync mc_addr entries to the device */
3366        list = &ndev->mc;
3367        list_for_each_entry_safe(ha, tmp, &list->list, list)
3368                hns3_nic_mc_sync(ndev, ha->addr);
3369}
3370
3371static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
3372{
3373        while (ring->next_to_clean != ring->next_to_use) {
3374                ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
3375                hns3_free_buffer_detach(ring, ring->next_to_clean);
3376                ring_ptr_move_fw(ring, next_to_clean);
3377        }
3378}
3379
3380static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
3381{
3382        struct hns3_desc_cb res_cbs;
3383        int ret;
3384
3385        while (ring->next_to_use != ring->next_to_clean) {
3386                /* When a buffer is not reused, its memory has been
3387                 * freed in hns3_handle_rx_bd or will be freed by the
3388                 * stack, so we need to replace the buffer here.
3389                 */
3390                if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
3391                        ret = hns3_reserve_buffer_map(ring, &res_cbs);
3392                        if (ret) {
3393                                u64_stats_update_begin(&ring->syncp);
3394                                ring->stats.sw_err_cnt++;
3395                                u64_stats_update_end(&ring->syncp);
3396                                /* If allocating a new buffer fails, exit
3397                                 * directly and re-clear in the up flow.
3398                                 */
3399                                netdev_warn(ring->tqp->handle->kinfo.netdev,
3400                                            "reserve buffer map failed, ret = %d\n",
3401                                            ret);
3402                                return ret;
3403                        }
3404                        hns3_replace_buffer(ring, ring->next_to_use,
3405                                            &res_cbs);
3406                }
3407                ring_ptr_move_fw(ring, next_to_use);
3408        }
3409
3410        return 0;
3411}
3412
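    /* Unlike hns3_clear_rx_ring() above, which replaces consumed buffers so
     * the ring stays usable, this variant only unmaps them; it is used on
     * teardown paths via hns3_force_clear_all_rx_ring().
     */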
3413static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
3414{
3415        while (ring->next_to_use != ring->next_to_clean) {
3416                /* When a buffer is not reused, its memory has been
3417                 * freed in hns3_handle_rx_bd or will be freed by the
3418                 * stack, so we only need to unmap the buffer here.
3419                 */
3420                if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
3421                        hns3_unmap_buffer(ring,
3422                                          &ring->desc_cb[ring->next_to_use]);
3423                        ring->desc_cb[ring->next_to_use].dma = 0;
3424                }
3425
3426                ring_ptr_move_fw(ring, next_to_use);
3427        }
3428}
3429
3430static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h)
3431{
3432        struct net_device *ndev = h->kinfo.netdev;
3433        struct hns3_nic_priv *priv = netdev_priv(ndev);
3434        struct hns3_enet_ring *ring;
3435        u32 i;
3436
3437        for (i = 0; i < h->kinfo.num_tqps; i++) {
3438                ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
3439                hns3_force_clear_rx_ring(ring);
3440        }
3441}
3442
3443static void hns3_clear_all_ring(struct hnae3_handle *h)
3444{
3445        struct net_device *ndev = h->kinfo.netdev;
3446        struct hns3_nic_priv *priv = netdev_priv(ndev);
3447        u32 i;
3448
3449        for (i = 0; i < h->kinfo.num_tqps; i++) {
3450                struct netdev_queue *dev_queue;
3451                struct hns3_enet_ring *ring;
3452
3453                ring = priv->ring_data[i].ring;
3454                hns3_clear_tx_ring(ring);
3455                dev_queue = netdev_get_tx_queue(ndev,
3456                                                priv->ring_data[i].queue_index);
3457                netdev_tx_reset_queue(dev_queue);
3458
3459                ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
3460                /* Continue to clear other rings even if clearing some
3461                 * rings failed.
3462                 */
3463                hns3_clear_rx_ring(ring);
3464        }
3465}
3466
3467int hns3_nic_reset_all_ring(struct hnae3_handle *h)
3468{
3469        struct net_device *ndev = h->kinfo.netdev;
3470        struct hns3_nic_priv *priv = netdev_priv(ndev);
3471        struct hns3_enet_ring *rx_ring;
3472        int i, j;
3473        int ret;
3474
3475        for (i = 0; i < h->kinfo.num_tqps; i++) {
3476                h->ae_algo->ops->reset_queue(h, i);
3477                hns3_init_ring_hw(priv->ring_data[i].ring);
3478
3479                /* We need to clear the TX ring here because the self
3480                 * test uses the ring and does not run down before up.
3481                 */
3482                hns3_clear_tx_ring(priv->ring_data[i].ring);
3483                priv->ring_data[i].ring->next_to_clean = 0;
3484                priv->ring_data[i].ring->next_to_use = 0;
3485
3486                rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
3487                hns3_init_ring_hw(rx_ring);
3488                ret = hns3_clear_rx_ring(rx_ring);
3489                if (ret)
3490                        return ret;
3491
3492                /* The hardware head and tail are unknown when this
3493                 * function is called in reset flow, so reuse all descriptors.
3494                 */
3495                for (j = 0; j < rx_ring->desc_num; j++)
3496                        hns3_reuse_buffer(rx_ring, j);
3497
3498                rx_ring->next_to_clean = 0;
3499                rx_ring->next_to_use = 0;
3500        }
3501
3502        hns3_init_tx_ring_tc(priv);
3503
3504        return 0;
3505}
3506
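    /* hns3_store_coal() and hns3_restore_coal() bracket a reset:
     * hns3_reset_notify_uninit_enet() saves the coalesce settings before
     * the vectors are torn down and hns3_reset_notify_init_enet() writes
     * them back, so the user's configuration survives the reset.
     */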
3507static void hns3_store_coal(struct hns3_nic_priv *priv)
3508{
3509        /* ethtool only supports setting and querying one coalesce
3510         * configuration for now, so save vector 0's coalesce
3511         * configuration here in order to restore it later.
3512         */
3513        memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal,
3514               sizeof(struct hns3_enet_coalesce));
3515        memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal,
3516               sizeof(struct hns3_enet_coalesce));
3517}
3518
3519static void hns3_restore_coal(struct hns3_nic_priv *priv)
3520{
3521        u16 vector_num = priv->vector_num;
3522        int i;
3523
3524        for (i = 0; i < vector_num; i++) {
3525                memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal,
3526                       sizeof(struct hns3_enet_coalesce));
3527                memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal,
3528                       sizeof(struct hns3_enet_coalesce));
3529        }
3530}
3531
3532static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
3533{
3534        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3535        struct net_device *ndev = kinfo->netdev;
3536
3537        if (!netif_running(ndev))
3538                return 0;
3539
3540        return hns3_nic_net_stop(ndev);
3541}
3542
3543static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
3544{
3545        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3546        int ret = 0;
3547
3548        if (netif_running(kinfo->netdev)) {
3549                ret = hns3_nic_net_up(kinfo->netdev);
3550                if (ret) {
3551                        netdev_err(kinfo->netdev,
3552                                   "hns net up fail, ret=%d!\n", ret);
3553                        return ret;
3554                }
3555                handle->last_reset_time = jiffies;
3556        }
3557
3558        return ret;
3559}
3560
3561static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
3562{
3563        struct net_device *netdev = handle->kinfo.netdev;
3564        struct hns3_nic_priv *priv = netdev_priv(netdev);
3565        bool vlan_filter_enable;
3566        int ret;
3567
3568        hns3_init_mac_addr(netdev, false);
3569        hns3_recover_hw_addr(netdev);
3570        hns3_update_promisc_mode(netdev, handle->netdev_flags);
3571        vlan_filter_enable = !(netdev->flags & IFF_PROMISC);
3572        hns3_enable_vlan_filter(netdev, vlan_filter_enable);
3573
3575        /* The hardware table is only cleared when the PF resets */
3576        if (!(handle->flags & HNAE3_SUPPORT_VF))
3577                hns3_restore_vlan(netdev);
3578
3579        /* Carrier off reporting is important to ethtool even BEFORE open */
3580        netif_carrier_off(netdev);
3581
3582        hns3_restore_coal(priv);
3583
3584        ret = hns3_get_ring_config(priv);
3585        if (ret)
3586                return ret;
3587
3588        ret = hns3_nic_init_vector_data(priv);
3589        if (ret)
3590                return ret;
3591
3592        ret = hns3_init_all_ring(priv);
3593        if (ret) {
3594                hns3_nic_uninit_vector_data(priv);
3595                priv->ring_data = NULL;
3596        }
3597
3598        return ret;
3599}
3600
3601static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
3602{
3603        struct net_device *netdev = handle->kinfo.netdev;
3604        struct hns3_nic_priv *priv = netdev_priv(netdev);
3605        int ret;
3606
3607        hns3_force_clear_all_rx_ring(handle);
3608
3609        hns3_nic_uninit_vector_data(priv);
3610
3611        hns3_store_coal(priv);
3612
3613        ret = hns3_uninit_all_ring(priv);
3614        if (ret)
3615                netdev_err(netdev, "uninit ring error\n");
3616
3617        hns3_put_ring_config(priv);
3618
3619        priv->ring_data = NULL;
3620
3621        hns3_uninit_mac_addr(netdev);
3622
3623        return ret;
3624}
3625
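    /* Dispatch a reset notification to the matching handler above. Callers
     * such as hns3_set_channels() below issue these in the order
     * DOWN_CLIENT, UNINIT_CLIENT, INIT_CLIENT, UP_CLIENT around the
     * hardware change.
     */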
3626static int hns3_reset_notify(struct hnae3_handle *handle,
3627                             enum hnae3_reset_notify_type type)
3628{
3629        int ret = 0;
3630
3631        switch (type) {
3632        case HNAE3_UP_CLIENT:
3633                ret = hns3_reset_notify_up_enet(handle);
3634                break;
3635        case HNAE3_DOWN_CLIENT:
3636                ret = hns3_reset_notify_down_enet(handle);
3637                break;
3638        case HNAE3_INIT_CLIENT:
3639                ret = hns3_reset_notify_init_enet(handle);
3640                break;
3641        case HNAE3_UNINIT_CLIENT:
3642                ret = hns3_reset_notify_uninit_enet(handle);
3643                break;
3644        default:
3645                break;
3646        }
3647
3648        return ret;
3649}
3650
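    /* Round the requested TQP count down to a multiple of the TC count,
     * e.g. with num_tc = 4 a request for 10 queues becomes (10 / 4) * 4 = 8.
     */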
3651static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num)
3652{
3653        return (new_tqp_num / num_tc) * num_tc;
3654}
3655
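    /* Change the number of combined channels, typically reached through
     * ethtool's set_channels hook (e.g. "ethtool -L <iface> combined <n>"):
     * bring the interface down and uninitialize it, ask the AE layer to
     * change the queue count (reverting on failure), then reinitialize and
     * bring it back up.
     */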
3656int hns3_set_channels(struct net_device *netdev,
3657                      struct ethtool_channels *ch)
3658{
3659        struct hnae3_handle *h = hns3_get_handle(netdev);
3660        struct hnae3_knic_private_info *kinfo = &h->kinfo;
3661        u32 new_tqp_num = ch->combined_count;
3662        u16 org_tqp_num;
3663        int ret;
3664
3665        if (ch->rx_count || ch->tx_count)
3666                return -EINVAL;
3667
3668        if (new_tqp_num > hns3_get_max_available_channels(h) ||
3669            new_tqp_num < kinfo->num_tc) {
3670                dev_err(&netdev->dev,
3671                        "failed to change tqps, the tqp range is from %d to %d\n",
3672                        kinfo->num_tc,
3673                        hns3_get_max_available_channels(h));
3674                return -EINVAL;
3675        }
3676
3677        new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num);
3678        if (kinfo->num_tqps == new_tqp_num)
3679                return 0;
3680
3681        ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
3682        if (ret)
3683                return ret;
3684
3685        ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT);
3686        if (ret)
3687                return ret;
3688
3689        org_tqp_num = h->kinfo.num_tqps;
3690        ret = h->ae_algo->ops->set_channels(h, new_tqp_num);
3691        if (ret) {
3692                ret = h->ae_algo->ops->set_channels(h, org_tqp_num);
3693                if (ret) {
3694                        /* If revert to old tqp failed, fatal error occurred */
3695                        dev_err(&netdev->dev,
3696                                "failed to revert to old tqp num, ret = %d\n", ret);
3697                        return ret;
3698                }
3699                dev_info(&netdev->dev,
3700                         "failed to change tqp num, reverted to old tqp num\n");
3701        }
3702        ret = hns3_reset_notify(h, HNAE3_INIT_CLIENT);
3703        if (ret)
3704                return ret;
3705
3706        return hns3_reset_notify(h, HNAE3_UP_CLIENT);
3707}
3708
3709static const struct hnae3_client_ops client_ops = {
3710        .init_instance = hns3_client_init,
3711        .uninit_instance = hns3_client_uninit,
3712        .link_status_change = hns3_link_status_change,
3713        .setup_tc = hns3_client_setup_tc,
3714        .reset_notify = hns3_reset_notify,
3715};
3716
3717/* hns3_init_module - Driver registration routine
3718 * hns3_init_module is the first routine called when the driver is loaded.
3719 * It registers the client with hnae3 and the driver with the PCI subsystem.
3720 */
3721static int __init hns3_init_module(void)
3722{
3723        int ret;
3724
3725        pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
3726        pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
3727
3728        client.type = HNAE3_CLIENT_KNIC;
3729        snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
3730                 hns3_driver_name);
3731
3732        client.ops = &client_ops;
3733
3734        INIT_LIST_HEAD(&client.node);
3735
3736        ret = hnae3_register_client(&client);
3737        if (ret)
3738                return ret;
3739
3740        ret = pci_register_driver(&hns3_driver);
3741        if (ret)
3742                hnae3_unregister_client(&client);
3743
3744        return ret;
3745}
3746module_init(hns3_init_module);
3747
3748/* hns3_exit_module - Driver exit cleanup routine
3749 * hns3_exit_module is called just before the driver is removed
3750 * from memory.
3751 */
3752static void __exit hns3_exit_module(void)
3753{
3754        pci_unregister_driver(&hns3_driver);
3755        hnae3_unregister_client(&client);
3756}
3757module_exit(hns3_exit_module);
3758
3759MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
3760MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
3761MODULE_LICENSE("GPL");
3762MODULE_ALIAS("pci:hns-nic");
3763MODULE_VERSION(HNS3_MOD_VERSION);
3764