linux/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
   1/*
   2 * Copyright (c) 2016~2017 Hisilicon Limited.
   3 *
   4 * This program is free software; you can redistribute it and/or modify
   5 * it under the terms of the GNU General Public License as published by
   6 * the Free Software Foundation; either version 2 of the License, or
   7 * (at your option) any later version.
   8 */
   9
  10#include <linux/dma-mapping.h>
  11#include <linux/etherdevice.h>
  12#include <linux/interrupt.h>
  13#include <linux/if_vlan.h>
  14#include <linux/ip.h>
  15#include <linux/ipv6.h>
  16#include <linux/module.h>
  17#include <linux/pci.h>
  18#include <linux/skbuff.h>
  19#include <linux/sctp.h>
  20#include <linux/vermagic.h>
  21#include <net/gre.h>
  22#include <net/pkt_cls.h>
  23#include <net/vxlan.h>
  24
  25#include "hnae3.h"
  26#include "hns3_enet.h"
  27
  28static const char hns3_driver_name[] = "hns3";
  29const char hns3_driver_version[] = VERMAGIC_STRING;
  30static const char hns3_driver_string[] =
  31                        "Hisilicon Ethernet Network Driver for Hip08 Family";
  32static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
  33static struct hnae3_client client;
  34
  35/* hns3_pci_tbl - PCI Device ID Table
  36 *
  37 * Last entry must be all 0s
  38 *
  39 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
  40 *   Class, Class Mask, private data (not used) }
  41 */
  42static const struct pci_device_id hns3_pci_tbl[] = {
  43        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
  44        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
  45        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
  46         HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
  47        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
  48         HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
  49        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
  50         HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
  51        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
  52         HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
  53        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
  54         HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
  55        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
  56        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
  57        /* required last entry */
  58        {0, }
  59};
  60MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
  61
  62static irqreturn_t hns3_irq_handle(int irq, void *dev)
  63{
  64        struct hns3_enet_tqp_vector *tqp_vector = dev;
  65
  66        napi_schedule(&tqp_vector->napi);
  67
  68        return IRQ_HANDLED;
  69}
  70
  71static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
  72{
  73        struct hns3_enet_tqp_vector *tqp_vectors;
  74        unsigned int i;
  75
  76        for (i = 0; i < priv->vector_num; i++) {
  77                tqp_vectors = &priv->tqp_vector[i];
  78
  79                if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
  80                        continue;
  81
  82                /* release the irq resource */
  83                free_irq(tqp_vectors->vector_irq, tqp_vectors);
  84                tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
  85        }
  86}
  87
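/* hns3_nic_init_irq - request an irq for every initialized TQP vector
 * @priv: driver private data
 *
 * Builds a per-vector irq name from the netdev name and the vector's
 * role ("TxRx", "Rx" or "Tx") before calling request_irq(). Vectors
 * with no rings attached are skipped.
 */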
  88static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
  89{
  90        struct hns3_enet_tqp_vector *tqp_vectors;
  91        int txrx_int_idx = 0;
  92        int rx_int_idx = 0;
  93        int tx_int_idx = 0;
  94        unsigned int i;
  95        int ret;
  96
  97        for (i = 0; i < priv->vector_num; i++) {
  98                tqp_vectors = &priv->tqp_vector[i];
  99
 100                if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
 101                        continue;
 102
 103                if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
 104                        snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
 105                                 "%s-%s-%d", priv->netdev->name, "TxRx",
 106                                 txrx_int_idx++);
 107                        txrx_int_idx++;
 108                } else if (tqp_vectors->rx_group.ring) {
 109                        snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
 110                                 "%s-%s-%d", priv->netdev->name, "Rx",
 111                                 rx_int_idx++);
 112                } else if (tqp_vectors->tx_group.ring) {
 113                        snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
 114                                 "%s-%s-%d", priv->netdev->name, "Tx",
 115                                 tx_int_idx++);
 116                } else {
 117                        /* Skip this unused q_vector */
 118                        continue;
 119                }
 120
 121                tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
 122
 123                ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
 124                                  tqp_vectors->name,
  125                                  tqp_vectors);
 126                if (ret) {
 127                        netdev_err(priv->netdev, "request irq(%d) fail\n",
 128                                   tqp_vectors->vector_irq);
 129                        return ret;
 130                }
 131
 132                tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
 133        }
 134
 135        return 0;
 136}
 137
 138static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
 139                                 u32 mask_en)
 140{
 141        writel(mask_en, tqp_vector->mask_addr);
 142}
 143
 144static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
 145{
 146        napi_enable(&tqp_vector->napi);
 147
 148        /* enable vector */
 149        hns3_mask_vector_irq(tqp_vector, 1);
 150}
 151
 152static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
 153{
 154        /* disable vector */
 155        hns3_mask_vector_irq(tqp_vector, 0);
 156
 157        disable_irq(tqp_vector->vector_irq);
 158        napi_disable(&tqp_vector->napi);
 159}
 160
 161void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
 162                                 u32 rl_value)
 163{
 164        u32 rl_reg = hns3_rl_usec_to_reg(rl_value);
 165
  166        /* This defines the configuration for RL (Interrupt Rate Limiter).
  167         * RL defines the rate of interrupts, i.e. interrupts per second.
  168         * GL and RL are two ways to achieve interrupt coalescing.
  169         */
 170
 171        if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable &&
 172            !tqp_vector->rx_group.coal.gl_adapt_enable)
 173                /* According to the hardware, the range of rl_reg is
 174                 * 0-59 and the unit is 4.
 175                 */
  176                rl_reg |= HNS3_INT_RL_ENABLE_MASK;
 177
 178        writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
 179}
 180
 181void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
 182                                    u32 gl_value)
 183{
 184        u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);
 185
 186        writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
 187}
 188
 189void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
 190                                    u32 gl_value)
 191{
 192        u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);
 193
 194        writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
 195}
 196
 197static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
 198                                   struct hns3_nic_priv *priv)
 199{
 200        struct hnae3_handle *h = priv->ae_handle;
 201
 202        /* initialize the configuration for interrupt coalescing.
 203         * 1. GL (Interrupt Gap Limiter)
 204         * 2. RL (Interrupt Rate Limiter)
 205         */
 206
 207        /* Default: enable interrupt coalescing self-adaptive and GL */
 208        tqp_vector->tx_group.coal.gl_adapt_enable = 1;
 209        tqp_vector->rx_group.coal.gl_adapt_enable = 1;
 210
 211        tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
 212        tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;
 213
 214        /* Default: disable RL */
 215        h->kinfo.int_rl_setting = 0;
 216
 217        tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
 218        tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
 219        tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
 220}
 221
 222static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
 223                                      struct hns3_nic_priv *priv)
 224{
 225        struct hnae3_handle *h = priv->ae_handle;
 226
 227        hns3_set_vector_coalesce_tx_gl(tqp_vector,
 228                                       tqp_vector->tx_group.coal.int_gl);
 229        hns3_set_vector_coalesce_rx_gl(tqp_vector,
 230                                       tqp_vector->rx_group.coal.int_gl);
 231        hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
 232}
 233
 234static int hns3_nic_set_real_num_queue(struct net_device *netdev)
 235{
 236        struct hnae3_handle *h = hns3_get_handle(netdev);
 237        struct hnae3_knic_private_info *kinfo = &h->kinfo;
 238        unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
 239        int ret;
 240
 241        ret = netif_set_real_num_tx_queues(netdev, queue_size);
 242        if (ret) {
 243                netdev_err(netdev,
 244                           "netif_set_real_num_tx_queues fail, ret=%d!\n",
 245                           ret);
 246                return ret;
 247        }
 248
 249        ret = netif_set_real_num_rx_queues(netdev, queue_size);
 250        if (ret) {
 251                netdev_err(netdev,
 252                           "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
 253                return ret;
 254        }
 255
 256        return 0;
 257}
 258
 259static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
 260{
 261        u16 free_tqps, max_rss_size, max_tqps;
 262
 263        h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size);
 264        max_tqps = h->kinfo.num_tc * max_rss_size;
 265
 266        return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps));
 267}
 268
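/* hns3_nic_net_up - bring the hardware side of the netdev up
 * @netdev: net device
 *
 * Requests irqs, enables all TQP vectors (napi + interrupt mask) and
 * starts the ae_dev through the ae_algo start() hook. On failure the
 * already enabled vectors are disabled again and the irqs released.
 */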
 269static int hns3_nic_net_up(struct net_device *netdev)
 270{
 271        struct hns3_nic_priv *priv = netdev_priv(netdev);
 272        struct hnae3_handle *h = priv->ae_handle;
 273        int i, j;
 274        int ret;
 275
 276        /* get irq resource for all vectors */
 277        ret = hns3_nic_init_irq(priv);
 278        if (ret) {
 279                netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
 280                return ret;
 281        }
 282
 283        /* enable the vectors */
 284        for (i = 0; i < priv->vector_num; i++)
 285                hns3_vector_enable(&priv->tqp_vector[i]);
 286
 287        /* start the ae_dev */
 288        ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
 289        if (ret)
 290                goto out_start_err;
 291
 292        clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
 293
 294        return 0;
 295
 296out_start_err:
 297        for (j = i - 1; j >= 0; j--)
 298                hns3_vector_disable(&priv->tqp_vector[j]);
 299
 300        hns3_nic_uninit_irq(priv);
 301
 302        return ret;
 303}
 304
 305static int hns3_nic_net_open(struct net_device *netdev)
 306{
 307        struct hns3_nic_priv *priv = netdev_priv(netdev);
 308        int ret;
 309
 310        netif_carrier_off(netdev);
 311
 312        ret = hns3_nic_set_real_num_queue(netdev);
 313        if (ret)
 314                return ret;
 315
 316        ret = hns3_nic_net_up(netdev);
 317        if (ret) {
 318                netdev_err(netdev,
 319                           "hns net up fail, ret=%d!\n", ret);
 320                return ret;
 321        }
 322
 323        priv->ae_handle->last_reset_time = jiffies;
 324        return 0;
 325}
 326
 327static void hns3_nic_net_down(struct net_device *netdev)
 328{
 329        struct hns3_nic_priv *priv = netdev_priv(netdev);
 330        const struct hnae3_ae_ops *ops;
 331        int i;
 332
 333        if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
 334                return;
 335
 336        /* stop ae_dev */
 337        ops = priv->ae_handle->ae_algo->ops;
 338        if (ops->stop)
 339                ops->stop(priv->ae_handle);
 340
 341        /* disable vectors */
 342        for (i = 0; i < priv->vector_num; i++)
 343                hns3_vector_disable(&priv->tqp_vector[i]);
 344
 345        /* free irq resources */
 346        hns3_nic_uninit_irq(priv);
 347}
 348
 349static int hns3_nic_net_stop(struct net_device *netdev)
 350{
 351        netif_tx_stop_all_queues(netdev);
 352        netif_carrier_off(netdev);
 353
 354        hns3_nic_net_down(netdev);
 355
 356        return 0;
 357}
 358
 359static int hns3_nic_uc_sync(struct net_device *netdev,
 360                            const unsigned char *addr)
 361{
 362        struct hnae3_handle *h = hns3_get_handle(netdev);
 363
 364        if (h->ae_algo->ops->add_uc_addr)
 365                return h->ae_algo->ops->add_uc_addr(h, addr);
 366
 367        return 0;
 368}
 369
 370static int hns3_nic_uc_unsync(struct net_device *netdev,
 371                              const unsigned char *addr)
 372{
 373        struct hnae3_handle *h = hns3_get_handle(netdev);
 374
 375        if (h->ae_algo->ops->rm_uc_addr)
 376                return h->ae_algo->ops->rm_uc_addr(h, addr);
 377
 378        return 0;
 379}
 380
 381static int hns3_nic_mc_sync(struct net_device *netdev,
 382                            const unsigned char *addr)
 383{
 384        struct hnae3_handle *h = hns3_get_handle(netdev);
 385
 386        if (h->ae_algo->ops->add_mc_addr)
 387                return h->ae_algo->ops->add_mc_addr(h, addr);
 388
 389        return 0;
 390}
 391
 392static int hns3_nic_mc_unsync(struct net_device *netdev,
 393                              const unsigned char *addr)
 394{
 395        struct hnae3_handle *h = hns3_get_handle(netdev);
 396
 397        if (h->ae_algo->ops->rm_mc_addr)
 398                return h->ae_algo->ops->rm_mc_addr(h, addr);
 399
 400        return 0;
 401}
 402
 403static void hns3_nic_set_rx_mode(struct net_device *netdev)
 404{
 405        struct hnae3_handle *h = hns3_get_handle(netdev);
 406
 407        if (h->ae_algo->ops->set_promisc_mode) {
 408                if (netdev->flags & IFF_PROMISC)
 409                        h->ae_algo->ops->set_promisc_mode(h, 1);
 410                else
 411                        h->ae_algo->ops->set_promisc_mode(h, 0);
 412        }
 413        if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync))
 414                netdev_err(netdev, "sync uc address fail\n");
 415        if (netdev->flags & IFF_MULTICAST)
 416                if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
 417                        netdev_err(netdev, "sync mc address fail\n");
 418}
 419
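/* hns3_set_tso - fill the TSO related txbd fields for a GSO skb
 * @skb: the skb to transmit
 * @paylen: updated with the payload length (skb->len minus headers)
 * @mss: updated with the MSS taken from skb_shinfo(skb)->gso_size
 * @type_cs_vlan_tso: txbd field in which the TSO bit is set
 *
 * For tunnel GSO packets the outer UDP/IPv4 checksums are cleared
 * where required and the l3/l4 pointers are switched to the inner
 * headers before the pseudo header checksum is adjusted. Returns 0
 * without touching the fields for non-GSO skbs.
 */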
 420static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
 421                        u16 *mss, u32 *type_cs_vlan_tso)
 422{
 423        u32 l4_offset, hdr_len;
 424        union l3_hdr_info l3;
 425        union l4_hdr_info l4;
 426        u32 l4_paylen;
 427        int ret;
 428
 429        if (!skb_is_gso(skb))
 430                return 0;
 431
 432        ret = skb_cow_head(skb, 0);
 433        if (ret)
 434                return ret;
 435
 436        l3.hdr = skb_network_header(skb);
 437        l4.hdr = skb_transport_header(skb);
 438
 439        /* Software should clear the IPv4's checksum field when tso is
 440         * needed.
 441         */
 442        if (l3.v4->version == 4)
 443                l3.v4->check = 0;
 444
  445        /* tunnel packet */
 446        if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
 447                                         SKB_GSO_GRE_CSUM |
 448                                         SKB_GSO_UDP_TUNNEL |
 449                                         SKB_GSO_UDP_TUNNEL_CSUM)) {
 450                if ((!(skb_shinfo(skb)->gso_type &
 451                    SKB_GSO_PARTIAL)) &&
 452                    (skb_shinfo(skb)->gso_type &
 453                    SKB_GSO_UDP_TUNNEL_CSUM)) {
 454                        /* Software should clear the udp's checksum
 455                         * field when tso is needed.
 456                         */
 457                        l4.udp->check = 0;
 458                }
 459                /* reset l3&l4 pointers from outer to inner headers */
 460                l3.hdr = skb_inner_network_header(skb);
 461                l4.hdr = skb_inner_transport_header(skb);
 462
 463                /* Software should clear the IPv4's checksum field when
 464                 * tso is needed.
 465                 */
 466                if (l3.v4->version == 4)
 467                        l3.v4->check = 0;
 468        }
 469
  470        /* normal or tunnel packet */
 471        l4_offset = l4.hdr - skb->data;
 472        hdr_len = (l4.tcp->doff * 4) + l4_offset;
 473
  474        /* remove payload length from inner pseudo checksum when TSO */
 475        l4_paylen = skb->len - l4_offset;
 476        csum_replace_by_diff(&l4.tcp->check,
 477                             (__force __wsum)htonl(l4_paylen));
 478
 479        /* find the txbd field values */
 480        *paylen = skb->len - hdr_len;
 481        hnae_set_bit(*type_cs_vlan_tso,
 482                     HNS3_TXD_TSO_B, 1);
 483
 484        /* get MSS for TSO */
 485        *mss = skb_shinfo(skb)->gso_size;
 486
 487        return 0;
 488}
 489
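/* hns3_get_l4_protocol - get the outer and inner L4 protocol numbers
 * @skb: the skb to transmit
 * @ol4_proto: updated with the outer L4 protocol
 * @il4_proto: updated with the inner L4 protocol, 0 if the skb is not
 *             an encapsulated packet
 *
 * IPv6 extension headers are skipped with ipv6_skip_exthdr() when
 * looking up the protocol number.
 */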
 490static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
 491                                u8 *il4_proto)
 492{
 493        union {
 494                struct iphdr *v4;
 495                struct ipv6hdr *v6;
 496                unsigned char *hdr;
 497        } l3;
 498        unsigned char *l4_hdr;
 499        unsigned char *exthdr;
 500        u8 l4_proto_tmp;
 501        __be16 frag_off;
 502
 503        /* find outer header point */
 504        l3.hdr = skb_network_header(skb);
 505        l4_hdr = skb_inner_transport_header(skb);
 506
 507        if (skb->protocol == htons(ETH_P_IPV6)) {
 508                exthdr = l3.hdr + sizeof(*l3.v6);
 509                l4_proto_tmp = l3.v6->nexthdr;
 510                if (l4_hdr != exthdr)
 511                        ipv6_skip_exthdr(skb, exthdr - skb->data,
 512                                         &l4_proto_tmp, &frag_off);
 513        } else if (skb->protocol == htons(ETH_P_IP)) {
 514                l4_proto_tmp = l3.v4->protocol;
 515        } else {
 516                return -EINVAL;
 517        }
 518
 519        *ol4_proto = l4_proto_tmp;
 520
 521        /* tunnel packet */
 522        if (!skb->encapsulation) {
 523                *il4_proto = 0;
 524                return 0;
 525        }
 526
 527        /* find inner header point */
 528        l3.hdr = skb_inner_network_header(skb);
 529        l4_hdr = skb_inner_transport_header(skb);
 530
 531        if (l3.v6->version == 6) {
 532                exthdr = l3.hdr + sizeof(*l3.v6);
 533                l4_proto_tmp = l3.v6->nexthdr;
 534                if (l4_hdr != exthdr)
 535                        ipv6_skip_exthdr(skb, exthdr - skb->data,
 536                                         &l4_proto_tmp, &frag_off);
 537        } else if (l3.v4->version == 4) {
 538                l4_proto_tmp = l3.v4->protocol;
 539        }
 540
 541        *il4_proto = l4_proto_tmp;
 542
 543        return 0;
 544}
 545
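/* hns3_set_l2l3l4_len - fill the header length fields of the txbd
 * @skb: the skb to transmit
 * @ol4_proto: outer L4 protocol
 * @il4_proto: inner L4 protocol
 * @type_cs_vlan_tso: txbd field holding the (inner) L2/L3/L4 lengths
 * @ol_type_vlan_len_msec: txbd field holding the outer header lengths
 *
 * L2 lengths are written in units of 2 bytes and L3/L4 lengths in
 * units of 4 bytes, as noted in the comments below. Packet types the
 * hardware can't handle leave the length fields untouched.
 */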
 546static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
 547                                u8 il4_proto, u32 *type_cs_vlan_tso,
 548                                u32 *ol_type_vlan_len_msec)
 549{
 550        union {
 551                struct iphdr *v4;
 552                struct ipv6hdr *v6;
 553                unsigned char *hdr;
 554        } l3;
 555        union {
 556                struct tcphdr *tcp;
 557                struct udphdr *udp;
 558                struct gre_base_hdr *gre;
 559                unsigned char *hdr;
 560        } l4;
 561        unsigned char *l2_hdr;
 562        u8 l4_proto = ol4_proto;
 563        u32 ol2_len;
 564        u32 ol3_len;
 565        u32 ol4_len;
 566        u32 l2_len;
 567        u32 l3_len;
 568
 569        l3.hdr = skb_network_header(skb);
 570        l4.hdr = skb_transport_header(skb);
 571
 572        /* compute L2 header size for normal packet, defined in 2 Bytes */
 573        l2_len = l3.hdr - skb->data;
 574        hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
 575                       HNS3_TXD_L2LEN_S, l2_len >> 1);
 576
 577        /* tunnel packet*/
 578        if (skb->encapsulation) {
 579                /* compute OL2 header size, defined in 2 Bytes */
 580                ol2_len = l2_len;
 581                hnae_set_field(*ol_type_vlan_len_msec,
 582                               HNS3_TXD_L2LEN_M,
 583                               HNS3_TXD_L2LEN_S, ol2_len >> 1);
 584
 585                /* compute OL3 header size, defined in 4 Bytes */
 586                ol3_len = l4.hdr - l3.hdr;
 587                hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
 588                               HNS3_TXD_L3LEN_S, ol3_len >> 2);
 589
 590                /* MAC in UDP, MAC in GRE (0x6558)*/
 591                if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
 592                        /* switch MAC header ptr from outer to inner header.*/
 593                        l2_hdr = skb_inner_mac_header(skb);
 594
 595                        /* compute OL4 header size, defined in 4 Bytes. */
 596                        ol4_len = l2_hdr - l4.hdr;
 597                        hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M,
 598                                       HNS3_TXD_L4LEN_S, ol4_len >> 2);
 599
 600                        /* switch IP header ptr from outer to inner header */
 601                        l3.hdr = skb_inner_network_header(skb);
 602
 603                        /* compute inner l2 header size, defined in 2 Bytes. */
 604                        l2_len = l3.hdr - l2_hdr;
 605                        hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
 606                                       HNS3_TXD_L2LEN_S, l2_len >> 1);
 607                } else {
  608                        /* skb packet types not supported by hardware;
  609                         * the txbd len field is not filled.
  610                         */
 611                        return;
 612                }
 613
 614                /* switch L4 header pointer from outer to inner */
 615                l4.hdr = skb_inner_transport_header(skb);
 616
 617                l4_proto = il4_proto;
 618        }
 619
 620        /* compute inner(/normal) L3 header size, defined in 4 Bytes */
 621        l3_len = l4.hdr - l3.hdr;
 622        hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
 623                       HNS3_TXD_L3LEN_S, l3_len >> 2);
 624
 625        /* compute inner(/normal) L4 header size, defined in 4 Bytes */
 626        switch (l4_proto) {
 627        case IPPROTO_TCP:
 628                hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
 629                               HNS3_TXD_L4LEN_S, l4.tcp->doff);
 630                break;
 631        case IPPROTO_SCTP:
 632                hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
 633                               HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2));
 634                break;
 635        case IPPROTO_UDP:
 636                hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
 637                               HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2));
 638                break;
 639        default:
  640                /* skb packet types not supported by hardware;
  641                 * the txbd len field is not filled.
  642                 */
 643                return;
 644        }
 645}
 646
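/* hns3_set_l3l4_type_csum - fill the L3/L4 type and checksum fields
 * @skb: the skb to transmit
 * @ol4_proto: outer L4 protocol
 * @il4_proto: inner L4 protocol
 * @type_cs_vlan_tso: txbd field for the inner L3/L4 type and csum bits
 * @ol_type_vlan_len_msec: txbd field for the outer L3 and tunnel type
 *
 * Returns -EDOM for GSO skbs whose tunnel or L4 type the hardware
 * can't checksum; other unsupported types fall back to
 * skb_checksum_help().
 */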
 647static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
 648                                   u8 il4_proto, u32 *type_cs_vlan_tso,
 649                                   u32 *ol_type_vlan_len_msec)
 650{
 651        union {
 652                struct iphdr *v4;
 653                struct ipv6hdr *v6;
 654                unsigned char *hdr;
 655        } l3;
 656        u32 l4_proto = ol4_proto;
 657
 658        l3.hdr = skb_network_header(skb);
 659
 660        /* define OL3 type and tunnel type(OL4).*/
 661        if (skb->encapsulation) {
 662                /* define outer network header type.*/
 663                if (skb->protocol == htons(ETH_P_IP)) {
 664                        if (skb_is_gso(skb))
 665                                hnae_set_field(*ol_type_vlan_len_msec,
 666                                               HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
 667                                               HNS3_OL3T_IPV4_CSUM);
 668                        else
 669                                hnae_set_field(*ol_type_vlan_len_msec,
 670                                               HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
 671                                               HNS3_OL3T_IPV4_NO_CSUM);
 672
 673                } else if (skb->protocol == htons(ETH_P_IPV6)) {
 674                        hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
 675                                       HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
 676                }
 677
 678                /* define tunnel type(OL4).*/
 679                switch (l4_proto) {
 680                case IPPROTO_UDP:
 681                        hnae_set_field(*ol_type_vlan_len_msec,
 682                                       HNS3_TXD_TUNTYPE_M,
 683                                       HNS3_TXD_TUNTYPE_S,
 684                                       HNS3_TUN_MAC_IN_UDP);
 685                        break;
 686                case IPPROTO_GRE:
 687                        hnae_set_field(*ol_type_vlan_len_msec,
 688                                       HNS3_TXD_TUNTYPE_M,
 689                                       HNS3_TXD_TUNTYPE_S,
 690                                       HNS3_TUN_NVGRE);
 691                        break;
 692                default:
  693                        /* drop unsupported tunnel skbs, because hardware
  694                         * can't calculate the csum when doing TSO.
  695                         */
 696                        if (skb_is_gso(skb))
 697                                return -EDOM;
 698
  699                        /* the stack computes the IP header already; fall
  700                         * back to software L4 checksum when not doing TSO.
  701                         */
 702                        skb_checksum_help(skb);
 703                        return 0;
 704                }
 705
 706                l3.hdr = skb_inner_network_header(skb);
 707                l4_proto = il4_proto;
 708        }
 709
 710        if (l3.v4->version == 4) {
 711                hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
 712                               HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
 713
 714                /* the stack computes the IP header already, the only time we
 715                 * need the hardware to recompute it is in the case of TSO.
 716                 */
 717                if (skb_is_gso(skb))
 718                        hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
 719
 720                hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
 721        } else if (l3.v6->version == 6) {
 722                hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
 723                               HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
 724                hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
 725        }
 726
 727        switch (l4_proto) {
 728        case IPPROTO_TCP:
 729                hnae_set_field(*type_cs_vlan_tso,
 730                               HNS3_TXD_L4T_M,
 731                               HNS3_TXD_L4T_S,
 732                               HNS3_L4T_TCP);
 733                break;
 734        case IPPROTO_UDP:
 735                hnae_set_field(*type_cs_vlan_tso,
 736                               HNS3_TXD_L4T_M,
 737                               HNS3_TXD_L4T_S,
 738                               HNS3_L4T_UDP);
 739                break;
 740        case IPPROTO_SCTP:
 741                hnae_set_field(*type_cs_vlan_tso,
 742                               HNS3_TXD_L4T_M,
 743                               HNS3_TXD_L4T_S,
 744                               HNS3_L4T_SCTP);
 745                break;
 746        default:
  747                /* drop GSO skbs with an unsupported L4 type, because
  748                 * hardware can't calculate the csum when doing TSO.
  749                 */
 750                if (skb_is_gso(skb))
 751                        return -EDOM;
 752
  753                /* the stack computes the IP header already; fall back to
  754                 * software L4 checksum when not doing TSO.
  755                 */
 756                skb_checksum_help(skb);
 757                return 0;
 758        }
 759
 760        return 0;
 761}
 762
 763static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
 764{
 765        /* Config bd buffer end */
 766        hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
 767                       HNS3_TXD_BDTYPE_S, 0);
 768        hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
 769        hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
 770        hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
 771}
 772
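/* hns3_fill_desc_vtags - fill the txbd VLAN tag fields for a skb
 * @skb: the skb to transmit
 * @tx_ring: the ring the skb will be sent on
 * @inner_vlan_flag: txbd field for the inner VLAN insert bit
 * @out_vlan_flag: txbd field for the outer VLAN insert bit
 * @inner_vtag: updated with the inner VLAN tag
 * @out_vtag: updated with the outer VLAN tag
 *
 * Per the hardware strategy described below, the outer tag is used in
 * the double tagged case and the inner tag in the single tagged case.
 */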
 773static int hns3_fill_desc_vtags(struct sk_buff *skb,
 774                                struct hns3_enet_ring *tx_ring,
 775                                u32 *inner_vlan_flag,
 776                                u32 *out_vlan_flag,
 777                                u16 *inner_vtag,
 778                                u16 *out_vtag)
 779{
 780#define HNS3_TX_VLAN_PRIO_SHIFT 13
 781
 782        if (skb->protocol == htons(ETH_P_8021Q) &&
 783            !(tx_ring->tqp->handle->kinfo.netdev->features &
 784            NETIF_F_HW_VLAN_CTAG_TX)) {
 785                /* When HW VLAN acceleration is turned off, and the stack
  786                 * sets the protocol to 802.1q, the driver just needs to
 787                 * set the protocol to the encapsulated ethertype.
 788                 */
 789                skb->protocol = vlan_get_protocol(skb);
 790                return 0;
 791        }
 792
 793        if (skb_vlan_tag_present(skb)) {
 794                u16 vlan_tag;
 795
 796                vlan_tag = skb_vlan_tag_get(skb);
 797                vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;
 798
 799                /* Based on hw strategy, use out_vtag in two layer tag case,
 800                 * and use inner_vtag in one tag case.
 801                 */
 802                if (skb->protocol == htons(ETH_P_8021Q)) {
 803                        hnae_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
 804                        *out_vtag = vlan_tag;
 805                } else {
 806                        hnae_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
 807                        *inner_vtag = vlan_tag;
 808                }
 809        } else if (skb->protocol == htons(ETH_P_8021Q)) {
 810                struct vlan_ethhdr *vhdr;
 811                int rc;
 812
 813                rc = skb_cow_head(skb, 0);
 814                if (rc < 0)
 815                        return rc;
 816                vhdr = (struct vlan_ethhdr *)skb->data;
 817                vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
 818                                        << HNS3_TX_VLAN_PRIO_SHIFT);
 819        }
 820
 821        skb->protocol = vlan_get_protocol(skb);
 822        return 0;
 823}
 824
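/* hns3_fill_desc - fill one tx descriptor for a buffer
 * @ring: tx ring
 * @priv: the skb (DESC_TYPE_SKB) or the page (DESC_TYPE_PAGE)
 * @size: buffer length
 * @dma: mapped dma address of the buffer
 * @frag_end: non-zero if this is the last buffer of the packet
 * @type: DESC_TYPE_SKB for the head buffer, DESC_TYPE_PAGE for frags
 *
 * For the head buffer this also resolves VLAN tags, checksum offload
 * and TSO fields before advancing the ring's next_to_use pointer.
 */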
 825static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 826                          int size, dma_addr_t dma, int frag_end,
 827                          enum hns_desc_type type)
 828{
 829        struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
 830        struct hns3_desc *desc = &ring->desc[ring->next_to_use];
 831        u32 ol_type_vlan_len_msec = 0;
 832        u16 bdtp_fe_sc_vld_ra_ri = 0;
 833        u32 type_cs_vlan_tso = 0;
 834        struct sk_buff *skb;
 835        u16 inner_vtag = 0;
 836        u16 out_vtag = 0;
 837        u32 paylen = 0;
 838        u16 mss = 0;
 839        __be16 protocol;
 840        u8 ol4_proto;
 841        u8 il4_proto;
 842        int ret;
 843
 844        /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
 845        desc_cb->priv = priv;
 846        desc_cb->length = size;
 847        desc_cb->dma = dma;
 848        desc_cb->type = type;
 849
 850        /* now, fill the descriptor */
 851        desc->addr = cpu_to_le64(dma);
 852        desc->tx.send_size = cpu_to_le16((u16)size);
 853        hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
 854        desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
 855
 856        if (type == DESC_TYPE_SKB) {
 857                skb = (struct sk_buff *)priv;
 858                paylen = skb->len;
 859
 860                ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
 861                                           &ol_type_vlan_len_msec,
 862                                           &inner_vtag, &out_vtag);
 863                if (unlikely(ret))
 864                        return ret;
 865
 866                if (skb->ip_summed == CHECKSUM_PARTIAL) {
 867                        skb_reset_mac_len(skb);
 868                        protocol = skb->protocol;
 869
 870                        ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
 871                        if (ret)
 872                                return ret;
 873                        hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
 874                                            &type_cs_vlan_tso,
 875                                            &ol_type_vlan_len_msec);
 876                        ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
 877                                                      &type_cs_vlan_tso,
 878                                                      &ol_type_vlan_len_msec);
 879                        if (ret)
 880                                return ret;
 881
 882                        ret = hns3_set_tso(skb, &paylen, &mss,
 883                                           &type_cs_vlan_tso);
 884                        if (ret)
 885                                return ret;
 886                }
 887
 888                /* Set txbd */
 889                desc->tx.ol_type_vlan_len_msec =
 890                        cpu_to_le32(ol_type_vlan_len_msec);
 891                desc->tx.type_cs_vlan_tso_len =
 892                        cpu_to_le32(type_cs_vlan_tso);
 893                desc->tx.paylen = cpu_to_le32(paylen);
 894                desc->tx.mss = cpu_to_le16(mss);
 895                desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
 896                desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
 897        }
 898
  899        /* move ring pointer to next */
 900        ring_ptr_move_fw(ring, next_to_use);
 901
 902        return 0;
 903}
 904
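/* hns3_fill_desc_tso - fill descriptors for a buffer, splitting it
 * into HNS3_MAX_BD_SIZE sized pieces when the buffer is larger than
 * a single BD can carry.
 *
 * Only the first piece of a DESC_TYPE_SKB buffer keeps the SKB type;
 * the remaining pieces are filled as DESC_TYPE_PAGE.
 */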
 905static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv,
 906                              int size, dma_addr_t dma, int frag_end,
 907                              enum hns_desc_type type)
 908{
 909        unsigned int frag_buf_num;
 910        unsigned int k;
 911        int sizeoflast;
 912        int ret;
 913
 914        frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
 915        sizeoflast = size % HNS3_MAX_BD_SIZE;
 916        sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
 917
 918        /* When the frag size is bigger than hardware, split this frag */
 919        for (k = 0; k < frag_buf_num; k++) {
 920                ret = hns3_fill_desc(ring, priv,
 921                                     (k == frag_buf_num - 1) ?
 922                                sizeoflast : HNS3_MAX_BD_SIZE,
 923                                dma + HNS3_MAX_BD_SIZE * k,
 924                                frag_end && (k == frag_buf_num - 1) ? 1 : 0,
 925                                (type == DESC_TYPE_SKB && !k) ?
 926                                        DESC_TYPE_SKB : DESC_TYPE_PAGE);
 927                if (ret)
 928                        return ret;
 929        }
 930
 931        return 0;
 932}
 933
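/* hns3_nic_maybe_stop_tso - check ring space for a TSO skb
 * @out_skb: the skb to transmit
 * @bnum: updated with the number of BDs the skb will consume
 * @ring: tx ring
 *
 * Counts one BD per HNS3_MAX_BD_SIZE chunk of the linear part and of
 * every fragment. Returns -EBUSY if the ring does not have enough
 * free descriptors and -ENOMEM if a single fragment needs more than
 * HNS3_MAX_BD_PER_FRAG BDs.
 */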
 934static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
 935                                   struct hns3_enet_ring *ring)
 936{
 937        struct sk_buff *skb = *out_skb;
 938        struct skb_frag_struct *frag;
 939        int bdnum_for_frag;
 940        int frag_num;
 941        int buf_num;
 942        int size;
 943        int i;
 944
 945        size = skb_headlen(skb);
 946        buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
 947
 948        frag_num = skb_shinfo(skb)->nr_frags;
 949        for (i = 0; i < frag_num; i++) {
 950                frag = &skb_shinfo(skb)->frags[i];
 951                size = skb_frag_size(frag);
 952                bdnum_for_frag =
 953                        (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
 954                if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)
 955                        return -ENOMEM;
 956
 957                buf_num += bdnum_for_frag;
 958        }
 959
 960        if (buf_num > ring_space(ring))
 961                return -EBUSY;
 962
 963        *bnum = buf_num;
 964        return 0;
 965}
 966
 967static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
 968                                  struct hns3_enet_ring *ring)
 969{
 970        struct sk_buff *skb = *out_skb;
 971        int buf_num;
 972
 973        /* No. of segments (plus a header) */
 974        buf_num = skb_shinfo(skb)->nr_frags + 1;
 975
 976        if (buf_num > ring_space(ring))
 977                return -EBUSY;
 978
 979        *bnum = buf_num;
 980
 981        return 0;
 982}
 983
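/* hns_nic_dma_unmap - unwind dma mappings after a failed xmit
 * @ring: tx ring
 * @next_to_use_orig: next_to_use value to roll the ring back to
 *
 * Rolls next_to_use back one BD at a time until it reaches
 * @next_to_use_orig, unmapping the skb head (DESC_TYPE_SKB) with
 * dma_unmap_single() and fragment pages with dma_unmap_page().
 */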
 984static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig)
 985{
 986        struct device *dev = ring_to_dev(ring);
 987        unsigned int i;
 988
 989        for (i = 0; i < ring->desc_num; i++) {
 990                /* check if this is where we started */
 991                if (ring->next_to_use == next_to_use_orig)
 992                        break;
 993
 994                /* unmap the descriptor dma address */
 995                if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
 996                        dma_unmap_single(dev,
 997                                         ring->desc_cb[ring->next_to_use].dma,
 998                                        ring->desc_cb[ring->next_to_use].length,
 999                                        DMA_TO_DEVICE);
1000                else
1001                        dma_unmap_page(dev,
1002                                       ring->desc_cb[ring->next_to_use].dma,
1003                                       ring->desc_cb[ring->next_to_use].length,
1004                                       DMA_TO_DEVICE);
1005
1006                /* rollback one */
1007                ring_ptr_move_bw(ring, next_to_use);
1008        }
1009}
1010
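/* hns3_nic_net_xmit - transmit one skb
 * @skb: the skb to transmit
 * @netdev: net device
 *
 * Checks ring space via ops.maybe_stop_tx, dma-maps the linear part
 * and every fragment, fills the tx descriptors through ops.fill_desc
 * and finally submits them with hnae_queue_xmit(). On mapping or
 * descriptor errors the already filled BDs are unmapped and the skb
 * is dropped; -EBUSY stops the subqueue and returns NETDEV_TX_BUSY.
 */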
1011netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
1012{
1013        struct hns3_nic_priv *priv = netdev_priv(netdev);
1014        struct hns3_nic_ring_data *ring_data =
1015                &tx_ring_data(priv, skb->queue_mapping);
1016        struct hns3_enet_ring *ring = ring_data->ring;
1017        struct device *dev = priv->dev;
1018        struct netdev_queue *dev_queue;
1019        struct skb_frag_struct *frag;
1020        int next_to_use_head;
1021        int next_to_use_frag;
1022        dma_addr_t dma;
1023        int buf_num;
1024        int seg_num;
1025        int size;
1026        int ret;
1027        int i;
1028
1029        /* Prefetch the data used later */
1030        prefetch(skb->data);
1031
1032        switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
1033        case -EBUSY:
1034                u64_stats_update_begin(&ring->syncp);
1035                ring->stats.tx_busy++;
1036                u64_stats_update_end(&ring->syncp);
1037
1038                goto out_net_tx_busy;
1039        case -ENOMEM:
1040                u64_stats_update_begin(&ring->syncp);
1041                ring->stats.sw_err_cnt++;
1042                u64_stats_update_end(&ring->syncp);
1043                netdev_err(netdev, "no memory to xmit!\n");
1044
1045                goto out_err_tx_ok;
1046        default:
1047                break;
1048        }
1049
1050        /* No. of segments (plus a header) */
1051        seg_num = skb_shinfo(skb)->nr_frags + 1;
1052        /* Fill the first part */
1053        size = skb_headlen(skb);
1054
1055        next_to_use_head = ring->next_to_use;
1056
1057        dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
1058        if (dma_mapping_error(dev, dma)) {
1059                netdev_err(netdev, "TX head DMA map failed\n");
1060                ring->stats.sw_err_cnt++;
1061                goto out_err_tx_ok;
1062        }
1063
1064        ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
1065                           DESC_TYPE_SKB);
1066        if (ret)
1067                goto head_dma_map_err;
1068
1069        next_to_use_frag = ring->next_to_use;
1070        /* Fill the fragments */
1071        for (i = 1; i < seg_num; i++) {
1072                frag = &skb_shinfo(skb)->frags[i - 1];
1073                size = skb_frag_size(frag);
1074                dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
1075                if (dma_mapping_error(dev, dma)) {
1076                        netdev_err(netdev, "TX frag(%d) DMA map failed\n", i);
1077                        ring->stats.sw_err_cnt++;
1078                        goto frag_dma_map_err;
1079                }
1080                ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
1081                                    seg_num - 1 == i ? 1 : 0,
1082                                    DESC_TYPE_PAGE);
1083
1084                if (ret)
1085                        goto frag_dma_map_err;
1086        }
1087
 1088        /* All frags have been translated to descriptors */
1089        dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
1090        netdev_tx_sent_queue(dev_queue, skb->len);
1091
1092        wmb(); /* Commit all data before submit */
1093
1094        hnae_queue_xmit(ring->tqp, buf_num);
1095
1096        return NETDEV_TX_OK;
1097
1098frag_dma_map_err:
1099        hns_nic_dma_unmap(ring, next_to_use_frag);
1100
1101head_dma_map_err:
1102        hns_nic_dma_unmap(ring, next_to_use_head);
1103
1104out_err_tx_ok:
1105        dev_kfree_skb_any(skb);
1106        return NETDEV_TX_OK;
1107
1108out_net_tx_busy:
1109        netif_stop_subqueue(netdev, ring_data->queue_index);
1110        smp_mb(); /* Commit all data before submit */
1111
1112        return NETDEV_TX_BUSY;
1113}
1114
1115static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
1116{
1117        struct hnae3_handle *h = hns3_get_handle(netdev);
1118        struct sockaddr *mac_addr = p;
1119        int ret;
1120
1121        if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
1122                return -EADDRNOTAVAIL;
1123
1124        ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
1125        if (ret) {
1126                netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
1127                return ret;
1128        }
1129
1130        ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);
1131
1132        return 0;
1133}
1134
1135static int hns3_nic_set_features(struct net_device *netdev,
1136                                 netdev_features_t features)
1137{
1138        netdev_features_t changed = netdev->features ^ features;
1139        struct hns3_nic_priv *priv = netdev_priv(netdev);
1140        struct hnae3_handle *h = priv->ae_handle;
1141        int ret;
1142
1143        if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
1144                if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
1145                        priv->ops.fill_desc = hns3_fill_desc_tso;
1146                        priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
1147                } else {
1148                        priv->ops.fill_desc = hns3_fill_desc;
1149                        priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
1150                }
1151        }
1152
1153        if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
1154            h->ae_algo->ops->enable_vlan_filter) {
1155                if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
1156                        h->ae_algo->ops->enable_vlan_filter(h, true);
1157                else
1158                        h->ae_algo->ops->enable_vlan_filter(h, false);
1159        }
1160
1161        if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
1162            h->ae_algo->ops->enable_hw_strip_rxvtag) {
1163                if (features & NETIF_F_HW_VLAN_CTAG_RX)
1164                        ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true);
1165                else
1166                        ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, false);
1167
1168                if (ret)
1169                        return ret;
1170        }
1171
1172        netdev->features = features;
1173        return 0;
1174}
1175
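/* hns3_nic_get_stats64 - collect 64bit statistics for the netdev
 * @netdev: net device
 * @stats: structure to fill
 *
 * Per-ring tx/rx byte, packet and drop counters are accumulated under
 * u64_stats sequence protection; the remaining fields are copied from
 * netdev->stats after asking the ae_algo layer to refresh them.
 */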
1176static void hns3_nic_get_stats64(struct net_device *netdev,
1177                                 struct rtnl_link_stats64 *stats)
1178{
1179        struct hns3_nic_priv *priv = netdev_priv(netdev);
1180        int queue_num = priv->ae_handle->kinfo.num_tqps;
1181        struct hnae3_handle *handle = priv->ae_handle;
1182        struct hns3_enet_ring *ring;
1183        unsigned int start;
1184        unsigned int idx;
1185        u64 tx_bytes = 0;
1186        u64 rx_bytes = 0;
1187        u64 tx_pkts = 0;
1188        u64 rx_pkts = 0;
1189        u64 tx_drop = 0;
1190        u64 rx_drop = 0;
1191
1192        if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
1193                return;
1194
1195        handle->ae_algo->ops->update_stats(handle, &netdev->stats);
1196
1197        for (idx = 0; idx < queue_num; idx++) {
1198                /* fetch the tx stats */
1199                ring = priv->ring_data[idx].ring;
1200                do {
1201                        start = u64_stats_fetch_begin_irq(&ring->syncp);
1202                        tx_bytes += ring->stats.tx_bytes;
1203                        tx_pkts += ring->stats.tx_pkts;
1204                        tx_drop += ring->stats.tx_busy;
1205                        tx_drop += ring->stats.sw_err_cnt;
1206                } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1207
1208                /* fetch the rx stats */
1209                ring = priv->ring_data[idx + queue_num].ring;
1210                do {
1211                        start = u64_stats_fetch_begin_irq(&ring->syncp);
1212                        rx_bytes += ring->stats.rx_bytes;
1213                        rx_pkts += ring->stats.rx_pkts;
1214                        rx_drop += ring->stats.non_vld_descs;
1215                        rx_drop += ring->stats.err_pkt_len;
1216                        rx_drop += ring->stats.l2_err;
1217                } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1218        }
1219
1220        stats->tx_bytes = tx_bytes;
1221        stats->tx_packets = tx_pkts;
1222        stats->rx_bytes = rx_bytes;
1223        stats->rx_packets = rx_pkts;
1224
1225        stats->rx_errors = netdev->stats.rx_errors;
1226        stats->multicast = netdev->stats.multicast;
1227        stats->rx_length_errors = netdev->stats.rx_length_errors;
1228        stats->rx_crc_errors = netdev->stats.rx_crc_errors;
1229        stats->rx_missed_errors = netdev->stats.rx_missed_errors;
1230
1231        stats->tx_errors = netdev->stats.tx_errors;
1232        stats->rx_dropped = rx_drop + netdev->stats.rx_dropped;
1233        stats->tx_dropped = tx_drop + netdev->stats.tx_dropped;
1234        stats->collisions = netdev->stats.collisions;
1235        stats->rx_over_errors = netdev->stats.rx_over_errors;
1236        stats->rx_frame_errors = netdev->stats.rx_frame_errors;
1237        stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
1238        stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
1239        stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
1240        stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
1241        stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
1242        stats->tx_window_errors = netdev->stats.tx_window_errors;
1243        stats->rx_compressed = netdev->stats.rx_compressed;
1244        stats->tx_compressed = netdev->stats.tx_compressed;
1245}
1246
1247static void hns3_add_tunnel_port(struct net_device *netdev, u16 port,
1248                                 enum hns3_udp_tnl_type type)
1249{
1250        struct hns3_nic_priv *priv = netdev_priv(netdev);
1251        struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
1252        struct hnae3_handle *h = priv->ae_handle;
1253
1254        if (udp_tnl->used && udp_tnl->dst_port == port) {
1255                udp_tnl->used++;
1256                return;
1257        }
1258
1259        if (udp_tnl->used) {
1260                netdev_warn(netdev,
1261                            "UDP tunnel [%d], port [%d] offload\n", type, port);
1262                return;
1263        }
1264
1265        udp_tnl->dst_port = port;
1266        udp_tnl->used = 1;
1267        /* TBD send command to hardware to add port */
1268        if (h->ae_algo->ops->add_tunnel_udp)
1269                h->ae_algo->ops->add_tunnel_udp(h, port);
1270}
1271
1272static void hns3_del_tunnel_port(struct net_device *netdev, u16 port,
1273                                 enum hns3_udp_tnl_type type)
1274{
1275        struct hns3_nic_priv *priv = netdev_priv(netdev);
1276        struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
1277        struct hnae3_handle *h = priv->ae_handle;
1278
1279        if (!udp_tnl->used || udp_tnl->dst_port != port) {
1280                netdev_warn(netdev,
1281                            "Invalid UDP tunnel port %d\n", port);
1282                return;
1283        }
1284
1285        udp_tnl->used--;
1286        if (udp_tnl->used)
1287                return;
1288
1289        udp_tnl->dst_port = 0;
1290        /* TBD send command to hardware to del port  */
1291        if (h->ae_algo->ops->del_tunnel_udp)
1292                h->ae_algo->ops->del_tunnel_udp(h, port);
1293}
1294
 1295/* hns3_nic_udp_tunnel_add - Get notification about UDP tunnel ports
 1296 * @netdev: This physical port's netdev
1297 * @ti: Tunnel information
1298 */
1299static void hns3_nic_udp_tunnel_add(struct net_device *netdev,
1300                                    struct udp_tunnel_info *ti)
1301{
1302        u16 port_n = ntohs(ti->port);
1303
1304        switch (ti->type) {
1305        case UDP_TUNNEL_TYPE_VXLAN:
1306                hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
1307                break;
1308        case UDP_TUNNEL_TYPE_GENEVE:
1309                hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
1310                break;
1311        default:
1312                netdev_err(netdev, "unsupported tunnel type %d\n", ti->type);
1313                break;
1314        }
1315}
1316
1317static void hns3_nic_udp_tunnel_del(struct net_device *netdev,
1318                                    struct udp_tunnel_info *ti)
1319{
1320        u16 port_n = ntohs(ti->port);
1321
1322        switch (ti->type) {
1323        case UDP_TUNNEL_TYPE_VXLAN:
1324                hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
1325                break;
1326        case UDP_TUNNEL_TYPE_GENEVE:
1327                hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
1328                break;
1329        default:
1330                break;
1331        }
1332}
1333
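/* hns3_setup_tc - configure mqprio/TC mapping through the DCB ops
 * @netdev: net device
 * @type_data: struct tc_mqprio_qopt_offload from the stack
 *
 * If the interface is running it is stopped while the prio-to-TC map
 * is pushed to the dcb_ops->setup_tc hook, then the netdev TC/queue
 * layout and the real queue count are updated to match.
 */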
1334static int hns3_setup_tc(struct net_device *netdev, void *type_data)
1335{
1336        struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
1337        struct hnae3_handle *h = hns3_get_handle(netdev);
1338        struct hnae3_knic_private_info *kinfo = &h->kinfo;
1339        u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
1340        u8 tc = mqprio_qopt->qopt.num_tc;
1341        u16 mode = mqprio_qopt->mode;
1342        u8 hw = mqprio_qopt->qopt.hw;
1343        bool if_running;
1344        unsigned int i;
1345        int ret;
1346
1347        if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
1348               mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
1349                return -EOPNOTSUPP;
1350
1351        if (tc > HNAE3_MAX_TC)
1352                return -EINVAL;
1353
1354        if (!netdev)
1355                return -EINVAL;
1356
1357        if_running = netif_running(netdev);
1358        if (if_running) {
1359                hns3_nic_net_stop(netdev);
1360                msleep(100);
1361        }
1362
1363        ret = (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
1364                kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
1365        if (ret)
1366                goto out;
1367
1368        if (tc <= 1) {
1369                netdev_reset_tc(netdev);
1370        } else {
1371                ret = netdev_set_num_tc(netdev, tc);
1372                if (ret)
1373                        goto out;
1374
1375                for (i = 0; i < HNAE3_MAX_TC; i++) {
1376                        if (!kinfo->tc_info[i].enable)
1377                                continue;
1378
1379                        netdev_set_tc_queue(netdev,
1380                                            kinfo->tc_info[i].tc,
1381                                            kinfo->tc_info[i].tqp_count,
1382                                            kinfo->tc_info[i].tqp_offset);
1383                }
1384        }
1385
1386        ret = hns3_nic_set_real_num_queue(netdev);
1387
1388out:
1389        if (if_running)
1390                hns3_nic_net_open(netdev);
1391
1392        return ret;
1393}
1394
1395static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
1396                             void *type_data)
1397{
1398        if (type != TC_SETUP_QDISC_MQPRIO)
1399                return -EOPNOTSUPP;
1400
1401        return hns3_setup_tc(dev, type_data);
1402}
1403
1404static int hns3_vlan_rx_add_vid(struct net_device *netdev,
1405                                __be16 proto, u16 vid)
1406{
1407        struct hnae3_handle *h = hns3_get_handle(netdev);
1408        struct hns3_nic_priv *priv = netdev_priv(netdev);
1409        int ret = -EIO;
1410
1411        if (h->ae_algo->ops->set_vlan_filter)
1412                ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
1413
1414        if (!ret)
1415                set_bit(vid, priv->active_vlans);
1416
1417        return ret;
1418}
1419
1420static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
1421                                 __be16 proto, u16 vid)
1422{
1423        struct hnae3_handle *h = hns3_get_handle(netdev);
1424        struct hns3_nic_priv *priv = netdev_priv(netdev);
1425        int ret = -EIO;
1426
1427        if (h->ae_algo->ops->set_vlan_filter)
1428                ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
1429
1430        if (!ret)
1431                clear_bit(vid, priv->active_vlans);
1432
1433        return ret;
1434}
1435
1436static void hns3_restore_vlan(struct net_device *netdev)
1437{
1438        struct hns3_nic_priv *priv = netdev_priv(netdev);
1439        u16 vid;
1440        int ret;
1441
1442        for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
1443                ret = hns3_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
1444                if (ret)
1445                        netdev_warn(netdev, "Restore vlan: %d filter, ret:%d\n",
1446                                    vid, ret);
1447        }
1448}
1449
1450static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1451                                u8 qos, __be16 vlan_proto)
1452{
1453        struct hnae3_handle *h = hns3_get_handle(netdev);
1454        int ret = -EIO;
1455
1456        if (h->ae_algo->ops->set_vf_vlan_filter)
1457                ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
1458                                                   qos, vlan_proto);
1459
1460        return ret;
1461}
1462
1463static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
1464{
1465        struct hnae3_handle *h = hns3_get_handle(netdev);
1466        bool if_running = netif_running(netdev);
1467        int ret;
1468
1469        if (!h->ae_algo->ops->set_mtu)
1470                return -EOPNOTSUPP;
1471
1472        /* if this was called with netdev up then bring netdevice down */
1473        if (if_running) {
1474                (void)hns3_nic_net_stop(netdev);
1475                msleep(100);
1476        }
1477
1478        ret = h->ae_algo->ops->set_mtu(h, new_mtu);
1479        if (ret) {
1480                netdev_err(netdev, "failed to change MTU in hardware %d\n",
1481                           ret);
1482                /* do not return here; bring the netdev back up below */
1483        } else {
1484                netdev->mtu = new_mtu;
1485        }
1486
1487        /* if the netdev was running earlier, bring it up again */
1488        if (if_running && hns3_nic_net_open(netdev))
1489                ret = -EINVAL;
1490
1491        return ret;
1492}
1493
1494static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
1495{
1496        struct hns3_nic_priv *priv = netdev_priv(ndev);
1497        struct hns3_enet_ring *tx_ring = NULL;
1498        int timeout_queue = 0;
1499        int hw_head, hw_tail;
1500        int i;
1501
1502        /* Find the stopped queue the same way the stack does */
1503        for (i = 0; i < ndev->real_num_tx_queues; i++) {
1504                struct netdev_queue *q;
1505                unsigned long trans_start;
1506
1507                q = netdev_get_tx_queue(ndev, i);
1508                trans_start = q->trans_start;
1509                if (netif_xmit_stopped(q) &&
1510                    time_after(jiffies,
1511                               (trans_start + ndev->watchdog_timeo))) {
1512                        timeout_queue = i;
1513                        break;
1514                }
1515        }
1516
1517        if (i == ndev->real_num_tx_queues) {
1518                netdev_info(ndev,
1519                            "no netdev TX timeout queue found, timeout count: %llu\n",
1520                            priv->tx_timeout_count);
1521                return false;
1522        }
1523
1524        tx_ring = priv->ring_data[timeout_queue].ring;
1525
1526        hw_head = readl_relaxed(tx_ring->tqp->io_base +
1527                                HNS3_RING_TX_RING_HEAD_REG);
1528        hw_tail = readl_relaxed(tx_ring->tqp->io_base +
1529                                HNS3_RING_TX_RING_TAIL_REG);
1530        netdev_info(ndev,
1531                    "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n",
1532                    priv->tx_timeout_count,
1533                    timeout_queue,
1534                    tx_ring->next_to_use,
1535                    tx_ring->next_to_clean,
1536                    hw_head,
1537                    hw_tail,
1538                    readl(tx_ring->tqp_vector->mask_addr));
1539
1540        return true;
1541}
1542
1543static void hns3_nic_net_timeout(struct net_device *ndev)
1544{
1545        struct hns3_nic_priv *priv = netdev_priv(ndev);
1546        struct hnae3_handle *h = priv->ae_handle;
1547
1548        if (!hns3_get_tx_timeo_queue_info(ndev))
1549                return;
1550
1551        priv->tx_timeout_count++;
1552
1553        if (time_before(jiffies, (h->last_reset_time + ndev->watchdog_timeo)))
1554                return;
1555
1556        /* request the reset */
1557        if (h->ae_algo->ops->reset_event)
1558                h->ae_algo->ops->reset_event(h);
1559}
1560
1561static const struct net_device_ops hns3_nic_netdev_ops = {
1562        .ndo_open               = hns3_nic_net_open,
1563        .ndo_stop               = hns3_nic_net_stop,
1564        .ndo_start_xmit         = hns3_nic_net_xmit,
1565        .ndo_tx_timeout         = hns3_nic_net_timeout,
1566        .ndo_set_mac_address    = hns3_nic_net_set_mac_address,
1567        .ndo_change_mtu         = hns3_nic_change_mtu,
1568        .ndo_set_features       = hns3_nic_set_features,
1569        .ndo_get_stats64        = hns3_nic_get_stats64,
1570        .ndo_setup_tc           = hns3_nic_setup_tc,
1571        .ndo_set_rx_mode        = hns3_nic_set_rx_mode,
1572        .ndo_udp_tunnel_add     = hns3_nic_udp_tunnel_add,
1573        .ndo_udp_tunnel_del     = hns3_nic_udp_tunnel_del,
1574        .ndo_vlan_rx_add_vid    = hns3_vlan_rx_add_vid,
1575        .ndo_vlan_rx_kill_vid   = hns3_vlan_rx_kill_vid,
1576        .ndo_set_vf_vlan        = hns3_ndo_set_vf_vlan,
1577};
1578
1579/* hns3_probe - Device initialization routine
1580 * @pdev: PCI device information struct
1581 * @ent: entry in hns3_pci_tbl
1582 *
1583 * hns3_probe initializes a PF identified by a pci_dev structure.
1584 * The OS initialization, configuring of the PF private structure,
1585 * and a hardware reset occur.
1586 *
1587 * Returns 0 on success, negative on failure
1588 */
1589static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1590{
1591        struct hnae3_ae_dev *ae_dev;
1592        int ret;
1593
1594        ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
1595                              GFP_KERNEL);
1596        if (!ae_dev) {
1597                ret = -ENOMEM;
1598                return ret;
1599        }
1600
1601        ae_dev->pdev = pdev;
1602        ae_dev->flag = ent->driver_data;
1603        ae_dev->dev_type = HNAE3_DEV_KNIC;
1604        pci_set_drvdata(pdev, ae_dev);
1605
1606        return hnae3_register_ae_dev(ae_dev);
1607}
1608
1609/* hns3_remove - Device removal routine
1610 * @pdev: PCI device information struct
1611 */
1612static void hns3_remove(struct pci_dev *pdev)
1613{
1614        struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1615
1616        hnae3_unregister_ae_dev(ae_dev);
1617}
1618
1619static struct pci_driver hns3_driver = {
1620        .name     = hns3_driver_name,
1621        .id_table = hns3_pci_tbl,
1622        .probe    = hns3_probe,
1623        .remove   = hns3_remove,
1624};
1625
1626/* set default features for hns3 */
1627static void hns3_set_default_feature(struct net_device *netdev)
1628{
1629        struct hnae3_handle *h = hns3_get_handle(netdev);
1630
1631        netdev->priv_flags |= IFF_UNICAST_FLT;
1632
1633        netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1634                NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1635                NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1636                NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1637                NETIF_F_GSO_UDP_TUNNEL_CSUM;
1638
1639        netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
1640
1641        netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
1642
1643        netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1644                NETIF_F_HW_VLAN_CTAG_FILTER |
1645                NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
1646                NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1647                NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1648                NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1649                NETIF_F_GSO_UDP_TUNNEL_CSUM;
1650
1651        netdev->vlan_features |=
1652                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
1653                NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
1654                NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1655                NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1656                NETIF_F_GSO_UDP_TUNNEL_CSUM;
1657
1658        netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1659                NETIF_F_HW_VLAN_CTAG_TX |
1660                NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1661                NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1662                NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1663                NETIF_F_GSO_UDP_TUNNEL_CSUM;
1664
1665        if (!(h->flags & HNAE3_SUPPORT_VF))
1666                netdev->hw_features |=
1667                        NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
1668}
1669
1670static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
1671                             struct hns3_desc_cb *cb)
1672{
1673        unsigned int order = hnae_page_order(ring);
1674        struct page *p;
1675
1676        p = dev_alloc_pages(order);
1677        if (!p)
1678                return -ENOMEM;
1679
1680        cb->priv = p;
1681        cb->page_offset = 0;
1682        cb->reuse_flag = 0;
1683        cb->buf  = page_address(p);
1684        cb->length = hnae_page_size(ring);
1685        cb->type = DESC_TYPE_PAGE;
1686
1687        return 0;
1688}
1689
1690static void hns3_free_buffer(struct hns3_enet_ring *ring,
1691                             struct hns3_desc_cb *cb)
1692{
1693        if (cb->type == DESC_TYPE_SKB)
1694                dev_kfree_skb_any((struct sk_buff *)cb->priv);
1695        else if (!HNAE3_IS_TX_RING(ring))
1696                put_page((struct page *)cb->priv);
1697        memset(cb, 0, sizeof(*cb));
1698}
1699
1700static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
1701{
1702        cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
1703                               cb->length, ring_to_dma_dir(ring));
1704
1705        if (dma_mapping_error(ring_to_dev(ring), cb->dma))
1706                return -EIO;
1707
1708        return 0;
1709}
1710
1711static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
1712                              struct hns3_desc_cb *cb)
1713{
1714        if (cb->type == DESC_TYPE_SKB)
1715                dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
1716                                 ring_to_dma_dir(ring));
1717        else
1718                dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
1719                               ring_to_dma_dir(ring));
1720}
1721
1722static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
1723{
1724        hns3_unmap_buffer(ring, &ring->desc_cb[i]);
1725        ring->desc[i].addr = 0;
1726}
1727
1728static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
1729{
1730        struct hns3_desc_cb *cb = &ring->desc_cb[i];
1731
1732        if (!ring->desc_cb[i].dma)
1733                return;
1734
1735        hns3_buffer_detach(ring, i);
1736        hns3_free_buffer(ring, cb);
1737}
1738
1739static void hns3_free_buffers(struct hns3_enet_ring *ring)
1740{
1741        int i;
1742
1743        for (i = 0; i < ring->desc_num; i++)
1744                hns3_free_buffer_detach(ring, i);
1745}
1746
1747/* free desc along with its attached buffer */
1748static void hns3_free_desc(struct hns3_enet_ring *ring)
1749{
1750        hns3_free_buffers(ring);
1751
1752        dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
1753                         ring->desc_num * sizeof(ring->desc[0]),
1754                         DMA_BIDIRECTIONAL);
1755        ring->desc_dma_addr = 0;
1756        kfree(ring->desc);
1757        ring->desc = NULL;
1758}
1759
1760static int hns3_alloc_desc(struct hns3_enet_ring *ring)
1761{
1762        int size = ring->desc_num * sizeof(ring->desc[0]);
1763
1764        ring->desc = kzalloc(size, GFP_KERNEL);
1765        if (!ring->desc)
1766                return -ENOMEM;
1767
1768        ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc,
1769                                             size, DMA_BIDIRECTIONAL);
1770        if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
1771                ring->desc_dma_addr = 0;
1772                kfree(ring->desc);
1773                ring->desc = NULL;
1774                return -ENOMEM;
1775        }
1776
1777        return 0;
1778}
1779
1780static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
1781                                   struct hns3_desc_cb *cb)
1782{
1783        int ret;
1784
1785        ret = hns3_alloc_buffer(ring, cb);
1786        if (ret)
1787                goto out;
1788
1789        ret = hns3_map_buffer(ring, cb);
1790        if (ret)
1791                goto out_with_buf;
1792
1793        return 0;
1794
1795out_with_buf:
1796        hns3_free_buffer(ring, cb);
1797out:
1798        return ret;
1799}
1800
1801static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
1802{
1803        int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);
1804
1805        if (ret)
1806                return ret;
1807
1808        ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1809
1810        return 0;
1811}
1812
1813/* Allocate memory for raw packets and map them for DMA */
1814static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
1815{
1816        int i, j, ret;
1817
1818        for (i = 0; i < ring->desc_num; i++) {
1819                ret = hns3_alloc_buffer_attach(ring, i);
1820                if (ret)
1821                        goto out_buffer_fail;
1822        }
1823
1824        return 0;
1825
1826out_buffer_fail:
1827        for (j = i - 1; j >= 0; j--)
1828                hns3_free_buffer_detach(ring, j);
1829        return ret;
1830}
1831
1832/* detach an in-use buffer and replace it with a reserved one */
1833static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
1834                                struct hns3_desc_cb *res_cb)
1835{
1836        hns3_unmap_buffer(ring, &ring->desc_cb[i]);
1837        ring->desc_cb[i] = *res_cb;
1838        ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1839}
1840
1841static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
1842{
1843        ring->desc_cb[i].reuse_flag = 0;
1844        ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
1845                + ring->desc_cb[i].page_offset);
1846}
1847
1848static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
1849                                      int *pkts)
1850{
1851        struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
1852
1853        (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
1854        (*bytes) += desc_cb->length;
1855        /* desc_cb is cleared by hns3_free_buffer_detach() below */
1856        hns3_free_buffer_detach(ring, ring->next_to_clean);
1857
1858        ring_ptr_move_fw(ring, next_to_clean);
1859}
1860
1861static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
1862{
1863        int u = ring->next_to_use;
1864        int c = ring->next_to_clean;
1865
1866        if (unlikely(h > ring->desc_num))
1867                return 0;
1868
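            /* A valid head lies in the window (next_to_clean, next_to_use],
             * taking wrap-around into account when next_to_use has wrapped
             * behind next_to_clean.
             */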
1869        return u > c ? (h > c && h <= u) : (h > c || h <= u);
1870}
1871
1872bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
1873{
1874        struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1875        struct netdev_queue *dev_queue;
1876        int bytes, pkts;
1877        int head;
1878
1879        head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
1880        rmb(); /* Make sure head is ready before touching any data */
1881
1882        if (is_ring_empty(ring) || head == ring->next_to_clean)
1883                return true; /* no data to poll */
1884
1885        if (!is_valid_clean_head(ring, head)) {
1886                netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
1887                           ring->next_to_use, ring->next_to_clean);
1888
1889                u64_stats_update_begin(&ring->syncp);
1890                ring->stats.io_err_cnt++;
1891                u64_stats_update_end(&ring->syncp);
1892                return true;
1893        }
1894
1895        bytes = 0;
1896        pkts = 0;
1897        while (head != ring->next_to_clean && budget) {
1898                hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
1899                /* Issue prefetch for next Tx descriptor */
1900                prefetch(&ring->desc_cb[ring->next_to_clean]);
1901                budget--;
1902        }
1903
1904        ring->tqp_vector->tx_group.total_bytes += bytes;
1905        ring->tqp_vector->tx_group.total_packets += pkts;
1906
1907        u64_stats_update_begin(&ring->syncp);
1908        ring->stats.tx_bytes += bytes;
1909        ring->stats.tx_pkts += pkts;
1910        u64_stats_update_end(&ring->syncp);
1911
1912        dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
1913        netdev_tx_completed_queue(dev_queue, pkts, bytes);
1914
1915        if (unlikely(pkts && netif_carrier_ok(netdev) &&
1916                     (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
1917                /* Make sure that anybody stopping the queue after this
1918                 * sees the new next_to_clean.
1919                 */
1920                smp_mb();
1921                if (netif_tx_queue_stopped(dev_queue)) {
1922                        netif_tx_wake_queue(dev_queue);
1923                        ring->stats.restart_queue++;
1924                }
1925        }
1926
1927        return !!budget;
1928}
1929
1930static int hns3_desc_unused(struct hns3_enet_ring *ring)
1931{
1932        int ntc = ring->next_to_clean;
1933        int ntu = ring->next_to_use;
1934
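            /* Unused descriptors sit between next_to_use and next_to_clean,
             * i.e. (ntc - ntu) modulo the ring size.
             */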
1935        return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
1936}
1937
1938static void
1939hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleaned_count)
1940{
1941        struct hns3_desc_cb *desc_cb;
1942        struct hns3_desc_cb res_cbs;
1943        int i, ret;
1944
1945        for (i = 0; i < cleaned_count; i++) {
1946                desc_cb = &ring->desc_cb[ring->next_to_use];
1947                if (desc_cb->reuse_flag) {
1948                        u64_stats_update_begin(&ring->syncp);
1949                        ring->stats.reuse_pg_cnt++;
1950                        u64_stats_update_end(&ring->syncp);
1951
1952                        hns3_reuse_buffer(ring, ring->next_to_use);
1953                } else {
1954                        ret = hns3_reserve_buffer_map(ring, &res_cbs);
1955                        if (ret) {
1956                                u64_stats_update_begin(&ring->syncp);
1957                                ring->stats.sw_err_cnt++;
1958                                u64_stats_update_end(&ring->syncp);
1959
1960                                netdev_err(ring->tqp->handle->kinfo.netdev,
1961                                           "hnae reserve buffer map failed.\n");
1962                                break;
1963                        }
1964                        hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
1965                }
1966
1967                ring_ptr_move_fw(ring, next_to_use);
1968        }
1969
1970        wmb(); /* Make sure all data has been written before submitting */
1971        writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
1972}
1973
1974/* hns3_nic_get_headlen - determine size of header for LRO/GRO
1975 * @data: pointer to the start of the headers
1976 * @flag: RX descriptor l234info field describing the VLAN/L3/L4 headers
     * @max_size: total length of section to find headers in
1977 *
1978 * This function is meant to determine the length of headers that will
1979 * be recognized by hardware for LRO, GRO, and RSC offloads.  The main
1980 * motivation of doing this is to only perform one pull for IPv4 TCP
1981 * packets so that we can do basic things like calculating the gso_size
1982 * based on the average data per packet.
1983 */
1984static unsigned int hns3_nic_get_headlen(unsigned char *data, u32 flag,
1985                                         unsigned int max_size)
1986{
1987        unsigned char *network;
1988        u8 hlen;
1989
1990        /* This should never happen, but better safe than sorry */
1991        if (max_size < ETH_HLEN)
1992                return max_size;
1993
1994        /* Initialize network frame pointer */
1995        network = data;
1996
1997        /* Set first protocol and move network header forward */
1998        network += ETH_HLEN;
1999
2000        /* Handle any vlan tag if present */
2001        if (hnae_get_field(flag, HNS3_RXD_VLAN_M, HNS3_RXD_VLAN_S)
2002                == HNS3_RX_FLAG_VLAN_PRESENT) {
2003                if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
2004                        return max_size;
2005
2006                network += VLAN_HLEN;
2007        }
2008
2009        /* Handle L3 protocols */
2010        if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
2011                == HNS3_RX_FLAG_L3ID_IPV4) {
2012                if ((typeof(max_size))(network - data) >
2013                    (max_size - sizeof(struct iphdr)))
2014                        return max_size;
2015
2016                /* Access ihl as a u8 to avoid unaligned access on ia64 */
2017                hlen = (network[0] & 0x0F) << 2;
2018
2019                /* Verify hlen meets minimum size requirements */
2020                if (hlen < sizeof(struct iphdr))
2021                        return network - data;
2022
2023                /* Record next protocol if header is present */
2024        } else if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
2025                == HNS3_RX_FLAG_L3ID_IPV6) {
2026                if ((typeof(max_size))(network - data) >
2027                    (max_size - sizeof(struct ipv6hdr)))
2028                        return max_size;
2029
2030                /* Record next protocol */
2031                hlen = sizeof(struct ipv6hdr);
2032        } else {
2033                return network - data;
2034        }
2035
2036        /* Relocate pointer to start of L4 header */
2037        network += hlen;
2038
2039        /* Finally sort out TCP/UDP */
2040        if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
2041                == HNS3_RX_FLAG_L4ID_TCP) {
2042                if ((typeof(max_size))(network - data) >
2043                    (max_size - sizeof(struct tcphdr)))
2044                        return max_size;
2045
2046                /* Access doff as a u8 to avoid unaligned access on ia64 */
2047                hlen = (network[12] & 0xF0) >> 2;
2048
2049                /* Verify hlen meets minimum size requirements */
2050                if (hlen < sizeof(struct tcphdr))
2051                        return network - data;
2052
2053                network += hlen;
2054        } else if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
2055                == HNS3_RX_FLAG_L4ID_UDP) {
2056                if ((typeof(max_size))(network - data) >
2057                    (max_size - sizeof(struct udphdr)))
2058                        return max_size;
2059
2060                network += sizeof(struct udphdr);
2061        }
2062
2063        /* If everything has gone correctly network should be the
2064         * data section of the packet and will be the end of the header.
2065         * If not then it probably represents the end of the last recognized
2066         * header.
2067         */
2068        if ((typeof(max_size))(network - data) < max_size)
2069                return network - data;
2070        else
2071                return max_size;
2072}
2073
2074static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
2075                                struct hns3_enet_ring *ring, int pull_len,
2076                                struct hns3_desc_cb *desc_cb)
2077{
2078        struct hns3_desc *desc;
2079        int truesize, size;
2080        int last_offset;
2081        bool twobufs;
2082
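            /* With 4K pages and 2048-byte buffers a page holds exactly two
             * buffers ("twobufs") and reuse just flips page_offset between
             * the two halves; otherwise the offset walks through the page.
             */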
2083        twobufs = ((PAGE_SIZE < 8192) &&
2084                hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
2085
2086        desc = &ring->desc[ring->next_to_clean];
2087        size = le16_to_cpu(desc->rx.size);
2088
2089        truesize = hnae_buf_size(ring);
2090
2091        if (!twobufs)
2092                last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
2093
2094        skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
2095                        size - pull_len, truesize);
2096
2097        /* Avoid reusing pages from a remote NUMA node; leave reuse_flag clear */
2098        if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
2099                return;
2100
2101        if (twobufs) {
2102                /* If we are only owner of page we can reuse it */
2103                if (likely(page_count(desc_cb->priv) == 1)) {
2104                        /* Flip page offset to other buffer */
2105                        desc_cb->page_offset ^= truesize;
2106
2107                        desc_cb->reuse_flag = 1;
2108                        /* bump ref count on page before it is given */
2109                        get_page(desc_cb->priv);
2110                }
2111                return;
2112        }
2113
2114        /* Move the offset to the next buffer in the page */
2115        desc_cb->page_offset += truesize;
2116
2117        if (desc_cb->page_offset <= last_offset) {
2118                desc_cb->reuse_flag = 1;
2119                /* Bump ref count on page before it is given */
2120                get_page(desc_cb->priv);
2121        }
2122}
2123
2124static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
2125                             struct hns3_desc *desc)
2126{
2127        struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2128        int l3_type, l4_type;
2129        u32 bd_base_info;
2130        int ol4_type;
2131        u32 l234info;
2132
2133        bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2134        l234info = le32_to_cpu(desc->rx.l234_info);
2135
2136        skb->ip_summed = CHECKSUM_NONE;
2137
2138        skb_checksum_none_assert(skb);
2139
2140        if (!(netdev->features & NETIF_F_RXCSUM))
2141                return;
2142
2143        /* check if hardware has done checksum */
2144        if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
2145                return;
2146
2147        if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) ||
2148                     hnae_get_bit(l234info, HNS3_RXD_L4E_B) ||
2149                     hnae_get_bit(l234info, HNS3_RXD_OL3E_B) ||
2150                     hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) {
2151                netdev_err(netdev, "L3/L4 error pkt\n");
2152                u64_stats_update_begin(&ring->syncp);
2153                ring->stats.l3l4_csum_err++;
2154                u64_stats_update_end(&ring->syncp);
2155
2156                return;
2157        }
2158
2159        l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M,
2160                                 HNS3_RXD_L3ID_S);
2161        l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M,
2162                                 HNS3_RXD_L4ID_S);
2163
2164        ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
2165        switch (ol4_type) {
2166        case HNS3_OL4_TYPE_MAC_IN_UDP:
2167        case HNS3_OL4_TYPE_NVGRE:
2168                skb->csum_level = 1;
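                    /* fall through */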
2169        case HNS3_OL4_TYPE_NO_TUN:
2170                /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
2171                if (l3_type == HNS3_L3_TYPE_IPV4 ||
2172                    (l3_type == HNS3_L3_TYPE_IPV6 &&
2173                     (l4_type == HNS3_L4_TYPE_UDP ||
2174                      l4_type == HNS3_L4_TYPE_TCP ||
2175                      l4_type == HNS3_L4_TYPE_SCTP)))
2176                        skb->ip_summed = CHECKSUM_UNNECESSARY;
2177                break;
2178        }
2179}
2180
2181static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
2182{
2183        napi_gro_receive(&ring->tqp_vector->napi, skb);
2184}
2185
2186static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
2187                             struct sk_buff **out_skb, int *out_bnum)
2188{
2189        struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2190        struct hns3_desc_cb *desc_cb;
2191        struct hns3_desc *desc;
2192        struct sk_buff *skb;
2193        unsigned char *va;
2194        u32 bd_base_info;
2195        int pull_len;
2196        u32 l234info;
2197        int length;
2198        int bnum;
2199
2200        desc = &ring->desc[ring->next_to_clean];
2201        desc_cb = &ring->desc_cb[ring->next_to_clean];
2202
2203        prefetch(desc);
2204
2205        length = le16_to_cpu(desc->rx.pkt_len);
2206        bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2207        l234info = le32_to_cpu(desc->rx.l234_info);
2208
2209        /* Check valid BD */
2210        if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))
2211                return -EFAULT;
2212
2213        va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
2214
2215        /* Prefetch the first cache line of the first page.
2216         * The idea is to cache a few bytes of the packet header. With a
2217         * 64B L1 cache line we need to prefetch twice to cover 128B, but
2218         * CPUs with 128B L1 cache lines only need a single prefetch to
2219         * pull in the relevant part of the header.
2220         */
2222        prefetch(va);
2223#if L1_CACHE_BYTES < 128
2224        prefetch(va + L1_CACHE_BYTES);
2225#endif
2226
2227        skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
2228                                        HNS3_RX_HEAD_SIZE);
2229        if (unlikely(!skb)) {
2230                netdev_err(netdev, "alloc rx skb fail\n");
2231
2232                u64_stats_update_begin(&ring->syncp);
2233                ring->stats.sw_err_cnt++;
2234                u64_stats_update_end(&ring->syncp);
2235
2236                return -ENOMEM;
2237        }
2238
2239        prefetchw(skb->data);
2240
2241        /* Based on the hardware strategy, the offloaded tag is stored in
2242         * ot_vlan_tag for double-tagged frames and in vlan_tag for
2243         * single-tagged frames.
2244         */
2245        if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
2246                u16 vlan_tag;
2247
2248                vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2249                if (!(vlan_tag & VLAN_VID_MASK))
2250                        vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2251                if (vlan_tag & VLAN_VID_MASK)
2252                        __vlan_hwaccel_put_tag(skb,
2253                                               htons(ETH_P_8021Q),
2254                                               vlan_tag);
2255        }
2256
2257        bnum = 1;
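            /* Small packets are copied entirely into the skb linear area so
             * the page can be reused; larger packets pull only the headers
             * and attach the remaining buffers as page fragments.
             */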
2258        if (length <= HNS3_RX_HEAD_SIZE) {
2259                memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
2260
2261                /* We can reuse buffer as-is, just make sure it is local */
2262                if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
2263                        desc_cb->reuse_flag = 1;
2264                else /* This page cannot be reused so discard it */
2265                        put_page(desc_cb->priv);
2266
2267                ring_ptr_move_fw(ring, next_to_clean);
2268        } else {
2269                u64_stats_update_begin(&ring->syncp);
2270                ring->stats.seg_pkt_cnt++;
2271                u64_stats_update_end(&ring->syncp);
2272
2273                pull_len = hns3_nic_get_headlen(va, l234info,
2274                                                HNS3_RX_HEAD_SIZE);
2275                memcpy(__skb_put(skb, pull_len), va,
2276                       ALIGN(pull_len, sizeof(long)));
2277
2278                hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
2279                ring_ptr_move_fw(ring, next_to_clean);
2280
2281                while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
2282                        desc = &ring->desc[ring->next_to_clean];
2283                        desc_cb = &ring->desc_cb[ring->next_to_clean];
2284                        bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2285                        hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
2286                        ring_ptr_move_fw(ring, next_to_clean);
2287                        bnum++;
2288                }
2289        }
2290
2291        *out_bnum = bnum;
2292
2293        if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
2294                netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
2295                           ((u64 *)desc)[0], ((u64 *)desc)[1]);
2296                u64_stats_update_begin(&ring->syncp);
2297                ring->stats.non_vld_descs++;
2298                u64_stats_update_end(&ring->syncp);
2299
2300                dev_kfree_skb_any(skb);
2301                return -EINVAL;
2302        }
2303
2304        if (unlikely((!desc->rx.pkt_len) ||
2305                     hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
2306                netdev_err(netdev, "truncated pkt\n");
2307                u64_stats_update_begin(&ring->syncp);
2308                ring->stats.err_pkt_len++;
2309                u64_stats_update_end(&ring->syncp);
2310
2311                dev_kfree_skb_any(skb);
2312                return -EFAULT;
2313        }
2314
2315        if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) {
2316                netdev_err(netdev, "L2 error pkt\n");
2317                u64_stats_update_begin(&ring->syncp);
2318                ring->stats.l2_err++;
2319                u64_stats_update_end(&ring->syncp);
2320
2321                dev_kfree_skb_any(skb);
2322                return -EFAULT;
2323        }
2324
2325        u64_stats_update_begin(&ring->syncp);
2326        ring->stats.rx_pkts++;
2327        ring->stats.rx_bytes += skb->len;
2328        u64_stats_update_end(&ring->syncp);
2329
2330        ring->tqp_vector->rx_group.total_bytes += skb->len;
2331
2332        hns3_rx_checksum(ring, skb, desc);
2333        return 0;
2334}
2335
2336int hns3_clean_rx_ring(
2337                struct hns3_enet_ring *ring, int budget,
2338                void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
2339{
2340#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
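    /* Refill RX buffers in batches to amortize the head doorbell write */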
2341        struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2342        int recv_pkts, recv_bds, clean_count, err;
2343        int unused_count = hns3_desc_unused(ring);
2344        struct sk_buff *skb = NULL;
2345        int num, bnum = 0;
2346
2347        num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
2348        rmb(); /* Make sure num is read before any other descriptor data */
2349
2350        recv_pkts = 0, recv_bds = 0, clean_count = 0;
2351        num -= unused_count;
2352
2353        while (recv_pkts < budget && recv_bds < num) {
2354                /* Reuse or realloc buffers */
2355                if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
2356                        hns3_nic_alloc_rx_buffers(ring,
2357                                                  clean_count + unused_count);
2358                        clean_count = 0;
2359                        unused_count = hns3_desc_unused(ring);
2360                }
2361
2362                /* Poll one pkt */
2363                err = hns3_handle_rx_bd(ring, &skb, &bnum);
2364                if (unlikely(!skb)) /* This fault cannot be repaired */
2365                        goto out;
2366
2367                recv_bds += bnum;
2368                clean_count += bnum;
2369                if (unlikely(err)) { /* Skip the erroneous packet */
2370                        recv_pkts++;
2371                        continue;
2372                }
2373
2374                /* Hand the packet up to the network stack */
2375                skb->protocol = eth_type_trans(skb, netdev);
2376                rx_fn(ring, skb);
2377
2378                recv_pkts++;
2379        }
2380
2381out:
2382        /* Make sure all data has been written before submitting */
2383        if (clean_count + unused_count > 0)
2384                hns3_nic_alloc_rx_buffers(ring,
2385                                          clean_count + unused_count);
2386
2387        return recv_pkts;
2388}
2389
2390static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
2391{
2392        struct hns3_enet_tqp_vector *tqp_vector =
2393                                        ring_group->ring->tqp_vector;
2394        enum hns3_flow_level_range new_flow_level;
2395        int packets_per_msecs;
2396        int bytes_per_msecs;
2397        u32 time_passed_ms;
2398        u16 new_int_gl;
2399
2400        if (!ring_group->coal.int_gl || !tqp_vector->last_jiffies)
2401                return false;
2402
2403        if (ring_group->total_packets == 0) {
2404                ring_group->coal.int_gl = HNS3_INT_GL_50K;
2405                ring_group->coal.flow_level = HNS3_FLOW_LOW;
2406                return true;
2407        }
2408
2409        /* Simple throttle rate management
2410         * 0-10 MB/s    low   (50000 ints/s)
2411         * 10-20 MB/s   mid   (20000 ints/s)
2412         * 20-1249 MB/s high  (18000 ints/s)
2413         * > 40000 pps  ultra (8000 ints/s)
2414         */
2415        new_flow_level = ring_group->coal.flow_level;
2416        new_int_gl = ring_group->coal.int_gl;
2417        time_passed_ms =
2418                jiffies_to_msecs(jiffies - tqp_vector->last_jiffies);
2419
2420        if (!time_passed_ms)
2421                return false;
2422
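            /* do_div() divides the 64-bit counter in place; only the
             * quotient (packets or bytes per millisecond) is used below.
             */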
2423        do_div(ring_group->total_packets, time_passed_ms);
2424        packets_per_msecs = ring_group->total_packets;
2425
2426        do_div(ring_group->total_bytes, time_passed_ms);
2427        bytes_per_msecs = ring_group->total_bytes;
2428
2429#define HNS3_RX_LOW_BYTE_RATE 10000
2430#define HNS3_RX_MID_BYTE_RATE 20000
2431
2432        switch (new_flow_level) {
2433        case HNS3_FLOW_LOW:
2434                if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE)
2435                        new_flow_level = HNS3_FLOW_MID;
2436                break;
2437        case HNS3_FLOW_MID:
2438                if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE)
2439                        new_flow_level = HNS3_FLOW_HIGH;
2440                else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE)
2441                        new_flow_level = HNS3_FLOW_LOW;
2442                break;
2443        case HNS3_FLOW_HIGH:
2444        case HNS3_FLOW_ULTRA:
2445        default:
2446                if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE)
2447                        new_flow_level = HNS3_FLOW_MID;
2448                break;
2449        }
2450
2451#define HNS3_RX_ULTRA_PACKET_RATE 40
2452
2453        if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE &&
2454            &tqp_vector->rx_group == ring_group)
2455                new_flow_level = HNS3_FLOW_ULTRA;
2456
2457        switch (new_flow_level) {
2458        case HNS3_FLOW_LOW:
2459                new_int_gl = HNS3_INT_GL_50K;
2460                break;
2461        case HNS3_FLOW_MID:
2462                new_int_gl = HNS3_INT_GL_20K;
2463                break;
2464        case HNS3_FLOW_HIGH:
2465                new_int_gl = HNS3_INT_GL_18K;
2466                break;
2467        case HNS3_FLOW_ULTRA:
2468                new_int_gl = HNS3_INT_GL_8K;
2469                break;
2470        default:
2471                break;
2472        }
2473
2474        ring_group->total_bytes = 0;
2475        ring_group->total_packets = 0;
2476        ring_group->coal.flow_level = new_flow_level;
2477        if (new_int_gl != ring_group->coal.int_gl) {
2478                ring_group->coal.int_gl = new_int_gl;
2479                return true;
2480        }
2481        return false;
2482}
2483
2484static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
2485{
2486        struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
2487        struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
2488        bool rx_update, tx_update;
2489
2490        if (tqp_vector->int_adapt_down > 0) {
2491                tqp_vector->int_adapt_down--;
2492                return;
2493        }
2494
2495        if (rx_group->coal.gl_adapt_enable) {
2496                rx_update = hns3_get_new_int_gl(rx_group);
2497                if (rx_update)
2498                        hns3_set_vector_coalesce_rx_gl(tqp_vector,
2499                                                       rx_group->coal.int_gl);
2500        }
2501
2502        if (tx_group->coal.gl_adapt_enable) {
2503                tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group);
2504                if (tx_update)
2505                        hns3_set_vector_coalesce_tx_gl(tqp_vector,
2506                                                       tx_group->coal.int_gl);
2507        }
2508
2509        tqp_vector->last_jiffies = jiffies;
2510        tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
2511}
2512
2513static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
2514{
2515        struct hns3_enet_ring *ring;
2516        int rx_pkt_total = 0;
2517
2518        struct hns3_enet_tqp_vector *tqp_vector =
2519                container_of(napi, struct hns3_enet_tqp_vector, napi);
2520        bool clean_complete = true;
2521        int rx_budget;
2522
2523        /* Since the actual Tx work is minimal, we can give the Tx a larger
2524         * budget and be more aggressive about cleaning up the Tx descriptors.
2525         */
2526        hns3_for_each_ring(ring, tqp_vector->tx_group) {
2527                if (!hns3_clean_tx_ring(ring, budget))
2528                        clean_complete = false;
2529        }
2530
2531        /* make sure the rx ring budget is not smaller than 1 */
2532        rx_budget = max(budget / tqp_vector->num_tqps, 1);
2533
2534        hns3_for_each_ring(ring, tqp_vector->rx_group) {
2535                int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
2536                                                    hns3_rx_skb);
2537
2538                if (rx_cleaned >= rx_budget)
2539                        clean_complete = false;
2540
2541                rx_pkt_total += rx_cleaned;
2542        }
2543
2544        tqp_vector->rx_group.total_packets += rx_pkt_total;
2545
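            /* There is still work left; keep polling instead of re-arming
             * the vector interrupt.
             */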
2546        if (!clean_complete)
2547                return budget;
2548
2549        napi_complete(napi);
2550        hns3_update_new_int_gl(tqp_vector);
2551        hns3_mask_vector_irq(tqp_vector, 1);
2552
2553        return rx_pkt_total;
2554}
2555
2556static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2557                                      struct hnae3_ring_chain_node *head)
2558{
2559        struct pci_dev *pdev = tqp_vector->handle->pdev;
2560        struct hnae3_ring_chain_node *cur_chain = head;
2561        struct hnae3_ring_chain_node *chain;
2562        struct hns3_enet_ring *tx_ring;
2563        struct hns3_enet_ring *rx_ring;
2564
2565        tx_ring = tqp_vector->tx_group.ring;
2566        if (tx_ring) {
2567                cur_chain->tqp_index = tx_ring->tqp->tqp_index;
2568                hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2569                             HNAE3_RING_TYPE_TX);
2570                hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
2571                               HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
2572
2573                cur_chain->next = NULL;
2574
2575                while (tx_ring->next) {
2576                        tx_ring = tx_ring->next;
2577
2578                        chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
2579                                             GFP_KERNEL);
2580                        if (!chain)
2581                                return -ENOMEM;
2582
2583                        cur_chain->next = chain;
2584                        chain->tqp_index = tx_ring->tqp->tqp_index;
2585                        hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2586                                     HNAE3_RING_TYPE_TX);
2587                        hnae_set_field(chain->int_gl_idx,
2588                                       HNAE3_RING_GL_IDX_M,
2589                                       HNAE3_RING_GL_IDX_S,
2590                                       HNAE3_RING_GL_TX);
2591
2592                        cur_chain = chain;
2593                }
2594        }
2595
2596        rx_ring = tqp_vector->rx_group.ring;
2597        if (!tx_ring && rx_ring) {
2598                cur_chain->next = NULL;
2599                cur_chain->tqp_index = rx_ring->tqp->tqp_index;
2600                hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2601                             HNAE3_RING_TYPE_RX);
2602                hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
2603                               HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
2604
2605                rx_ring = rx_ring->next;
2606        }
2607
2608        while (rx_ring) {
2609                chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
2610                if (!chain)
2611                        return -ENOMEM;
2612
2613                cur_chain->next = chain;
2614                chain->tqp_index = rx_ring->tqp->tqp_index;
2615                hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2616                             HNAE3_RING_TYPE_RX);
2617                hnae_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
2618                               HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
2619
2620                cur_chain = chain;
2621
2622                rx_ring = rx_ring->next;
2623        }
2624
2625        return 0;
2626}
2627
2628static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2629                                        struct hnae3_ring_chain_node *head)
2630{
2631        struct pci_dev *pdev = tqp_vector->handle->pdev;
2632        struct hnae3_ring_chain_node *chain_tmp, *chain;
2633
2634        chain = head->next;
2635
2636        while (chain) {
2637                chain_tmp = chain->next;
2638                devm_kfree(&pdev->dev, chain);
2639                chain = chain_tmp;
2640        }
2641}
2642
2643static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
2644                                   struct hns3_enet_ring *ring)
2645{
2646        ring->next = group->ring;
2647        group->ring = ring;
2648
2649        group->count++;
2650}
2651
2652static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
2653{
2654        struct hnae3_ring_chain_node vector_ring_chain;
2655        struct hnae3_handle *h = priv->ae_handle;
2656        struct hns3_enet_tqp_vector *tqp_vector;
2657        int ret = 0;
2658        u16 i;
2659
2660        for (i = 0; i < priv->vector_num; i++) {
2661                tqp_vector = &priv->tqp_vector[i];
2662                hns3_vector_gl_rl_init_hw(tqp_vector, priv);
2663                tqp_vector->num_tqps = 0;
2664        }
2665
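            /* Distribute the TX/RX ring pairs over the vectors round-robin;
             * ring_data[0..num_tqps - 1] holds the TX rings and the second
             * half holds the RX rings.
             */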
2666        for (i = 0; i < h->kinfo.num_tqps; i++) {
2667                u16 vector_i = i % priv->vector_num;
2668                u16 tqp_num = h->kinfo.num_tqps;
2669
2670                tqp_vector = &priv->tqp_vector[vector_i];
2671
2672                hns3_add_ring_to_group(&tqp_vector->tx_group,
2673                                       priv->ring_data[i].ring);
2674
2675                hns3_add_ring_to_group(&tqp_vector->rx_group,
2676                                       priv->ring_data[i + tqp_num].ring);
2677
2678                priv->ring_data[i].ring->tqp_vector = tqp_vector;
2679                priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
2680                tqp_vector->num_tqps++;
2681        }
2682
2683        for (i = 0; i < priv->vector_num; i++) {
2684                tqp_vector = &priv->tqp_vector[i];
2685
2686                tqp_vector->rx_group.total_bytes = 0;
2687                tqp_vector->rx_group.total_packets = 0;
2688                tqp_vector->tx_group.total_bytes = 0;
2689                tqp_vector->tx_group.total_packets = 0;
2690                tqp_vector->handle = h;
2691
2692                ret = hns3_get_vector_ring_chain(tqp_vector,
2693                                                 &vector_ring_chain);
2694                if (ret)
2695                        return ret;
2696
2697                ret = h->ae_algo->ops->map_ring_to_vector(h,
2698                        tqp_vector->vector_irq, &vector_ring_chain);
2699
2700                hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2701
2702                if (ret)
2703                        return ret;
2704
2705                netif_napi_add(priv->netdev, &tqp_vector->napi,
2706                               hns3_nic_common_poll, NAPI_POLL_WEIGHT);
2707        }
2708
2709        return 0;
2710}
2711
2712static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
2713{
2714        struct hnae3_handle *h = priv->ae_handle;
2715        struct hns3_enet_tqp_vector *tqp_vector;
2716        struct hnae3_vector_info *vector;
2717        struct pci_dev *pdev = h->pdev;
2718        u16 tqp_num = h->kinfo.num_tqps;
2719        u16 vector_num;
2720        int ret = 0;
2721        u16 i;
2722
2723        /* RSS size, online CPU count and vector_num should all match */
2724        /* Should consider 2P/4P systems later */
2725        vector_num = min_t(u16, num_online_cpus(), tqp_num);
2726        vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
2727                              GFP_KERNEL);
2728        if (!vector)
2729                return -ENOMEM;
2730
2731        vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
2732
2733        priv->vector_num = vector_num;
2734        priv->tqp_vector = (struct hns3_enet_tqp_vector *)
2735                devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
2736                             GFP_KERNEL);
2737        if (!priv->tqp_vector) {
2738                ret = -ENOMEM;
2739                goto out;
2740        }
2741
2742        for (i = 0; i < priv->vector_num; i++) {
2743                tqp_vector = &priv->tqp_vector[i];
2744                tqp_vector->idx = i;
2745                tqp_vector->mask_addr = vector[i].io_addr;
2746                tqp_vector->vector_irq = vector[i].vector;
2747                hns3_vector_gl_rl_init(tqp_vector, priv);
2748        }
2749
2750out:
2751        devm_kfree(&pdev->dev, vector);
2752        return ret;
2753}
2754
2755static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
2756{
2757        group->ring = NULL;
2758        group->count = 0;
2759}
2760
2761static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
2762{
2763        struct hnae3_ring_chain_node vector_ring_chain;
2764        struct hnae3_handle *h = priv->ae_handle;
2765        struct hns3_enet_tqp_vector *tqp_vector;
2766        int i, ret;
2767
2768        for (i = 0; i < priv->vector_num; i++) {
2769                tqp_vector = &priv->tqp_vector[i];
2770
2771                ret = hns3_get_vector_ring_chain(tqp_vector,
2772                                                 &vector_ring_chain);
2773                if (ret)
2774                        return ret;
2775
2776                ret = h->ae_algo->ops->unmap_ring_from_vector(h,
2777                        tqp_vector->vector_irq, &vector_ring_chain);
2778                if (ret)
2779                        return ret;
2780
2781                ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
2782                if (ret)
2783                        return ret;
2784
2785                hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2786
2787                if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
2788                        (void)irq_set_affinity_hint(
2789                                priv->tqp_vector[i].vector_irq, NULL);
2791                        free_irq(priv->tqp_vector[i].vector_irq,
2792                                 &priv->tqp_vector[i]);
2793                }
2794
2795                priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
2796                hns3_clear_ring_group(&tqp_vector->rx_group);
2797                hns3_clear_ring_group(&tqp_vector->tx_group);
2798                netif_napi_del(&priv->tqp_vector[i].napi);
2799        }
2800
2801        return 0;
2802}
2803
2804static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
2805{
2806        struct hnae3_handle *h = priv->ae_handle;
2807        struct pci_dev *pdev = h->pdev;
2808        int i, ret;
2809
2810        for (i = 0; i < priv->vector_num; i++) {
2811                struct hns3_enet_tqp_vector *tqp_vector;
2812
2813                tqp_vector = &priv->tqp_vector[i];
2814                ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
2815                if (ret)
2816                        return ret;
2817        }
2818
2819        devm_kfree(&pdev->dev, priv->tqp_vector);
2820        return 0;
2821}
2822
2823static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
2824                             int ring_type)
2825{
2826        struct hns3_nic_ring_data *ring_data = priv->ring_data;
2827        int queue_num = priv->ae_handle->kinfo.num_tqps;
2828        struct pci_dev *pdev = priv->ae_handle->pdev;
2829        struct hns3_enet_ring *ring;
2830
2831        ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
2832        if (!ring)
2833                return -ENOMEM;
2834
2835        if (ring_type == HNAE3_RING_TYPE_TX) {
2836                ring_data[q->tqp_index].ring = ring;
2837                ring_data[q->tqp_index].queue_index = q->tqp_index;
2838                ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
2839        } else {
2840                ring_data[q->tqp_index + queue_num].ring = ring;
2841                ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
2842                ring->io_base = q->io_base;
2843        }
2844
2845        hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
2846
2847        ring->tqp = q;
2848        ring->desc = NULL;
2849        ring->desc_cb = NULL;
2850        ring->dev = priv->dev;
2851        ring->desc_dma_addr = 0;
2852        ring->buf_size = q->buf_size;
2853        ring->desc_num = q->desc_num;
2854        ring->next_to_use = 0;
2855        ring->next_to_clean = 0;
2856
2857        return 0;
2858}
2859
2860static int hns3_queue_to_ring(struct hnae3_queue *tqp,
2861                              struct hns3_nic_priv *priv)
2862{
2863        int ret;
2864
2865        ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
2866        if (ret)
2867                return ret;
2868
2869        ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
2870        if (ret)
2871                return ret;
2872
2873        return 0;
2874}
2875
2876static int hns3_get_ring_config(struct hns3_nic_priv *priv)
2877{
2878        struct hnae3_handle *h = priv->ae_handle;
2879        struct pci_dev *pdev = h->pdev;
2880        int i, ret;
2881
2882        priv->ring_data = devm_kzalloc(&pdev->dev, h->kinfo.num_tqps *
2883                                        sizeof(*priv->ring_data) * 2,
2884                                        GFP_KERNEL);
2885        if (!priv->ring_data)
2886                return -ENOMEM;
2887
2888        for (i = 0; i < h->kinfo.num_tqps; i++) {
2889                ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
2890                if (ret)
2891                        goto err;
2892        }
2893
2894        return 0;
2895err:
2896        devm_kfree(&pdev->dev, priv->ring_data);
2897        return ret;
2898}
2899
2900static void hns3_put_ring_config(struct hns3_nic_priv *priv)
2901{
2902        struct hnae3_handle *h = priv->ae_handle;
2903        int i;
2904
2905        for (i = 0; i < h->kinfo.num_tqps; i++) {
2906                devm_kfree(priv->dev, priv->ring_data[i].ring);
2907                devm_kfree(priv->dev,
2908                           priv->ring_data[i + h->kinfo.num_tqps].ring);
2909        }
2910        devm_kfree(priv->dev, priv->ring_data);
2911}
2912
2913static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
2914{
2915        int ret;
2916
2917        if (ring->desc_num <= 0 || ring->buf_size <= 0)
2918                return -EINVAL;
2919
2920        ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
2921                                GFP_KERNEL);
2922        if (!ring->desc_cb) {
2923                ret = -ENOMEM;
2924                goto out;
2925        }
2926
2927        ret = hns3_alloc_desc(ring);
2928        if (ret)
2929                goto out_with_desc_cb;
2930
2931        if (!HNAE3_IS_TX_RING(ring)) {
2932                ret = hns3_alloc_ring_buffers(ring);
2933                if (ret)
2934                        goto out_with_desc;
2935        }
2936
2937        return 0;
2938
2939out_with_desc:
2940        hns3_free_desc(ring);
2941out_with_desc_cb:
2942        kfree(ring->desc_cb);
2943        ring->desc_cb = NULL;
2944out:
2945        return ret;
2946}
2947
2948static void hns3_fini_ring(struct hns3_enet_ring *ring)
2949{
2950        hns3_free_desc(ring);
2951        kfree(ring->desc_cb);
2952        ring->desc_cb = NULL;
2953        ring->next_to_clean = 0;
2954        ring->next_to_use = 0;
2955}
2956
2957static int hns3_buf_size2type(u32 buf_size)
2958{
2959        int bd_size_type;
2960
2961        switch (buf_size) {
2962        case 512:
2963                bd_size_type = HNS3_BD_SIZE_512_TYPE;
2964                break;
2965        case 1024:
2966                bd_size_type = HNS3_BD_SIZE_1024_TYPE;
2967                break;
2968        case 2048:
2969                bd_size_type = HNS3_BD_SIZE_2048_TYPE;
2970                break;
2971        case 4096:
2972                bd_size_type = HNS3_BD_SIZE_4096_TYPE;
2973                break;
2974        default:
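                /* unrecognized buffer sizes fall back to the 2048-byte type */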
2975                bd_size_type = HNS3_BD_SIZE_2048_TYPE;
2976        }
2977
2978        return bd_size_type;
2979}
2980
2981static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
2982{
2983        dma_addr_t dma = ring->desc_dma_addr;
2984        struct hnae3_queue *q = ring->tqp;
2985
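        /* The descriptor base address is programmed as two 32-bit halves; the
         * high half is written as (dma >> 31) >> 1, which presumably avoids an
         * undefined full-width shift when dma_addr_t is only 32 bits wide.
         * The BD_NUM register appears to be programmed in units of 8
         * descriptors, minus one.
         */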
2986        if (!HNAE3_IS_TX_RING(ring)) {
2987                hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
2988                               (u32)dma);
2989                hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
2990                               (u32)((dma >> 31) >> 1));
2991
2992                hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
2993                               hns3_buf_size2type(ring->buf_size));
2994                hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
2995                               ring->desc_num / 8 - 1);
2996
2997        } else {
2998                hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
2999                               (u32)dma);
3000                hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
3001                               (u32)((dma >> 31) >> 1));
3002
3003                hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG,
3004                               hns3_buf_size2type(ring->buf_size));
3005                hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
3006                               ring->desc_num / 8 - 1);
3007        }
3008}
3009
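/* Allocate and initialize every TX and RX ring (2 * num_tqps in total); on
 * failure, rings that were already set up are torn down again.
 */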
3010int hns3_init_all_ring(struct hns3_nic_priv *priv)
3011{
3012        struct hnae3_handle *h = priv->ae_handle;
3013        int ring_num = h->kinfo.num_tqps * 2;
3014        int i, j;
3015        int ret;
3016
3017        for (i = 0; i < ring_num; i++) {
3018                ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
3019                if (ret) {
3020                        dev_err(priv->dev,
3021                                "Alloc ring memory fail! ret=%d\n", ret);
3022                        goto out_when_alloc_ring_memory;
3023                }
3024
3025                hns3_init_ring_hw(priv->ring_data[i].ring);
3026
3027                u64_stats_init(&priv->ring_data[i].ring->syncp);
3028        }
3029
3030        return 0;
3031
3032out_when_alloc_ring_memory:
3033        for (j = i - 1; j >= 0; j--)
3034                hns3_fini_ring(priv->ring_data[j].ring);
3035
3036        return -ENOMEM;
3037}
3038
3039int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
3040{
3041        struct hnae3_handle *h = priv->ae_handle;
3042        int i;
3043
3044        for (i = 0; i < h->kinfo.num_tqps; i++) {
3045                if (h->ae_algo->ops->reset_queue)
3046                        h->ae_algo->ops->reset_queue(h, i);
3047
3048                hns3_fini_ring(priv->ring_data[i].ring);
3049                hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
3050        }
3051        return 0;
3052}
3053
3054/* Set the MAC address if it is configured; otherwise leave it to the AE driver */
3055static void hns3_init_mac_addr(struct net_device *netdev)
3056{
3057        struct hns3_nic_priv *priv = netdev_priv(netdev);
3058        struct hnae3_handle *h = priv->ae_handle;
3059        u8 mac_addr_temp[ETH_ALEN];
3060
3061        if (h->ae_algo->ops->get_mac_addr) {
3062                h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
3063                ether_addr_copy(netdev->dev_addr, mac_addr_temp);
3064        }
3065
3066        /* Check if the MAC address is valid; if not, use a random one */
3067        if (!is_valid_ether_addr(netdev->dev_addr)) {
3068                eth_hw_addr_random(netdev);
3069                dev_warn(priv->dev, "using random MAC address %pM\n",
3070                         netdev->dev_addr);
3071        }
3072
3073        if (h->ae_algo->ops->set_mac_addr)
3074                h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);
3076}
3077
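/* Select the TX path helpers: TSO-capable devices use the TSO-aware
 * descriptor fill and queue-stop routines, the others use the plain versions.
 */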
3078static void hns3_nic_set_priv_ops(struct net_device *netdev)
3079{
3080        struct hns3_nic_priv *priv = netdev_priv(netdev);
3081
3082        if ((netdev->features & NETIF_F_TSO) ||
3083            (netdev->features & NETIF_F_TSO6)) {
3084                priv->ops.fill_desc = hns3_fill_desc_tso;
3085                priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
3086        } else {
3087                priv->ops.fill_desc = hns3_fill_desc;
3088                priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
3089        }
3090}
3091
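/* Bring up one NIC instance for the hnae3 handle: allocate the netdev, set up
 * the MAC address, features and ops, fetch the ring configuration, allocate
 * and bind the interrupt vectors, initialize the rings and finally register
 * the netdev.
 */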
3092static int hns3_client_init(struct hnae3_handle *handle)
3093{
3094        struct pci_dev *pdev = handle->pdev;
3095        struct hns3_nic_priv *priv;
3096        struct net_device *netdev;
3097        int ret;
3098
3099        netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv),
3100                                   hns3_get_max_available_channels(handle));
3101        if (!netdev)
3102                return -ENOMEM;
3103
3104        priv = netdev_priv(netdev);
3105        priv->dev = &pdev->dev;
3106        priv->netdev = netdev;
3107        priv->ae_handle = handle;
3108        priv->ae_handle->reset_level = HNAE3_NONE_RESET;
3109        priv->ae_handle->last_reset_time = jiffies;
3110        priv->tx_timeout_count = 0;
3111
3112        handle->kinfo.netdev = netdev;
3113        handle->priv = (void *)priv;
3114
3115        hns3_init_mac_addr(netdev);
3116
3117        hns3_set_default_feature(netdev);
3118
3119        netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
3120        netdev->priv_flags |= IFF_UNICAST_FLT;
3121        netdev->netdev_ops = &hns3_nic_netdev_ops;
3122        SET_NETDEV_DEV(netdev, &pdev->dev);
3123        hns3_ethtool_set_ops(netdev);
3124        hns3_nic_set_priv_ops(netdev);
3125
3126        /* Carrier off reporting is important to ethtool even BEFORE open */
3127        netif_carrier_off(netdev);
3128
3129        ret = hns3_get_ring_config(priv);
3130        if (ret) {
3131                ret = -ENOMEM;
3132                goto out_get_ring_cfg;
3133        }
3134
3135        ret = hns3_nic_alloc_vector_data(priv);
3136        if (ret) {
3137                ret = -ENOMEM;
3138                goto out_alloc_vector_data;
3139        }
3140
3141        ret = hns3_nic_init_vector_data(priv);
3142        if (ret) {
3143                ret = -ENOMEM;
3144                goto out_init_vector_data;
3145        }
3146
3147        ret = hns3_init_all_ring(priv);
3148        if (ret) {
3149                ret = -ENOMEM;
3150                goto out_init_ring_data;
3151        }
3152
3153        ret = register_netdev(netdev);
3154        if (ret) {
3155                dev_err(priv->dev, "probe register netdev fail!\n");
3156                goto out_reg_netdev_fail;
3157        }
3158
3159        hns3_dcbnl_setup(handle);
3160
3161        /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */
3162        netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
3163
3164        return ret;
3165
3166out_reg_netdev_fail:
3167out_init_ring_data:
3168        (void)hns3_nic_uninit_vector_data(priv);
3169out_init_vector_data:
3170        hns3_nic_dealloc_vector_data(priv);
3171out_alloc_vector_data:
3172        priv->ring_data = NULL;
3173out_get_ring_cfg:
3174        priv->ae_handle = NULL;
3175        free_netdev(netdev);
3176        return ret;
3177}
3178
3179static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
3180{
3181        struct net_device *netdev = handle->kinfo.netdev;
3182        struct hns3_nic_priv *priv = netdev_priv(netdev);
3183        int ret;
3184
3185        if (netdev->reg_state != NETREG_UNINITIALIZED)
3186                unregister_netdev(netdev);
3187
3188        ret = hns3_nic_uninit_vector_data(priv);
3189        if (ret)
3190                netdev_err(netdev, "uninit vector error\n");
3191
3192        ret = hns3_nic_dealloc_vector_data(priv);
3193        if (ret)
3194                netdev_err(netdev, "dealloc vector error\n");
3195
3196        ret = hns3_uninit_all_ring(priv);
3197        if (ret)
3198                netdev_err(netdev, "uninit ring error\n");
3199
3200        hns3_put_ring_config(priv);
3201
3202        priv->ring_data = NULL;
3203
3204        free_netdev(netdev);
3205}
3206
3207static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
3208{
3209        struct net_device *netdev = handle->kinfo.netdev;
3210
3211        if (!netdev)
3212                return;
3213
3214        if (linkup) {
3215                netif_carrier_on(netdev);
3216                netif_tx_wake_all_queues(netdev);
3217                netdev_info(netdev, "link up\n");
3218        } else {
3219                netif_carrier_off(netdev);
3220                netif_tx_stop_all_queues(netdev);
3221                netdev_info(netdev, "link down\n");
3222        }
3223}
3224
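/* Reconfigure the netdev's traffic classes: update the TC-to-queue mapping in
 * the AE layer, then mirror the per-TC queue ranges and the priority-to-TC map
 * into the stack via netdev_set_tc_queue()/netdev_set_prio_tc_map().
 */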
3225static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
3226{
3227        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3228        struct net_device *ndev = kinfo->netdev;
3229        bool if_running;
3230        int ret;
3231        u8 i;
3232
3233        if (tc > HNAE3_MAX_TC)
3234                return -EINVAL;
3235
3236        if (!ndev)
3237                return -ENODEV;
3238
3239        if_running = netif_running(ndev);
3240
3241        ret = netdev_set_num_tc(ndev, tc);
3242        if (ret)
3243                return ret;
3244
3245        if (if_running) {
3246                (void)hns3_nic_net_stop(ndev);
3247                msleep(100);
3248        }
3249
3250        ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ?
3251                kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP;
3252        if (ret)
3253                goto err_out;
3254
3255        if (tc <= 1) {
3256                netdev_reset_tc(ndev);
3257                goto out;
3258        }
3259
3260        for (i = 0; i < HNAE3_MAX_TC; i++) {
3261                struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
3262
3263                if (tc_info->enable)
3264                        netdev_set_tc_queue(ndev,
3265                                            tc_info->tc,
3266                                            tc_info->tqp_count,
3267                                            tc_info->tqp_offset);
3268        }
3269
3270        for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
3271                netdev_set_prio_tc_map(ndev, i,
3272                                       kinfo->prio_tc[i]);
3273        }
3274
3275out:
3276        ret = hns3_nic_set_real_num_queue(ndev);
3277
3278err_out:
3279        if (if_running)
3280                (void)hns3_nic_net_open(ndev);
3281
3282        return ret;
3283}
3284
3285static void hns3_recover_hw_addr(struct net_device *ndev)
3286{
3287        struct netdev_hw_addr_list *list;
3288        struct netdev_hw_addr *ha, *tmp;
3289
3290        /* go through and sync uc_addr entries to the device */
3291        list = &ndev->uc;
3292        list_for_each_entry_safe(ha, tmp, &list->list, list)
3293                hns3_nic_uc_sync(ndev, ha->addr);
3294
3295        /* go through and sync mc_addr entries to the device */
3296        list = &ndev->mc;
3297        list_for_each_entry_safe(ha, tmp, &list->list, list)
3298                hns3_nic_mc_sync(ndev, ha->addr);
3299}
3300
3301static void hns3_drop_skb_data(struct hns3_enet_ring *ring, struct sk_buff *skb)
3302{
3303        dev_kfree_skb_any(skb);
3304}
3305
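/* Drain every ring before a reset or queue-count change: reclaim pending TX
 * descriptors, reset the BQL state of each TX queue and drop any received
 * SKBs still sitting in the RX rings.
 */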
3306static void hns3_clear_all_ring(struct hnae3_handle *h)
3307{
3308        struct net_device *ndev = h->kinfo.netdev;
3309        struct hns3_nic_priv *priv = netdev_priv(ndev);
3310        u32 i;
3311
3312        for (i = 0; i < h->kinfo.num_tqps; i++) {
3313                struct netdev_queue *dev_queue;
3314                struct hns3_enet_ring *ring;
3315
3316                ring = priv->ring_data[i].ring;
3317                hns3_clean_tx_ring(ring, ring->desc_num);
3318                dev_queue = netdev_get_tx_queue(ndev,
3319                                                priv->ring_data[i].queue_index);
3320                netdev_tx_reset_queue(dev_queue);
3321
3322                ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
3323                hns3_clean_rx_ring(ring, ring->desc_num, hns3_drop_skb_data);
3324        }
3325}
3326
3327static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
3328{
3329        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3330        struct net_device *ndev = kinfo->netdev;
3331
3332        if (!netif_running(ndev))
3333                return -EIO;
3334
3335        return hns3_nic_net_stop(ndev);
3336}
3337
3338static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
3339{
3340        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3341        int ret = 0;
3342
3343        if (netif_running(kinfo->netdev)) {
3344                ret = hns3_nic_net_up(kinfo->netdev);
3345                if (ret) {
3346                        netdev_err(kinfo->netdev,
3347                                   "hns net up fail, ret=%d!\n", ret);
3348                        return ret;
3349                }
3350                handle->last_reset_time = jiffies;
3351        }
3352
3353        return ret;
3354}
3355
3356static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
3357{
3358        struct net_device *netdev = handle->kinfo.netdev;
3359        struct hns3_nic_priv *priv = netdev_priv(netdev);
3360        int ret;
3361
3362        hns3_init_mac_addr(netdev);
3363        hns3_nic_set_rx_mode(netdev);
3364        hns3_recover_hw_addr(netdev);
3365
3366        /* The hardware table is only cleared when the PF resets */
3367        if (!(handle->flags & HNAE3_SUPPORT_VF))
3368                hns3_restore_vlan(netdev);
3369
3370        /* Carrier off reporting is important to ethtool even BEFORE open */
3371        netif_carrier_off(netdev);
3372
3373        ret = hns3_get_ring_config(priv);
3374        if (ret)
3375                return ret;
3376
3377        ret = hns3_nic_init_vector_data(priv);
3378        if (ret)
3379                return ret;
3380
3381        ret = hns3_init_all_ring(priv);
3382        if (ret) {
3383                hns3_nic_uninit_vector_data(priv);
3384                priv->ring_data = NULL;
3385        }
3386
3387        return ret;
3388}
3389
3390static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
3391{
3392        struct net_device *netdev = handle->kinfo.netdev;
3393        struct hns3_nic_priv *priv = netdev_priv(netdev);
3394        int ret;
3395
3396        hns3_clear_all_ring(handle);
3397
3398        ret = hns3_nic_uninit_vector_data(priv);
3399        if (ret) {
3400                netdev_err(netdev, "uninit vector error\n");
3401                return ret;
3402        }
3403
3404        ret = hns3_uninit_all_ring(priv);
3405        if (ret)
3406                netdev_err(netdev, "uninit ring error\n");
3407
3408        hns3_put_ring_config(priv);
3409
3410        priv->ring_data = NULL;
3411
3412        return ret;
3413}
3414
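/* Dispatch reset notifications from the AE layer; the reset flow is expected
 * to go DOWN -> UNINIT -> INIT -> UP, with each stage handled by the helpers
 * above.
 */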
3415static int hns3_reset_notify(struct hnae3_handle *handle,
3416                             enum hnae3_reset_notify_type type)
3417{
3418        int ret = 0;
3419
3420        switch (type) {
3421        case HNAE3_UP_CLIENT:
3422                ret = hns3_reset_notify_up_enet(handle);
3423                break;
3424        case HNAE3_DOWN_CLIENT:
3425                ret = hns3_reset_notify_down_enet(handle);
3426                break;
3427        case HNAE3_INIT_CLIENT:
3428                ret = hns3_reset_notify_init_enet(handle);
3429                break;
3430        case HNAE3_UNINIT_CLIENT:
3431                ret = hns3_reset_notify_uninit_enet(handle);
3432                break;
3433        default:
3434                break;
3435        }
3436
3437        return ret;
3438}
3439
3440static void hns3_restore_coal(struct hns3_nic_priv *priv,
3441                              struct hns3_enet_coalesce *tx,
3442                              struct hns3_enet_coalesce *rx)
3443{
3444        u16 vector_num = priv->vector_num;
3445        int i;
3446
3447        for (i = 0; i < vector_num; i++) {
3448                memcpy(&priv->tqp_vector[i].tx_group.coal, tx,
3449                       sizeof(struct hns3_enet_coalesce));
3450                memcpy(&priv->tqp_vector[i].rx_group.coal, rx,
3451                       sizeof(struct hns3_enet_coalesce));
3452        }
3453}
3454
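/* Resize to new_tqp_num queues: ask the AE layer to change the channel count,
 * then rebuild the ring configuration and vectors, restoring the saved
 * coalesce settings along the way.
 */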
3455static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num,
3456                               struct hns3_enet_coalesce *tx,
3457                               struct hns3_enet_coalesce *rx)
3458{
3459        struct hns3_nic_priv *priv = netdev_priv(netdev);
3460        struct hnae3_handle *h = hns3_get_handle(netdev);
3461        int ret;
3462
3463        ret = h->ae_algo->ops->set_channels(h, new_tqp_num);
3464        if (ret)
3465                return ret;
3466
3467        ret = hns3_get_ring_config(priv);
3468        if (ret)
3469                return ret;
3470
3471        ret = hns3_nic_alloc_vector_data(priv);
3472        if (ret)
3473                goto err_alloc_vector;
3474
3475        hns3_restore_coal(priv, tx, rx);
3476
3477        ret = hns3_nic_init_vector_data(priv);
3478        if (ret)
3479                goto err_uninit_vector;
3480
3481        ret = hns3_init_all_ring(priv);
3482        if (ret)
3483                goto err_put_ring;
3484
3485        return 0;
3486
3487err_put_ring:
3488        hns3_put_ring_config(priv);
3489err_uninit_vector:
3490        hns3_nic_uninit_vector_data(priv);
3491err_alloc_vector:
3492        hns3_nic_dealloc_vector_data(priv);
3493        return ret;
3494}
3495
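/* Round the requested queue count down to a multiple of num_tc, e.g. with
 * num_tc = 4 a request for 10 queues is adjusted to 8.
 */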
3496static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num)
3497{
3498        return (new_tqp_num / num_tc) * num_tc;
3499}
3500
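/* ethtool set_channels handler: validate and round the requested combined
 * queue count, tear down the current rings and vectors, resize via
 * hns3_modify_tqp_num() and, if that fails, try to revert to the original
 * queue count.
 */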
3501int hns3_set_channels(struct net_device *netdev,
3502                      struct ethtool_channels *ch)
3503{
3504        struct hns3_nic_priv *priv = netdev_priv(netdev);
3505        struct hnae3_handle *h = hns3_get_handle(netdev);
3506        struct hnae3_knic_private_info *kinfo = &h->kinfo;
3507        struct hns3_enet_coalesce tx_coal, rx_coal;
3508        bool if_running = netif_running(netdev);
3509        u32 new_tqp_num = ch->combined_count;
3510        u16 org_tqp_num;
3511        int ret;
3512
3513        if (ch->rx_count || ch->tx_count)
3514                return -EINVAL;
3515
3516        if (new_tqp_num > hns3_get_max_available_channels(h) ||
3517            new_tqp_num < kinfo->num_tc) {
3518                dev_err(&netdev->dev,
3519                        "Change tqps fail, the tqp range is from %d to %d",
3520                        kinfo->num_tc,
3521                        hns3_get_max_available_channels(h));
3522                return -EINVAL;
3523        }
3524
3525        new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num);
3526        if (kinfo->num_tqps == new_tqp_num)
3527                return 0;
3528
3529        if (if_running)
3530                hns3_nic_net_stop(netdev);
3531
3532        hns3_clear_all_ring(h);
3533
3534        ret = hns3_nic_uninit_vector_data(priv);
3535        if (ret) {
3536                dev_err(&netdev->dev,
3537                        "Unbind vector with tqp fail, nothing is changed");
3538                goto open_netdev;
3539        }
3540
3541        /* Changing the tqp num may also change the vector num;
3542         * ethtool only supports setting and querying one coalesce
3543         * configuration for now, so save vector 0's coalesce
3544         * configuration here in order to restore it.
3545         */
3546        memcpy(&tx_coal, &priv->tqp_vector[0].tx_group.coal,
3547               sizeof(struct hns3_enet_coalesce));
3548        memcpy(&rx_coal, &priv->tqp_vector[0].rx_group.coal,
3549               sizeof(struct hns3_enet_coalesce));
3550
3551        hns3_nic_dealloc_vector_data(priv);
3552
3553        hns3_uninit_all_ring(priv);
3554        hns3_put_ring_config(priv);
3555
3556        org_tqp_num = h->kinfo.num_tqps;
3557        ret = hns3_modify_tqp_num(netdev, new_tqp_num, &tx_coal, &rx_coal);
3558        if (ret) {
3559                ret = hns3_modify_tqp_num(netdev, org_tqp_num,
3560                                          &tx_coal, &rx_coal);
3561                if (ret) {
3562                        /* If revert to old tqp failed, fatal error occurred */
3563                        dev_err(&netdev->dev,
3564                                "Revert to old tqp num fail, ret=%d", ret);
3565                        return ret;
3566                }
3567                dev_info(&netdev->dev,
3568                         "Change tqp num fail, Revert to old tqp num");
3569        }
3570
3571open_netdev:
3572        if (if_running)
3573                hns3_nic_net_open(netdev);
3574
3575        return ret;
3576}
3577
3578static const struct hnae3_client_ops client_ops = {
3579        .init_instance = hns3_client_init,
3580        .uninit_instance = hns3_client_uninit,
3581        .link_status_change = hns3_link_status_change,
3582        .setup_tc = hns3_client_setup_tc,
3583        .reset_notify = hns3_reset_notify,
3584};
3585
3586/* hns3_init_module - Driver registration routine
3587 * hns3_init_module is the first routine called when the driver is
3588 * loaded. It registers the hnae3 client and then the PCI driver.
3589 */
3590static int __init hns3_init_module(void)
3591{
3592        int ret;
3593
3594        pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
3595        pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
3596
3597        client.type = HNAE3_CLIENT_KNIC;
3598        snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
3599                 hns3_driver_name);
3600
3601        client.ops = &client_ops;
3602
3603        ret = hnae3_register_client(&client);
3604        if (ret)
3605                return ret;
3606
3607        ret = pci_register_driver(&hns3_driver);
3608        if (ret)
3609                hnae3_unregister_client(&client);
3610
3611        return ret;
3612}
3613module_init(hns3_init_module);
3614
3615/* hns3_exit_module - Driver exit cleanup routine
3616 * hns3_exit_module is called just before the driver is removed
3617 * from memory.
3618 */
3619static void __exit hns3_exit_module(void)
3620{
3621        pci_unregister_driver(&hns3_driver);
3622        hnae3_unregister_client(&client);
3623}
3624module_exit(hns3_exit_module);
3625
3626MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
3627MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
3628MODULE_LICENSE("GPL");
3629MODULE_ALIAS("pci:hns-nic");
3630