linux/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
   1/*
   2 * Copyright (c) 2016~2017 Hisilicon Limited.
   3 *
   4 * This program is free software; you can redistribute it and/or modify
   5 * it under the terms of the GNU General Public License as published by
   6 * the Free Software Foundation; either version 2 of the License, or
   7 * (at your option) any later version.
   8 */
   9
  10#include <linux/dma-mapping.h>
  11#include <linux/etherdevice.h>
  12#include <linux/interrupt.h>
  13#include <linux/if_vlan.h>
  14#include <linux/ip.h>
  15#include <linux/ipv6.h>
  16#include <linux/module.h>
  17#include <linux/pci.h>
  18#include <linux/skbuff.h>
  19#include <linux/sctp.h>
  20#include <linux/vermagic.h>
  21#include <net/gre.h>
  22#include <net/vxlan.h>
  23
  24#include "hnae3.h"
  25#include "hns3_enet.h"
  26
  27const char hns3_driver_name[] = "hns3";
  28const char hns3_driver_version[] = VERMAGIC_STRING;
  29static const char hns3_driver_string[] =
  30                        "Hisilicon Ethernet Network Driver for Hip08 Family";
  31static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
  32static struct hnae3_client client;
  33
  34/* hns3_pci_tbl - PCI Device ID Table
  35 *
  36 * Last entry must be all 0s
  37 *
  38 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
  39 *   Class, Class Mask, private data (not used) }
  40 */
  41static const struct pci_device_id hns3_pci_tbl[] = {
  42        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
  43        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
  44        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
  45         HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
  46        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
  47         HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
  48        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
  49         HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
  50        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
  51         HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
  52        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
  53         HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
  54        /* required last entry */
  55        {0, }
  56};
  57MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
  58
  59static irqreturn_t hns3_irq_handle(int irq, void *dev)
  60{
  61        struct hns3_enet_tqp_vector *tqp_vector = dev;
  62
  63        napi_schedule(&tqp_vector->napi);
  64
  65        return IRQ_HANDLED;
  66}
  67
  68static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
  69{
  70        struct hns3_enet_tqp_vector *tqp_vectors;
  71        unsigned int i;
  72
  73        for (i = 0; i < priv->vector_num; i++) {
  74                tqp_vectors = &priv->tqp_vector[i];
  75
  76                if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
  77                        continue;
  78
  79                /* release the irq resource */
  80                free_irq(tqp_vectors->vector_irq, tqp_vectors);
  81                tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
  82        }
  83}
  84
  85static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
  86{
  87        struct hns3_enet_tqp_vector *tqp_vectors;
  88        int txrx_int_idx = 0;
  89        int rx_int_idx = 0;
  90        int tx_int_idx = 0;
  91        unsigned int i;
  92        int ret;
  93
  94        for (i = 0; i < priv->vector_num; i++) {
  95                tqp_vectors = &priv->tqp_vector[i];
  96
  97                if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
  98                        continue;
  99
 100                if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
 101                        snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
 102                                 "%s-%s-%d", priv->netdev->name, "TxRx",
 103                                 txrx_int_idx++);
 105                } else if (tqp_vectors->rx_group.ring) {
 106                        snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
 107                                 "%s-%s-%d", priv->netdev->name, "Rx",
 108                                 rx_int_idx++);
 109                } else if (tqp_vectors->tx_group.ring) {
 110                        snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
 111                                 "%s-%s-%d", priv->netdev->name, "Tx",
 112                                 tx_int_idx++);
 113                } else {
 114                        /* Skip this unused q_vector */
 115                        continue;
 116                }
 117
 118                tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
 119
 120                ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
 121                                  tqp_vectors->name,
  122                                  tqp_vectors);
 123                if (ret) {
 124                        netdev_err(priv->netdev, "request irq(%d) fail\n",
 125                                   tqp_vectors->vector_irq);
 126                        return ret;
 127                }
 128
 129                tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
 130        }
 131
 132        return 0;
 133}
 134
 135static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
 136                                 u32 mask_en)
 137{
 138        writel(mask_en, tqp_vector->mask_addr);
 139}
 140
 141static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
 142{
 143        napi_enable(&tqp_vector->napi);
 144
 145        /* enable vector */
 146        hns3_mask_vector_irq(tqp_vector, 1);
 147}
 148
 149static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
 150{
 151        /* disable vector */
 152        hns3_mask_vector_irq(tqp_vector, 0);
 153
 154        disable_irq(tqp_vector->vector_irq);
 155        napi_disable(&tqp_vector->napi);
 156}
 157
 158static void hns3_set_vector_coalesc_gl(struct hns3_enet_tqp_vector *tqp_vector,
 159                                       u32 gl_value)
 160{
  161        /* this defines the configuration for GL (Interrupt Gap Limiter).
  162         * GL defines the minimum gap between two interrupts.
  163         * GL and RL (Rate Limiter) are the 2 ways to achieve interrupt coalescing.
  164         */
 165        writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
 166        writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
 167        writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL2_OFFSET);
 168}
 169
 170static void hns3_set_vector_coalesc_rl(struct hns3_enet_tqp_vector *tqp_vector,
 171                                       u32 rl_value)
 172{
  173        /* this defines the configuration for RL (Interrupt Rate Limiter).
  174         * RL defines the rate of interrupts, i.e. the number of interrupts per second.
  175         * GL and RL (Rate Limiter) are the 2 ways to achieve interrupt coalescing.
  176         */
 177        writel(rl_value, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
 178}
 179
 180static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector)
 181{
 182        /* initialize the configuration for interrupt coalescing.
 183         * 1. GL (Interrupt Gap Limiter)
 184         * 2. RL (Interrupt Rate Limiter)
 185         */
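        /* Rough relationship between GL and interrupt rate (a sketch, which
         * assumes the Hip08 GL registers count in units of 2 usec; the unit
         * is not visible from this file):
         *
         *   gap_usec     = gl_value * 2
         *   max_int_rate = 1000000 / gap_usec
         *
         * e.g. a GL value of 10 (0xA) -> 20 usec gap -> ~50K interrupts/s,
         * which is presumably what HNS3_INT_GL_50K encodes.
         */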
 186
  187        /* Default: enable interrupt coalescing */
 188        tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K;
 189        tqp_vector->tx_group.int_gl = HNS3_INT_GL_50K;
 190        hns3_set_vector_coalesc_gl(tqp_vector, HNS3_INT_GL_50K);
 191        /* for now we are disabling Interrupt RL - we
 192         * will re-enable later
 193         */
 194        hns3_set_vector_coalesc_rl(tqp_vector, 0);
 195        tqp_vector->rx_group.flow_level = HNS3_FLOW_LOW;
 196        tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW;
 197}
 198
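/* Bring-up path: hns3_nic_net_open() -> hns3_nic_net_up().
 * hns3_nic_net_up() requests the per-vector IRQs, enables NAPI and unmasks
 * every vector, and finally starts the ae_dev through the hnae3 ops; on
 * failure it unwinds in the reverse order (disable vectors, free IRQs).
 */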
 199static int hns3_nic_net_up(struct net_device *netdev)
 200{
 201        struct hns3_nic_priv *priv = netdev_priv(netdev);
 202        struct hnae3_handle *h = priv->ae_handle;
 203        int i, j;
 204        int ret;
 205
 206        /* get irq resource for all vectors */
 207        ret = hns3_nic_init_irq(priv);
 208        if (ret) {
 209                netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
 210                return ret;
 211        }
 212
 213        /* enable the vectors */
 214        for (i = 0; i < priv->vector_num; i++)
 215                hns3_vector_enable(&priv->tqp_vector[i]);
 216
 217        /* start the ae_dev */
 218        ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
 219        if (ret)
 220                goto out_start_err;
 221
 222        return 0;
 223
 224out_start_err:
 225        for (j = i - 1; j >= 0; j--)
 226                hns3_vector_disable(&priv->tqp_vector[j]);
 227
 228        hns3_nic_uninit_irq(priv);
 229
 230        return ret;
 231}
 232
 233static int hns3_nic_net_open(struct net_device *netdev)
 234{
 235        struct hns3_nic_priv *priv = netdev_priv(netdev);
 236        struct hnae3_handle *h = priv->ae_handle;
 237        int ret;
 238
 239        netif_carrier_off(netdev);
 240
 241        ret = netif_set_real_num_tx_queues(netdev, h->kinfo.num_tqps);
 242        if (ret) {
 243                netdev_err(netdev,
 244                           "netif_set_real_num_tx_queues fail, ret=%d!\n",
 245                           ret);
 246                return ret;
 247        }
 248
 249        ret = netif_set_real_num_rx_queues(netdev, h->kinfo.num_tqps);
 250        if (ret) {
 251                netdev_err(netdev,
 252                           "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
 253                return ret;
 254        }
 255
 256        ret = hns3_nic_net_up(netdev);
 257        if (ret) {
 258                netdev_err(netdev,
 259                           "hns net up fail, ret=%d!\n", ret);
 260                return ret;
 261        }
 262
 263        return 0;
 264}
 265
 266static void hns3_nic_net_down(struct net_device *netdev)
 267{
 268        struct hns3_nic_priv *priv = netdev_priv(netdev);
 269        const struct hnae3_ae_ops *ops;
 270        int i;
 271
 272        /* stop ae_dev */
 273        ops = priv->ae_handle->ae_algo->ops;
 274        if (ops->stop)
 275                ops->stop(priv->ae_handle);
 276
 277        /* disable vectors */
 278        for (i = 0; i < priv->vector_num; i++)
 279                hns3_vector_disable(&priv->tqp_vector[i]);
 280
 281        /* free irq resources */
 282        hns3_nic_uninit_irq(priv);
 283}
 284
 285static int hns3_nic_net_stop(struct net_device *netdev)
 286{
 287        netif_tx_stop_all_queues(netdev);
 288        netif_carrier_off(netdev);
 289
 290        hns3_nic_net_down(netdev);
 291
 292        return 0;
 293}
 294
 295void hns3_set_multicast_list(struct net_device *netdev)
 296{
 297        struct hns3_nic_priv *priv = netdev_priv(netdev);
 298        struct hnae3_handle *h = priv->ae_handle;
 299        struct netdev_hw_addr *ha = NULL;
 300
 301        if (h->ae_algo->ops->set_mc_addr) {
 302                netdev_for_each_mc_addr(ha, netdev)
 303                        if (h->ae_algo->ops->set_mc_addr(h, ha->addr))
 304                                netdev_err(netdev, "set multicast fail\n");
 305        }
 306}
 307
 308static int hns3_nic_uc_sync(struct net_device *netdev,
 309                            const unsigned char *addr)
 310{
 311        struct hns3_nic_priv *priv = netdev_priv(netdev);
 312        struct hnae3_handle *h = priv->ae_handle;
 313
 314        if (h->ae_algo->ops->add_uc_addr)
 315                return h->ae_algo->ops->add_uc_addr(h, addr);
 316
 317        return 0;
 318}
 319
 320static int hns3_nic_uc_unsync(struct net_device *netdev,
 321                              const unsigned char *addr)
 322{
 323        struct hns3_nic_priv *priv = netdev_priv(netdev);
 324        struct hnae3_handle *h = priv->ae_handle;
 325
 326        if (h->ae_algo->ops->rm_uc_addr)
 327                return h->ae_algo->ops->rm_uc_addr(h, addr);
 328
 329        return 0;
 330}
 331
 332static int hns3_nic_mc_sync(struct net_device *netdev,
 333                            const unsigned char *addr)
 334{
 335        struct hns3_nic_priv *priv = netdev_priv(netdev);
 336        struct hnae3_handle *h = priv->ae_handle;
 337
 338        if (h->ae_algo->ops->add_mc_addr)
 339                return h->ae_algo->ops->add_mc_addr(h, addr);
 340
 341        return 0;
 342}
 343
 344static int hns3_nic_mc_unsync(struct net_device *netdev,
 345                              const unsigned char *addr)
 346{
 347        struct hns3_nic_priv *priv = netdev_priv(netdev);
 348        struct hnae3_handle *h = priv->ae_handle;
 349
 350        if (h->ae_algo->ops->rm_mc_addr)
 351                return h->ae_algo->ops->rm_mc_addr(h, addr);
 352
 353        return 0;
 354}
 355
 356void hns3_nic_set_rx_mode(struct net_device *netdev)
 357{
 358        struct hns3_nic_priv *priv = netdev_priv(netdev);
 359        struct hnae3_handle *h = priv->ae_handle;
 360
 361        if (h->ae_algo->ops->set_promisc_mode) {
 362                if (netdev->flags & IFF_PROMISC)
 363                        h->ae_algo->ops->set_promisc_mode(h, 1);
 364                else
 365                        h->ae_algo->ops->set_promisc_mode(h, 0);
 366        }
 367        if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync))
 368                netdev_err(netdev, "sync uc address fail\n");
 369        if (netdev->flags & IFF_MULTICAST)
 370                if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
 371                        netdev_err(netdev, "sync mc address fail\n");
 372}
 373
 374static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
 375                        u16 *mss, u32 *type_cs_vlan_tso)
 376{
 377        u32 l4_offset, hdr_len;
 378        union l3_hdr_info l3;
 379        union l4_hdr_info l4;
 380        u32 l4_paylen;
 381        int ret;
 382
 383        if (!skb_is_gso(skb))
 384                return 0;
 385
 386        ret = skb_cow_head(skb, 0);
 387        if (ret)
 388                return ret;
 389
 390        l3.hdr = skb_network_header(skb);
 391        l4.hdr = skb_transport_header(skb);
 392
  393        /* Software should clear the IPv4 header's checksum field when
  394         * TSO is needed.
  395         */
 396        if (l3.v4->version == 4)
 397                l3.v4->check = 0;
 398
 399        /* tunnel packet.*/
 400        if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
 401                                         SKB_GSO_GRE_CSUM |
 402                                         SKB_GSO_UDP_TUNNEL |
 403                                         SKB_GSO_UDP_TUNNEL_CSUM)) {
 404                if ((!(skb_shinfo(skb)->gso_type &
 405                    SKB_GSO_PARTIAL)) &&
 406                    (skb_shinfo(skb)->gso_type &
 407                    SKB_GSO_UDP_TUNNEL_CSUM)) {
  408                        /* Software should clear the UDP checksum
  409                         * field when TSO is needed.
  410                         */
 411                        l4.udp->check = 0;
 412                }
 413                /* reset l3&l4 pointers from outer to inner headers */
 414                l3.hdr = skb_inner_network_header(skb);
 415                l4.hdr = skb_inner_transport_header(skb);
 416
  417                /* Software should clear the IPv4 header's checksum field
  418                 * when TSO is needed.
  419                 */
 420                if (l3.v4->version == 4)
 421                        l3.v4->check = 0;
 422        }
 423
 424        /* normal or tunnel packet*/
 425        l4_offset = l4.hdr - skb->data;
 426        hdr_len = (l4.tcp->doff * 4) + l4_offset;
 427
  428        /* remove the payload length from the inner pseudo checksum when TSO */
 429        l4_paylen = skb->len - l4_offset;
 430        csum_replace_by_diff(&l4.tcp->check,
 431                             (__force __wsum)htonl(l4_paylen));
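        /* Why the payload length is subtracted: for a GSO skb the TCP
         * pseudo-header checksum in l4.tcp->check covers the total payload
         * length (skb->len - l4_offset).  Hardware re-inserts the per-segment
         * length for every segment it generates, so the driver removes the
         * full-length contribution here.  A sketch with hypothetical numbers:
         *
         *   skb->len = 66054, l4_offset = 54  ->  l4_paylen = 66000
         *   l4.tcp->check -= checksum of htonl(66000)
         */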
 432
 433        /* find the txbd field values */
 434        *paylen = skb->len - hdr_len;
 435        hnae_set_bit(*type_cs_vlan_tso,
 436                     HNS3_TXD_TSO_B, 1);
 437
 438        /* get MSS for TSO */
 439        *mss = skb_shinfo(skb)->gso_size;
 440
 441        return 0;
 442}
 443
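/* hns3_get_l4_protocol - parse the outer and, for encapsulated skbs, the
 * inner L4 protocol number out of the packet headers.
 * @ol4_proto: outer L4 protocol (the only L4 protocol for non-tunnel skbs)
 * @il4_proto: inner L4 protocol, 0 when the skb is not encapsulated
 *
 * Returns 0 on success, -EINVAL when the outer protocol is neither IPv4
 * nor IPv6.
 */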
 444static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
 445                                u8 *il4_proto)
 446{
 447        union {
 448                struct iphdr *v4;
 449                struct ipv6hdr *v6;
 450                unsigned char *hdr;
 451        } l3;
 452        unsigned char *l4_hdr;
 453        unsigned char *exthdr;
 454        u8 l4_proto_tmp;
 455        __be16 frag_off;
 456
  457        /* find the outer header pointers */
 458        l3.hdr = skb_network_header(skb);
 459        l4_hdr = skb_inner_transport_header(skb);
 460
 461        if (skb->protocol == htons(ETH_P_IPV6)) {
 462                exthdr = l3.hdr + sizeof(*l3.v6);
 463                l4_proto_tmp = l3.v6->nexthdr;
 464                if (l4_hdr != exthdr)
 465                        ipv6_skip_exthdr(skb, exthdr - skb->data,
 466                                         &l4_proto_tmp, &frag_off);
 467        } else if (skb->protocol == htons(ETH_P_IP)) {
 468                l4_proto_tmp = l3.v4->protocol;
 469        } else {
 470                return -EINVAL;
 471        }
 472
 473        *ol4_proto = l4_proto_tmp;
 474
 475        /* tunnel packet */
 476        if (!skb->encapsulation) {
 477                *il4_proto = 0;
 478                return 0;
 479        }
 480
  481        /* find the inner header pointers */
 482        l3.hdr = skb_inner_network_header(skb);
 483        l4_hdr = skb_inner_transport_header(skb);
 484
 485        if (l3.v6->version == 6) {
 486                exthdr = l3.hdr + sizeof(*l3.v6);
 487                l4_proto_tmp = l3.v6->nexthdr;
 488                if (l4_hdr != exthdr)
 489                        ipv6_skip_exthdr(skb, exthdr - skb->data,
 490                                         &l4_proto_tmp, &frag_off);
 491        } else if (l3.v4->version == 4) {
 492                l4_proto_tmp = l3.v4->protocol;
 493        }
 494
 495        *il4_proto = l4_proto_tmp;
 496
 497        return 0;
 498}
 499
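/* hns3_set_l2l3l4_len - fill the header-length fields of the Tx BD.
 * The shifts below reflect the unit of each descriptor field: L2/OL2
 * lengths are written in units of 2 bytes (hence ">> 1"), while L3/L4 and
 * OL3/OL4 lengths are written in units of 4 bytes (hence ">> 2").  For
 * tunnel packets the outer lengths go into ol_type_vlan_len_msec and the
 * inner lengths into type_cs_vlan_tso.
 */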
 500static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
 501                                u8 il4_proto, u32 *type_cs_vlan_tso,
 502                                u32 *ol_type_vlan_len_msec)
 503{
 504        union {
 505                struct iphdr *v4;
 506                struct ipv6hdr *v6;
 507                unsigned char *hdr;
 508        } l3;
 509        union {
 510                struct tcphdr *tcp;
 511                struct udphdr *udp;
 512                struct gre_base_hdr *gre;
 513                unsigned char *hdr;
 514        } l4;
 515        unsigned char *l2_hdr;
 516        u8 l4_proto = ol4_proto;
 517        u32 ol2_len;
 518        u32 ol3_len;
 519        u32 ol4_len;
 520        u32 l2_len;
 521        u32 l3_len;
 522
 523        l3.hdr = skb_network_header(skb);
 524        l4.hdr = skb_transport_header(skb);
 525
 526        /* compute L2 header size for normal packet, defined in 2 Bytes */
 527        l2_len = l3.hdr - skb->data;
 528        hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
 529                       HNS3_TXD_L2LEN_S, l2_len >> 1);
 530
 531        /* tunnel packet*/
 532        if (skb->encapsulation) {
 533                /* compute OL2 header size, defined in 2 Bytes */
 534                ol2_len = l2_len;
 535                hnae_set_field(*ol_type_vlan_len_msec,
 536                               HNS3_TXD_L2LEN_M,
 537                               HNS3_TXD_L2LEN_S, ol2_len >> 1);
 538
 539                /* compute OL3 header size, defined in 4 Bytes */
 540                ol3_len = l4.hdr - l3.hdr;
 541                hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
 542                               HNS3_TXD_L3LEN_S, ol3_len >> 2);
 543
 544                /* MAC in UDP, MAC in GRE (0x6558)*/
 545                if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
 546                        /* switch MAC header ptr from outer to inner header.*/
 547                        l2_hdr = skb_inner_mac_header(skb);
 548
 549                        /* compute OL4 header size, defined in 4 Bytes. */
 550                        ol4_len = l2_hdr - l4.hdr;
 551                        hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M,
 552                                       HNS3_TXD_L4LEN_S, ol4_len >> 2);
 553
 554                        /* switch IP header ptr from outer to inner header */
 555                        l3.hdr = skb_inner_network_header(skb);
 556
 557                        /* compute inner l2 header size, defined in 2 Bytes. */
 558                        l2_len = l3.hdr - l2_hdr;
 559                        hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
 560                                       HNS3_TXD_L2LEN_S, l2_len >> 1);
 561                } else {
  562                        /* skb packet types not supported by hardware;
  563                         * the txbd len field is not filled in this case.
  564                         */
 565                        return;
 566                }
 567
 568                /* switch L4 header pointer from outer to inner */
 569                l4.hdr = skb_inner_transport_header(skb);
 570
 571                l4_proto = il4_proto;
 572        }
 573
 574        /* compute inner(/normal) L3 header size, defined in 4 Bytes */
 575        l3_len = l4.hdr - l3.hdr;
 576        hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
 577                       HNS3_TXD_L3LEN_S, l3_len >> 2);
 578
 579        /* compute inner(/normal) L4 header size, defined in 4 Bytes */
 580        switch (l4_proto) {
 581        case IPPROTO_TCP:
 582                hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
 583                               HNS3_TXD_L4LEN_S, l4.tcp->doff);
 584                break;
 585        case IPPROTO_SCTP:
 586                hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
 587                               HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2));
 588                break;
 589        case IPPROTO_UDP:
 590                hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
 591                               HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2));
 592                break;
 593        default:
  594                /* skb packet types not supported by hardware;
  595                 * the txbd len field is not filled in this case.
  596                 */
 597                return;
 598        }
 599}
 600
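/* hns3_set_l3l4_type_csum - set the L3/L4 (and outer L3/tunnel) type fields
 * and request hardware checksum offload for this skb.  Tunnel types other
 * than UDP-based (VXLAN/GENEVE style) and GRE, and L4 protocols other than
 * TCP/UDP/SCTP, cannot be offloaded: such skbs fall back to
 * skb_checksum_help(), or are rejected with -EDOM when they are also GSO,
 * since hardware cannot checksum what it has to segment.
 */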
 601static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
 602                                   u8 il4_proto, u32 *type_cs_vlan_tso,
 603                                   u32 *ol_type_vlan_len_msec)
 604{
 605        union {
 606                struct iphdr *v4;
 607                struct ipv6hdr *v6;
 608                unsigned char *hdr;
 609        } l3;
 610        u32 l4_proto = ol4_proto;
 611
 612        l3.hdr = skb_network_header(skb);
 613
 614        /* define OL3 type and tunnel type(OL4).*/
 615        if (skb->encapsulation) {
 616                /* define outer network header type.*/
 617                if (skb->protocol == htons(ETH_P_IP)) {
 618                        if (skb_is_gso(skb))
 619                                hnae_set_field(*ol_type_vlan_len_msec,
 620                                               HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
 621                                               HNS3_OL3T_IPV4_CSUM);
 622                        else
 623                                hnae_set_field(*ol_type_vlan_len_msec,
 624                                               HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
 625                                               HNS3_OL3T_IPV4_NO_CSUM);
 626
 627                } else if (skb->protocol == htons(ETH_P_IPV6)) {
 628                        hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
 629                                       HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
 630                }
 631
 632                /* define tunnel type(OL4).*/
 633                switch (l4_proto) {
 634                case IPPROTO_UDP:
 635                        hnae_set_field(*ol_type_vlan_len_msec,
 636                                       HNS3_TXD_TUNTYPE_M,
 637                                       HNS3_TXD_TUNTYPE_S,
 638                                       HNS3_TUN_MAC_IN_UDP);
 639                        break;
 640                case IPPROTO_GRE:
 641                        hnae_set_field(*ol_type_vlan_len_msec,
 642                                       HNS3_TXD_TUNTYPE_M,
 643                                       HNS3_TXD_TUNTYPE_S,
 644                                       HNS3_TUN_NVGRE);
 645                        break;
 646                default:
  647                        /* drop the skb tunnel packet if hardware doesn't support it,
  648                         * because hardware can't calculate the checksum when doing TSO.
  649                         */
 650                        if (skb_is_gso(skb))
 651                                return -EDOM;
 652
  653                        /* the stack computes the IP header checksum already;
  654                         * the driver calculates the L4 checksum in software when not doing TSO.
  655                         */
 656                        skb_checksum_help(skb);
 657                        return 0;
 658                }
 659
 660                l3.hdr = skb_inner_network_header(skb);
 661                l4_proto = il4_proto;
 662        }
 663
 664        if (l3.v4->version == 4) {
 665                hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
 666                               HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
 667
  668                /* the stack computes the IP header checksum already; the only
  669                 * time we need the hardware to recompute it is in the case of TSO.
  670                 */
 671                if (skb_is_gso(skb))
 672                        hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
 673
 674                hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
 675        } else if (l3.v6->version == 6) {
 676                hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
 677                               HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
 678                hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
 679        }
 680
 681        switch (l4_proto) {
 682        case IPPROTO_TCP:
 683                hnae_set_field(*type_cs_vlan_tso,
 684                               HNS3_TXD_L4T_M,
 685                               HNS3_TXD_L4T_S,
 686                               HNS3_L4T_TCP);
 687                break;
 688        case IPPROTO_UDP:
 689                hnae_set_field(*type_cs_vlan_tso,
 690                               HNS3_TXD_L4T_M,
 691                               HNS3_TXD_L4T_S,
 692                               HNS3_L4T_UDP);
 693                break;
 694        case IPPROTO_SCTP:
 695                hnae_set_field(*type_cs_vlan_tso,
 696                               HNS3_TXD_L4T_M,
 697                               HNS3_TXD_L4T_S,
 698                               HNS3_L4T_SCTP);
 699                break;
 700        default:
  701                /* drop the skb if its L4 protocol is not supported by hardware,
  702                 * because hardware can't calculate the checksum when doing TSO.
  703                 */
 704                if (skb_is_gso(skb))
 705                        return -EDOM;
 706
  707                /* the stack computes the IP header checksum already;
  708                 * the driver calculates the L4 checksum in software when not doing TSO.
  709                 */
 710                skb_checksum_help(skb);
 711                return 0;
 712        }
 713
 714        return 0;
 715}
 716
 717static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
 718{
 719        /* Config bd buffer end */
 720        hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
  721                       HNS3_TXD_BDTYPE_S, 0);
 722        hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
 723        hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
 724        hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 1);
 725}
 726
 727static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 728                          int size, dma_addr_t dma, int frag_end,
 729                          enum hns_desc_type type)
 730{
 731        struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
 732        struct hns3_desc *desc = &ring->desc[ring->next_to_use];
 733        u32 ol_type_vlan_len_msec = 0;
 734        u16 bdtp_fe_sc_vld_ra_ri = 0;
 735        u32 type_cs_vlan_tso = 0;
 736        struct sk_buff *skb;
 737        u32 paylen = 0;
 738        u16 mss = 0;
 739        __be16 protocol;
 740        u8 ol4_proto;
 741        u8 il4_proto;
 742        int ret;
 743
 744        /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
 745        desc_cb->priv = priv;
 746        desc_cb->length = size;
 747        desc_cb->dma = dma;
 748        desc_cb->type = type;
 749
 750        /* now, fill the descriptor */
 751        desc->addr = cpu_to_le64(dma);
 752        desc->tx.send_size = cpu_to_le16((u16)size);
 753        hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
 754        desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
 755
 756        if (type == DESC_TYPE_SKB) {
 757                skb = (struct sk_buff *)priv;
  758                paylen = skb->len;
 759
 760                if (skb->ip_summed == CHECKSUM_PARTIAL) {
 761                        skb_reset_mac_len(skb);
 762                        protocol = skb->protocol;
 763
 764                        /* vlan packet*/
 765                        if (protocol == htons(ETH_P_8021Q)) {
 766                                protocol = vlan_get_protocol(skb);
 767                                skb->protocol = protocol;
 768                        }
 769                        ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
 770                        if (ret)
 771                                return ret;
 772                        hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
 773                                            &type_cs_vlan_tso,
 774                                            &ol_type_vlan_len_msec);
 775                        ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
 776                                                      &type_cs_vlan_tso,
 777                                                      &ol_type_vlan_len_msec);
 778                        if (ret)
 779                                return ret;
 780
 781                        ret = hns3_set_tso(skb, &paylen, &mss,
 782                                           &type_cs_vlan_tso);
 783                        if (ret)
 784                                return ret;
 785                }
 786
 787                /* Set txbd */
 788                desc->tx.ol_type_vlan_len_msec =
 789                        cpu_to_le32(ol_type_vlan_len_msec);
 790                desc->tx.type_cs_vlan_tso_len =
 791                        cpu_to_le32(type_cs_vlan_tso);
  792                desc->tx.paylen = cpu_to_le32(paylen);
 793                desc->tx.mss = cpu_to_le16(mss);
 794        }
 795
 796        /* move ring pointer to next.*/
 797        ring_ptr_move_fw(ring, next_to_use);
 798
 799        return 0;
 800}
 801
 802static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv,
 803                              int size, dma_addr_t dma, int frag_end,
 804                              enum hns_desc_type type)
 805{
 806        unsigned int frag_buf_num;
 807        unsigned int k;
 808        int sizeoflast;
 809        int ret;
 810
 811        frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
 812        sizeoflast = size % HNS3_MAX_BD_SIZE;
 813        sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
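        /* Worked example of the split below (assuming HNS3_MAX_BD_SIZE is
         * 65535, as defined in hns3_enet.h; adjust if the constant differs):
         * a 100000-byte buffer gives frag_buf_num = 2 and sizeoflast = 34465,
         * i.e. one BD of 65535 bytes followed by one BD of 34465 bytes.
         */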
 814
  815        /* When the frag size is bigger than hardware can handle, split this frag */
 816        for (k = 0; k < frag_buf_num; k++) {
 817                ret = hns3_fill_desc(ring, priv,
 818                                     (k == frag_buf_num - 1) ?
 819                                sizeoflast : HNS3_MAX_BD_SIZE,
 820                                dma + HNS3_MAX_BD_SIZE * k,
 821                                frag_end && (k == frag_buf_num - 1) ? 1 : 0,
 822                                (type == DESC_TYPE_SKB && !k) ?
 823                                        DESC_TYPE_SKB : DESC_TYPE_PAGE);
 824                if (ret)
 825                        return ret;
 826        }
 827
 828        return 0;
 829}
 830
 831static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
 832                                   struct hns3_enet_ring *ring)
 833{
 834        struct sk_buff *skb = *out_skb;
 835        struct skb_frag_struct *frag;
 836        int bdnum_for_frag;
 837        int frag_num;
 838        int buf_num;
 839        int size;
 840        int i;
 841
 842        size = skb_headlen(skb);
 843        buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
 844
 845        frag_num = skb_shinfo(skb)->nr_frags;
 846        for (i = 0; i < frag_num; i++) {
 847                frag = &skb_shinfo(skb)->frags[i];
 848                size = skb_frag_size(frag);
 849                bdnum_for_frag =
 850                        (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
 851                if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)
 852                        return -ENOMEM;
 853
 854                buf_num += bdnum_for_frag;
 855        }
 856
 857        if (buf_num > ring_space(ring))
 858                return -EBUSY;
 859
 860        *bnum = buf_num;
 861        return 0;
 862}
 863
 864static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
 865                                  struct hns3_enet_ring *ring)
 866{
 867        struct sk_buff *skb = *out_skb;
 868        int buf_num;
 869
 870        /* No. of segments (plus a header) */
 871        buf_num = skb_shinfo(skb)->nr_frags + 1;
 872
 873        if (buf_num > ring_space(ring))
 874                return -EBUSY;
 875
 876        *bnum = buf_num;
 877
 878        return 0;
 879}
 880
 881static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig)
 882{
 883        struct device *dev = ring_to_dev(ring);
 884        unsigned int i;
 885
 886        for (i = 0; i < ring->desc_num; i++) {
 887                /* check if this is where we started */
 888                if (ring->next_to_use == next_to_use_orig)
 889                        break;
 890
 891                /* unmap the descriptor dma address */
 892                if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
 893                        dma_unmap_single(dev,
 894                                         ring->desc_cb[ring->next_to_use].dma,
 895                                        ring->desc_cb[ring->next_to_use].length,
 896                                        DMA_TO_DEVICE);
 897                else
 898                        dma_unmap_page(dev,
 899                                       ring->desc_cb[ring->next_to_use].dma,
 900                                       ring->desc_cb[ring->next_to_use].length,
 901                                       DMA_TO_DEVICE);
 902
 903                /* rollback one */
 904                ring_ptr_move_bw(ring, next_to_use);
 905        }
 906}
 907
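/* Tx path overview (as implemented below): check ring space through
 * priv->ops.maybe_stop_tx, DMA-map the linear part and every page fragment,
 * fill one or more Tx BDs per mapping through priv->ops.fill_desc, and
 * finally notify hardware via hnae_queue_xmit() with the number of BDs
 * used.  On a mapping or fill error the already-filled BDs are rolled back
 * with hns_nic_dma_unmap().
 */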
 908static netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb,
 909                                     struct net_device *netdev)
 910{
 911        struct hns3_nic_priv *priv = netdev_priv(netdev);
 912        struct hns3_nic_ring_data *ring_data =
 913                &tx_ring_data(priv, skb->queue_mapping);
 914        struct hns3_enet_ring *ring = ring_data->ring;
 915        struct device *dev = priv->dev;
 916        struct netdev_queue *dev_queue;
 917        struct skb_frag_struct *frag;
 918        int next_to_use_head;
 919        int next_to_use_frag;
 920        dma_addr_t dma;
 921        int buf_num;
 922        int seg_num;
 923        int size;
 924        int ret;
 925        int i;
 926
 927        /* Prefetch the data used later */
 928        prefetch(skb->data);
 929
 930        switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
 931        case -EBUSY:
 932                u64_stats_update_begin(&ring->syncp);
 933                ring->stats.tx_busy++;
 934                u64_stats_update_end(&ring->syncp);
 935
 936                goto out_net_tx_busy;
 937        case -ENOMEM:
 938                u64_stats_update_begin(&ring->syncp);
 939                ring->stats.sw_err_cnt++;
 940                u64_stats_update_end(&ring->syncp);
 941                netdev_err(netdev, "no memory to xmit!\n");
 942
 943                goto out_err_tx_ok;
 944        default:
 945                break;
 946        }
 947
 948        /* No. of segments (plus a header) */
 949        seg_num = skb_shinfo(skb)->nr_frags + 1;
 950        /* Fill the first part */
 951        size = skb_headlen(skb);
 952
 953        next_to_use_head = ring->next_to_use;
 954
 955        dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
 956        if (dma_mapping_error(dev, dma)) {
 957                netdev_err(netdev, "TX head DMA map failed\n");
 958                ring->stats.sw_err_cnt++;
 959                goto out_err_tx_ok;
 960        }
 961
 962        ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
 963                           DESC_TYPE_SKB);
 964        if (ret)
 965                goto head_dma_map_err;
 966
 967        next_to_use_frag = ring->next_to_use;
 968        /* Fill the fragments */
 969        for (i = 1; i < seg_num; i++) {
 970                frag = &skb_shinfo(skb)->frags[i - 1];
 971                size = skb_frag_size(frag);
 972                dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
 973                if (dma_mapping_error(dev, dma)) {
 974                        netdev_err(netdev, "TX frag(%d) DMA map failed\n", i);
 975                        ring->stats.sw_err_cnt++;
 976                        goto frag_dma_map_err;
 977                }
 978                ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
 979                                    seg_num - 1 == i ? 1 : 0,
 980                                    DESC_TYPE_PAGE);
 981
 982                if (ret)
 983                        goto frag_dma_map_err;
 984        }
 985
  986        /* All BDs for this packet are filled; account the bytes and ring the doorbell */
 987        dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
 988        netdev_tx_sent_queue(dev_queue, skb->len);
 989
 990        wmb(); /* Commit all data before submit */
 991
 992        hnae_queue_xmit(ring->tqp, buf_num);
 993
 994        return NETDEV_TX_OK;
 995
 996frag_dma_map_err:
 997        hns_nic_dma_unmap(ring, next_to_use_frag);
 998
 999head_dma_map_err:
1000        hns_nic_dma_unmap(ring, next_to_use_head);
1001
1002out_err_tx_ok:
1003        dev_kfree_skb_any(skb);
1004        return NETDEV_TX_OK;
1005
1006out_net_tx_busy:
1007        netif_stop_subqueue(netdev, ring_data->queue_index);
1008        smp_mb(); /* Commit all data before submit */
1009
1010        return NETDEV_TX_BUSY;
1011}
1012
1013static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
1014{
1015        struct hns3_nic_priv *priv = netdev_priv(netdev);
1016        struct hnae3_handle *h = priv->ae_handle;
1017        struct sockaddr *mac_addr = p;
1018        int ret;
1019
1020        if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
1021                return -EADDRNOTAVAIL;
1022
1023        ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data);
1024        if (ret) {
1025                netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
1026                return ret;
1027        }
1028
1029        ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);
1030
1031        return 0;
1032}
1033
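/* When TSO is enabled, the descriptor-fill and ring-space helpers are
 * swapped for their TSO-aware variants: hns3_fill_desc_tso() splits buffers
 * larger than HNS3_MAX_BD_SIZE across several BDs, and
 * hns3_nic_maybe_stop_tso() accounts for those extra BDs when checking
 * ring space.
 */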
1034static int hns3_nic_set_features(struct net_device *netdev,
1035                                 netdev_features_t features)
1036{
1037        struct hns3_nic_priv *priv = netdev_priv(netdev);
1038
1039        if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
1040                priv->ops.fill_desc = hns3_fill_desc_tso;
1041                priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
1042        } else {
1043                priv->ops.fill_desc = hns3_fill_desc;
1044                priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
1045        }
1046
1047        netdev->features = features;
1048        return 0;
1049}
1050
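/* hns3_nic_get_stats64 - aggregate the per-ring software counters.
 * ring_data[] holds the Tx rings at indexes [0, num_tqps) and the Rx rings
 * at [num_tqps, 2 * num_tqps), which is why the Rx ring of queue idx is
 * ring_data[idx + queue_num].  Each ring's counters are read under its
 * u64_stats seqcount so that 64-bit values stay consistent on 32-bit
 * systems.
 */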
1051static void
1052hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
1053{
1054        struct hns3_nic_priv *priv = netdev_priv(netdev);
1055        int queue_num = priv->ae_handle->kinfo.num_tqps;
1056        struct hns3_enet_ring *ring;
1057        unsigned int start;
1058        unsigned int idx;
1059        u64 tx_bytes = 0;
1060        u64 rx_bytes = 0;
1061        u64 tx_pkts = 0;
1062        u64 rx_pkts = 0;
1063
1064        for (idx = 0; idx < queue_num; idx++) {
1065                /* fetch the tx stats */
1066                ring = priv->ring_data[idx].ring;
1067                do {
1068                        start = u64_stats_fetch_begin_irq(&ring->syncp);
1069                        tx_bytes += ring->stats.tx_bytes;
1070                        tx_pkts += ring->stats.tx_pkts;
1071                } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1072
1073                /* fetch the rx stats */
1074                ring = priv->ring_data[idx + queue_num].ring;
1075                do {
1076                        start = u64_stats_fetch_begin_irq(&ring->syncp);
1077                        rx_bytes += ring->stats.rx_bytes;
1078                        rx_pkts += ring->stats.rx_pkts;
1079                } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1080        }
1081
1082        stats->tx_bytes = tx_bytes;
1083        stats->tx_packets = tx_pkts;
1084        stats->rx_bytes = rx_bytes;
1085        stats->rx_packets = rx_pkts;
1086
1087        stats->rx_errors = netdev->stats.rx_errors;
1088        stats->multicast = netdev->stats.multicast;
1089        stats->rx_length_errors = netdev->stats.rx_length_errors;
1090        stats->rx_crc_errors = netdev->stats.rx_crc_errors;
1091        stats->rx_missed_errors = netdev->stats.rx_missed_errors;
1092
1093        stats->tx_errors = netdev->stats.tx_errors;
1094        stats->rx_dropped = netdev->stats.rx_dropped;
1095        stats->tx_dropped = netdev->stats.tx_dropped;
1096        stats->collisions = netdev->stats.collisions;
1097        stats->rx_over_errors = netdev->stats.rx_over_errors;
1098        stats->rx_frame_errors = netdev->stats.rx_frame_errors;
1099        stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
1100        stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
1101        stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
1102        stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
1103        stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
1104        stats->tx_window_errors = netdev->stats.tx_window_errors;
1105        stats->rx_compressed = netdev->stats.rx_compressed;
1106        stats->tx_compressed = netdev->stats.tx_compressed;
1107}
1108
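/* UDP tunnel offload bookkeeping: priv->udp_tnl[] keeps one entry per
 * tunnel type (VXLAN/GENEVE) with a use count and the offloaded destination
 * port.  Only one port per type can be offloaded; further adds of the same
 * port only bump the count, and the hardware entry is removed once the
 * count drops back to zero.
 */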
1109static void hns3_add_tunnel_port(struct net_device *netdev, u16 port,
1110                                 enum hns3_udp_tnl_type type)
1111{
1112        struct hns3_nic_priv *priv = netdev_priv(netdev);
1113        struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
1114        struct hnae3_handle *h = priv->ae_handle;
1115
1116        if (udp_tnl->used && udp_tnl->dst_port == port) {
1117                udp_tnl->used++;
1118                return;
1119        }
1120
1121        if (udp_tnl->used) {
1122                netdev_warn(netdev,
 1123                            "UDP tunnel [%d] busy, port [%d] not offloaded\n", type, port);
1124                return;
1125        }
1126
1127        udp_tnl->dst_port = port;
1128        udp_tnl->used = 1;
1129        /* TBD send command to hardware to add port */
1130        if (h->ae_algo->ops->add_tunnel_udp)
1131                h->ae_algo->ops->add_tunnel_udp(h, port);
1132}
1133
1134static void hns3_del_tunnel_port(struct net_device *netdev, u16 port,
1135                                 enum hns3_udp_tnl_type type)
1136{
1137        struct hns3_nic_priv *priv = netdev_priv(netdev);
1138        struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
1139        struct hnae3_handle *h = priv->ae_handle;
1140
1141        if (!udp_tnl->used || udp_tnl->dst_port != port) {
1142                netdev_warn(netdev,
1143                            "Invalid UDP tunnel port %d\n", port);
1144                return;
1145        }
1146
1147        udp_tnl->used--;
1148        if (udp_tnl->used)
1149                return;
1150
1151        udp_tnl->dst_port = 0;
1152        /* TBD send command to hardware to del port  */
1153        if (h->ae_algo->ops->del_tunnel_udp)
1154                h->ae_algo->ops->del_tunnel_udp(h, port);
1155}
1156
 1157/* hns3_nic_udp_tunnel_add - Get notification about UDP tunnel ports
 1158 * @netdev: This physical port's netdev
 1159 * @ti: Tunnel information
 1160 */
1161static void hns3_nic_udp_tunnel_add(struct net_device *netdev,
1162                                    struct udp_tunnel_info *ti)
1163{
1164        u16 port_n = ntohs(ti->port);
1165
1166        switch (ti->type) {
1167        case UDP_TUNNEL_TYPE_VXLAN:
1168                hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
1169                break;
1170        case UDP_TUNNEL_TYPE_GENEVE:
1171                hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
1172                break;
1173        default:
1174                netdev_err(netdev, "unsupported tunnel type %d\n", ti->type);
1175                break;
1176        }
1177}
1178
1179static void hns3_nic_udp_tunnel_del(struct net_device *netdev,
1180                                    struct udp_tunnel_info *ti)
1181{
1182        u16 port_n = ntohs(ti->port);
1183
1184        switch (ti->type) {
1185        case UDP_TUNNEL_TYPE_VXLAN:
1186                hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
1187                break;
1188        case UDP_TUNNEL_TYPE_GENEVE:
1189                hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
1190                break;
1191        default:
1192                break;
1193        }
1194}
1195
1196static int hns3_setup_tc(struct net_device *netdev, u8 tc)
1197{
1198        struct hns3_nic_priv *priv = netdev_priv(netdev);
1199        struct hnae3_handle *h = priv->ae_handle;
1200        struct hnae3_knic_private_info *kinfo = &h->kinfo;
1201        unsigned int i;
1202        int ret;
1203
1204        if (tc > HNAE3_MAX_TC)
1205                return -EINVAL;
1206
1207        if (kinfo->num_tc == tc)
1208                return 0;
1209
1210        if (!netdev)
1211                return -EINVAL;
1212
1213        if (!tc) {
1214                netdev_reset_tc(netdev);
1215                return 0;
1216        }
1217
1218        /* Set num_tc for netdev */
1219        ret = netdev_set_num_tc(netdev, tc);
1220        if (ret)
1221                return ret;
1222
1223        /* Set per TC queues for the VSI */
1224        for (i = 0; i < HNAE3_MAX_TC; i++) {
1225                if (kinfo->tc_info[i].enable)
1226                        netdev_set_tc_queue(netdev,
1227                                            kinfo->tc_info[i].tc,
1228                                            kinfo->tc_info[i].tqp_count,
1229                                            kinfo->tc_info[i].tqp_offset);
1230        }
1231
1232        return 0;
1233}
1234
1235static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
1236                             void *type_data)
1237{
1238        struct tc_mqprio_qopt *mqprio = type_data;
1239
1240        if (type != TC_SETUP_MQPRIO)
1241                return -EOPNOTSUPP;
1242
1243        return hns3_setup_tc(dev, mqprio->num_tc);
1244}
1245
1246static int hns3_vlan_rx_add_vid(struct net_device *netdev,
1247                                __be16 proto, u16 vid)
1248{
1249        struct hns3_nic_priv *priv = netdev_priv(netdev);
1250        struct hnae3_handle *h = priv->ae_handle;
1251        int ret = -EIO;
1252
1253        if (h->ae_algo->ops->set_vlan_filter)
1254                ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
1255
1256        return ret;
1257}
1258
1259static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
1260                                 __be16 proto, u16 vid)
1261{
1262        struct hns3_nic_priv *priv = netdev_priv(netdev);
1263        struct hnae3_handle *h = priv->ae_handle;
1264        int ret = -EIO;
1265
1266        if (h->ae_algo->ops->set_vlan_filter)
1267                ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
1268
1269        return ret;
1270}
1271
1272static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1273                                u8 qos, __be16 vlan_proto)
1274{
1275        struct hns3_nic_priv *priv = netdev_priv(netdev);
1276        struct hnae3_handle *h = priv->ae_handle;
1277        int ret = -EIO;
1278
1279        if (h->ae_algo->ops->set_vf_vlan_filter)
1280                ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
1281                                                   qos, vlan_proto);
1282
1283        return ret;
1284}
1285
1286static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
1287{
1288        struct hns3_nic_priv *priv = netdev_priv(netdev);
1289        struct hnae3_handle *h = priv->ae_handle;
1290        bool if_running = netif_running(netdev);
1291        int ret;
1292
1293        if (!h->ae_algo->ops->set_mtu)
1294                return -EOPNOTSUPP;
1295
1296        /* if this was called with netdev up then bring netdevice down */
1297        if (if_running) {
1298                (void)hns3_nic_net_stop(netdev);
1299                msleep(100);
1300        }
1301
 1302        ret = h->ae_algo->ops->set_mtu(h, new_mtu);
 1303        if (ret)
 1304                netdev_err(netdev, "failed to change MTU in hardware %d\n",
 1305                           ret);
 1306        else
 1307                netdev->mtu = new_mtu;
 1308
 1309        /* if the netdev was running earlier, bring it up again */
 1310        if (if_running && hns3_nic_net_open(netdev))
 1311                ret = -EINVAL;
 1312
 1313        return ret;
1314}
1315
1316static const struct net_device_ops hns3_nic_netdev_ops = {
1317        .ndo_open               = hns3_nic_net_open,
1318        .ndo_stop               = hns3_nic_net_stop,
1319        .ndo_start_xmit         = hns3_nic_net_xmit,
1320        .ndo_set_mac_address    = hns3_nic_net_set_mac_address,
1321        .ndo_change_mtu         = hns3_nic_change_mtu,
1322        .ndo_set_features       = hns3_nic_set_features,
1323        .ndo_get_stats64        = hns3_nic_get_stats64,
1324        .ndo_setup_tc           = hns3_nic_setup_tc,
1325        .ndo_set_rx_mode        = hns3_nic_set_rx_mode,
1326        .ndo_udp_tunnel_add     = hns3_nic_udp_tunnel_add,
1327        .ndo_udp_tunnel_del     = hns3_nic_udp_tunnel_del,
1328        .ndo_vlan_rx_add_vid    = hns3_vlan_rx_add_vid,
1329        .ndo_vlan_rx_kill_vid   = hns3_vlan_rx_kill_vid,
1330        .ndo_set_vf_vlan        = hns3_ndo_set_vf_vlan,
1331};
1332
1333/* hns3_probe - Device initialization routine
1334 * @pdev: PCI device information struct
1335 * @ent: entry in hns3_pci_tbl
1336 *
1337 * hns3_probe initializes a PF identified by a pci_dev structure.
1338 * The OS initialization, configuring of the PF private structure,
1339 * and a hardware reset occur.
1340 *
1341 * Returns 0 on success, negative on failure
1342 */
1343static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1344{
1345        struct hnae3_ae_dev *ae_dev;
1346        int ret;
1347
1348        ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
1349                              GFP_KERNEL);
1350        if (!ae_dev) {
1351                ret = -ENOMEM;
1352                return ret;
1353        }
1354
1355        ae_dev->pdev = pdev;
1356        ae_dev->flag = ent->driver_data;
1357        ae_dev->dev_type = HNAE3_DEV_KNIC;
1358        pci_set_drvdata(pdev, ae_dev);
1359
1360        return hnae3_register_ae_dev(ae_dev);
1361}
1362
1363/* hns3_remove - Device removal routine
1364 * @pdev: PCI device information struct
1365 */
1366static void hns3_remove(struct pci_dev *pdev)
1367{
1368        struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1369
1370        hnae3_unregister_ae_dev(ae_dev);
1371
1372        devm_kfree(&pdev->dev, ae_dev);
1373
1374        pci_set_drvdata(pdev, NULL);
1375}
1376
1377static struct pci_driver hns3_driver = {
1378        .name     = hns3_driver_name,
1379        .id_table = hns3_pci_tbl,
1380        .probe    = hns3_probe,
1381        .remove   = hns3_remove,
1382};
1383
 1384/* set the default features for the hns3 netdev */
1385static void hns3_set_default_feature(struct net_device *netdev)
1386{
1387        netdev->priv_flags |= IFF_UNICAST_FLT;
1388
1389        netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1390                NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1391                NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1392                NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1393                NETIF_F_GSO_UDP_TUNNEL_CSUM;
1394
1395        netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
1396
1397        netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
1398
1399        netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1400                NETIF_F_HW_VLAN_CTAG_FILTER |
1401                NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1402                NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1403                NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1404                NETIF_F_GSO_UDP_TUNNEL_CSUM;
1405
1406        netdev->vlan_features |=
1407                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
1408                NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
1409                NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1410                NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1411                NETIF_F_GSO_UDP_TUNNEL_CSUM;
1412
1413        netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1414                NETIF_F_HW_VLAN_CTAG_FILTER |
1415                NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1416                NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1417                NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1418                NETIF_F_GSO_UDP_TUNNEL_CSUM;
1419}
1420
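/* Rx buffer management: hns3_alloc_buffer() grabs a (possibly compound)
 * page of hnae_page_order() for a descriptor, hns3_map_buffer() maps it for
 * DMA, and hns3_alloc_buffer_attach() writes the DMA address into the
 * descriptor.  reuse_flag is cleared on allocation; the Rx path uses it to
 * decide whether a page can be recycled instead of reallocated.
 */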
1421static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
1422                             struct hns3_desc_cb *cb)
1423{
1424        unsigned int order = hnae_page_order(ring);
1425        struct page *p;
1426
1427        p = dev_alloc_pages(order);
1428        if (!p)
1429                return -ENOMEM;
1430
1431        cb->priv = p;
1432        cb->page_offset = 0;
1433        cb->reuse_flag = 0;
1434        cb->buf  = page_address(p);
1435        cb->length = hnae_page_size(ring);
1436        cb->type = DESC_TYPE_PAGE;
1437
1438        memset(cb->buf, 0, cb->length);
1439
1440        return 0;
1441}
1442
1443static void hns3_free_buffer(struct hns3_enet_ring *ring,
1444                             struct hns3_desc_cb *cb)
1445{
1446        if (cb->type == DESC_TYPE_SKB)
1447                dev_kfree_skb_any((struct sk_buff *)cb->priv);
1448        else if (!HNAE3_IS_TX_RING(ring))
1449                put_page((struct page *)cb->priv);
1450        memset(cb, 0, sizeof(*cb));
1451}
1452
1453static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
1454{
1455        cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
1456                               cb->length, ring_to_dma_dir(ring));
1457
1458        if (dma_mapping_error(ring_to_dev(ring), cb->dma))
1459                return -EIO;
1460
1461        return 0;
1462}
1463
1464static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
1465                              struct hns3_desc_cb *cb)
1466{
1467        if (cb->type == DESC_TYPE_SKB)
1468                dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
1469                                 ring_to_dma_dir(ring));
1470        else
1471                dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
1472                               ring_to_dma_dir(ring));
1473}
1474
1475static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
1476{
1477        hns3_unmap_buffer(ring, &ring->desc_cb[i]);
1478        ring->desc[i].addr = 0;
1479}
1480
1481static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
1482{
1483        struct hns3_desc_cb *cb = &ring->desc_cb[i];
1484
1485        if (!ring->desc_cb[i].dma)
1486                return;
1487
1488        hns3_buffer_detach(ring, i);
1489        hns3_free_buffer(ring, cb);
1490}
1491
1492static void hns3_free_buffers(struct hns3_enet_ring *ring)
1493{
1494        int i;
1495
1496        for (i = 0; i < ring->desc_num; i++)
1497                hns3_free_buffer_detach(ring, i);
1498}
1499
1500/* free desc along with its attached buffer */
1501static void hns3_free_desc(struct hns3_enet_ring *ring)
1502{
1503        hns3_free_buffers(ring);
1504
1505        dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
1506                         ring->desc_num * sizeof(ring->desc[0]),
1507                         DMA_BIDIRECTIONAL);
1508        ring->desc_dma_addr = 0;
1509        kfree(ring->desc);
1510        ring->desc = NULL;
1511}
1512
1513static int hns3_alloc_desc(struct hns3_enet_ring *ring)
1514{
1515        int size = ring->desc_num * sizeof(ring->desc[0]);
1516
1517        ring->desc = kzalloc(size, GFP_KERNEL);
1518        if (!ring->desc)
1519                return -ENOMEM;
1520
1521        ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc,
1522                                             size, DMA_BIDIRECTIONAL);
1523        if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
1524                ring->desc_dma_addr = 0;
1525                kfree(ring->desc);
1526                ring->desc = NULL;
1527                return -ENOMEM;
1528        }
1529
1530        return 0;
1531}
1532
1533static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
1534                                   struct hns3_desc_cb *cb)
1535{
1536        int ret;
1537
1538        ret = hns3_alloc_buffer(ring, cb);
1539        if (ret)
1540                goto out;
1541
1542        ret = hns3_map_buffer(ring, cb);
1543        if (ret)
1544                goto out_with_buf;
1545
1546        return 0;
1547
1548out_with_buf:
1549        hns3_free_buffer(ring, cb);
1550out:
1551        return ret;
1552}
1553
1554static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
1555{
1556        int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);
1557
1558        if (ret)
1559                return ret;
1560
1561        ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1562
1563        return 0;
1564}
1565
1566/* Allocate memory for the raw packet buffers and map them for DMA */
1567static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
1568{
1569        int i, j, ret;
1570
1571        for (i = 0; i < ring->desc_num; i++) {
1572                ret = hns3_alloc_buffer_attach(ring, i);
1573                if (ret)
1574                        goto out_buffer_fail;
1575        }
1576
1577        return 0;
1578
1579out_buffer_fail:
1580        for (j = i - 1; j >= 0; j--)
1581                hns3_free_buffer_detach(ring, j);
1582        return ret;
1583}
1584
1585/* detach an in-use buffer and replace it with a reserved one */
1586static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
1587                                struct hns3_desc_cb *res_cb)
1588{
1589        hns3_unmap_buffer(ring, &ring->desc_cb[i]);
1590        ring->desc_cb[i] = *res_cb;
1591        ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1592}
1593
1594static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
1595{
1596        ring->desc_cb[i].reuse_flag = 0;
1597        ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
1598                + ring->desc_cb[i].page_offset);
1599}
1600
1601static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
1602                                      int *pkts)
1603{
1604        struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
1605
1606        (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
1607        (*bytes) += desc_cb->length;
1608        /* desc_cb is cleared by hns3_free_buffer_detach() below, so read it first */
1609        hns3_free_buffer_detach(ring, ring->next_to_clean);
1610
1611        ring_ptr_move_fw(ring, next_to_clean);
1612}
1613
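/* A head value read back from hardware is only trusted if it lies in the
 * half-open interval (next_to_clean, next_to_use], taking ring wrap-around
 * into account; e.g. with next_to_clean = 3 and next_to_use = 7 only head
 * values 4..7 are accepted.
 */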
1614static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
1615{
1616        int u = ring->next_to_use;
1617        int c = ring->next_to_clean;
1618
1619        if (unlikely(h > ring->desc_num))
1620                return 0;
1621
1622        return u > c ? (h > c && h <= u) : (h > c || h <= u);
1623}
1624
1625int hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
1626{
1627        struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1628        struct netdev_queue *dev_queue;
1629        int bytes, pkts;
1630        int head;
1631
1632        head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
1633        rmb(); /* Make sure head is read before touching any other data */
1634
1635        if (is_ring_empty(ring) || head == ring->next_to_clean)
1636                return 0; /* no data to poll */
1637
1638        if (!is_valid_clean_head(ring, head)) {
1639                netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
1640                           ring->next_to_use, ring->next_to_clean);
1641
1642                u64_stats_update_begin(&ring->syncp);
1643                ring->stats.io_err_cnt++;
1644                u64_stats_update_end(&ring->syncp);
1645                return -EIO;
1646        }
1647
1648        bytes = 0;
1649        pkts = 0;
1650        while (head != ring->next_to_clean && budget) {
1651                hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
1652                /* Issue prefetch for next Tx descriptor */
1653                prefetch(&ring->desc_cb[ring->next_to_clean]);
1654                budget--;
1655        }
1656
1657        ring->tqp_vector->tx_group.total_bytes += bytes;
1658        ring->tqp_vector->tx_group.total_packets += pkts;
1659
1660        u64_stats_update_begin(&ring->syncp);
1661        ring->stats.tx_bytes += bytes;
1662        ring->stats.tx_pkts += pkts;
1663        u64_stats_update_end(&ring->syncp);
1664
1665        dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
1666        netdev_tx_completed_queue(dev_queue, pkts, bytes);
1667
1668        if (unlikely(pkts && netif_carrier_ok(netdev) &&
1669                     (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
1670                /* Make sure that anybody stopping the queue after this
1671                 * sees the new next_to_clean.
1672                 */
1673                smp_mb();
1674                if (netif_tx_queue_stopped(dev_queue)) {
1675                        netif_tx_wake_queue(dev_queue);
1676                        ring->stats.restart_queue++;
1677                }
1678        }
1679
1680        return !!budget;
1681}
1682
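/* Number of descriptors between next_to_use and next_to_clean, i.e. slots
 * that have been consumed but not yet refilled with fresh buffers; e.g. with
 * desc_num = 8, next_to_clean = 2 and next_to_use = 6 there are
 * 8 + 2 - 6 = 4 unused descriptors.
 */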
1683static int hns3_desc_unused(struct hns3_enet_ring *ring)
1684{
1685        int ntc = ring->next_to_clean;
1686        int ntu = ring->next_to_use;
1687
1688        return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
1689}
1690
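/* Refill 'cleand_count' RX descriptors: reuse the attached page when
 * reuse_flag is set, otherwise allocate and map a fresh one, then tell
 * hardware how many descriptors were refilled via the head register.
 */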
1691static void
1692hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count)
1693{
1694        struct hns3_desc_cb *desc_cb;
1695        struct hns3_desc_cb res_cbs;
1696        int i, ret;
1697
1698        for (i = 0; i < cleand_count; i++) {
1699                desc_cb = &ring->desc_cb[ring->next_to_use];
1700                if (desc_cb->reuse_flag) {
1701                        u64_stats_update_begin(&ring->syncp);
1702                        ring->stats.reuse_pg_cnt++;
1703                        u64_stats_update_end(&ring->syncp);
1704
1705                        hns3_reuse_buffer(ring, ring->next_to_use);
1706                } else {
1707                        ret = hns3_reserve_buffer_map(ring, &res_cbs);
1708                        if (ret) {
1709                                u64_stats_update_begin(&ring->syncp);
1710                                ring->stats.sw_err_cnt++;
1711                                u64_stats_update_end(&ring->syncp);
1712
1713                                netdev_err(ring->tqp->handle->kinfo.netdev,
1714                                           "hnae reserve buffer map failed.\n");
1715                                break;
1716                        }
1717                        hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
1718                }
1719
1720                ring_ptr_move_fw(ring, next_to_use);
1721        }
1722
1723        wmb(); /* Ensure all buffer writes complete before the head update */
1724        writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
1725}
1726
1727/* hns3_nic_get_headlen - determine size of header for LRO/GRO
1728 * @data: pointer to the start of the headers
 * @flag: l234info from the RX descriptor, identifying the L3/L4 protocols
1729 * @max_size: total length of section to find headers in
1730 *
1731 * This function is meant to determine the length of headers that will
1732 * be recognized by hardware for LRO, GRO, and RSC offloads.  The main
1733 * motivation of doing this is to only perform one pull for IPv4 TCP
1734 * packets so that we can do basic things like calculating the gso_size
1735 * based on the average data per packet.
1736 */
1737static unsigned int hns3_nic_get_headlen(unsigned char *data, u32 flag,
1738                                         unsigned int max_size)
1739{
1740        unsigned char *network;
1741        u8 hlen;
1742
1743        /* This should never happen, but better safe than sorry */
1744        if (max_size < ETH_HLEN)
1745                return max_size;
1746
1747        /* Initialize network frame pointer */
1748        network = data;
1749
1750        /* Set first protocol and move network header forward */
1751        network += ETH_HLEN;
1752
1753        /* Handle any vlan tag if present */
1754        if (hnae_get_field(flag, HNS3_RXD_VLAN_M, HNS3_RXD_VLAN_S)
1755                == HNS3_RX_FLAG_VLAN_PRESENT) {
1756                if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
1757                        return max_size;
1758
1759                network += VLAN_HLEN;
1760        }
1761
1762        /* Handle L3 protocols */
1763        if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
1764                == HNS3_RX_FLAG_L3ID_IPV4) {
1765                if ((typeof(max_size))(network - data) >
1766                    (max_size - sizeof(struct iphdr)))
1767                        return max_size;
1768
1769                /* Access ihl as a u8 to avoid unaligned access on ia64 */
1770                hlen = (network[0] & 0x0F) << 2;
1771
1772                /* Verify hlen meets minimum size requirements */
1773                if (hlen < sizeof(struct iphdr))
1774                        return network - data;
1775
1776                /* Record next protocol if header is present */
1777        } else if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
1778                == HNS3_RX_FLAG_L3ID_IPV6) {
1779                if ((typeof(max_size))(network - data) >
1780                    (max_size - sizeof(struct ipv6hdr)))
1781                        return max_size;
1782
1783                /* Record next protocol */
1784                hlen = sizeof(struct ipv6hdr);
1785        } else {
1786                return network - data;
1787        }
1788
1789        /* Relocate pointer to start of L4 header */
1790        network += hlen;
1791
1792        /* Finally sort out TCP/UDP */
1793        if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
1794                == HNS3_RX_FLAG_L4ID_TCP) {
1795                if ((typeof(max_size))(network - data) >
1796                    (max_size - sizeof(struct tcphdr)))
1797                        return max_size;
1798
1799                /* Access doff as a u8 to avoid unaligned access on ia64 */
1800                hlen = (network[12] & 0xF0) >> 2;
1801
1802                /* Verify hlen meets minimum size requirements */
1803                if (hlen < sizeof(struct tcphdr))
1804                        return network - data;
1805
1806                network += hlen;
1807        } else if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
1808                == HNS3_RX_FLAG_L4ID_UDP) {
1809                if ((typeof(max_size))(network - data) >
1810                    (max_size - sizeof(struct udphdr)))
1811                        return max_size;
1812
1813                network += sizeof(struct udphdr);
1814        }
1815
1816        /* If everything has gone correctly, network should now point to the
1817         * data section of the packet, i.e. the end of the headers.
1818         * If not then it probably represents the end of the last recognized
1819         * header.
1820         */
1821        if ((typeof(max_size))(network - data) < max_size)
1822                return network - data;
1823        else
1824                return max_size;
1825}
1826
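/* Attach the RX buffer to the skb as a page fragment and decide whether the
 * page can be recycled: with two buffers per page, flip page_offset between
 * the two halves; otherwise advance page_offset until the page is used up.
 * Pages from a remote NUMA node are never reused.
 */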
1827static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
1828                                struct hns3_enet_ring *ring, int pull_len,
1829                                struct hns3_desc_cb *desc_cb)
1830{
1831        struct hns3_desc *desc;
1832        int truesize, size;
1833        int last_offset;
1834        bool twobufs;
1835
1836        twobufs = ((PAGE_SIZE < 8192) &&
1837                hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
1838
1839        desc = &ring->desc[ring->next_to_clean];
1840        size = le16_to_cpu(desc->rx.size);
1841
1842        if (twobufs) {
1843                truesize = hnae_buf_size(ring);
1844        } else {
1845                truesize = ALIGN(size, L1_CACHE_BYTES);
1846                last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
1847        }
1848
1849        skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
1850                        size - pull_len, truesize - pull_len);
1851
1852        /* Avoid re-using remote pages; reuse_flag stays 0 for them */
1853        if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
1854                return;
1855
1856        if (twobufs) {
1857                /* If we are only owner of page we can reuse it */
1858                if (likely(page_count(desc_cb->priv) == 1)) {
1859                        /* Flip page offset to other buffer */
1860                        desc_cb->page_offset ^= truesize;
1861
1862                        desc_cb->reuse_flag = 1;
1863                        /* bump ref count on page before it is given */
1864                        get_page(desc_cb->priv);
1865                }
1866                return;
1867        }
1868
1869        /* Move offset up to the next cache line */
1870        desc_cb->page_offset += truesize;
1871
1872        if (desc_cb->page_offset <= last_offset) {
1873                desc_cb->reuse_flag = 1;
1874                        /* Bump ref count on page before it is given */
1875                get_page(desc_cb->priv);
1876        }
1877}
1878
1879static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
1880                             struct hns3_desc *desc)
1881{
1882        struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1883        int l3_type, l4_type;
1884        u32 bd_base_info;
1885        int ol4_type;
1886        u32 l234info;
1887
1888        bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
1889        l234info = le32_to_cpu(desc->rx.l234_info);
1890
1891        skb->ip_summed = CHECKSUM_NONE;
1892
1893        skb_checksum_none_assert(skb);
1894
1895        if (!(netdev->features & NETIF_F_RXCSUM))
1896                return;
1897
1898        /* check if hardware has done checksum */
1899        if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
1900                return;
1901
1902        if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) ||
1903                     hnae_get_bit(l234info, HNS3_RXD_L4E_B) ||
1904                     hnae_get_bit(l234info, HNS3_RXD_OL3E_B) ||
1905                     hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) {
1906                netdev_err(netdev, "L3/L4 error pkt\n");
1907                u64_stats_update_begin(&ring->syncp);
1908                ring->stats.l3l4_csum_err++;
1909                u64_stats_update_end(&ring->syncp);
1910
1911                return;
1912        }
1913
1914        l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M,
1915                                 HNS3_RXD_L3ID_S);
1916        l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M,
1917                                 HNS3_RXD_L4ID_S);
1918
1919        ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
1920        switch (ol4_type) {
1921        case HNS3_OL4_TYPE_MAC_IN_UDP:
1922        case HNS3_OL4_TYPE_NVGRE:
1923                skb->csum_level = 1;
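                /* fall through: tunnelled packets also take the L3/L4 check below */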
1924        case HNS3_OL4_TYPE_NO_TUN:
1925                /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
1926                if (l3_type == HNS3_L3_TYPE_IPV4 ||
1927                    (l3_type == HNS3_L3_TYPE_IPV6 &&
1928                     (l4_type == HNS3_L4_TYPE_UDP ||
1929                      l4_type == HNS3_L4_TYPE_TCP ||
1930                      l4_type == HNS3_L4_TYPE_SCTP)))
1931                        skb->ip_summed = CHECKSUM_UNNECESSARY;
1932                break;
1933        }
1934}
1935
1936static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
1937                             struct sk_buff **out_skb, int *out_bnum)
1938{
1939        struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1940        struct hns3_desc_cb *desc_cb;
1941        struct hns3_desc *desc;
1942        struct sk_buff *skb;
1943        unsigned char *va;
1944        u32 bd_base_info;
1945        int pull_len;
1946        u32 l234info;
1947        int length;
1948        int bnum;
1949
1950        desc = &ring->desc[ring->next_to_clean];
1951        desc_cb = &ring->desc_cb[ring->next_to_clean];
1952
1953        prefetch(desc);
1954
1955        length = le16_to_cpu(desc->rx.pkt_len);
1956        bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
1957        l234info = le32_to_cpu(desc->rx.l234_info);
1958
1959        /* Check valid BD */
1960        if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))
1961                return -EFAULT;
1962
1963        va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
1964
1965        /* Prefetch first cache line of first page
1966         * The idea is to cache a few bytes of the packet header. Our L1
1967         * cache line size is 64B, so two prefetches are needed to cover
1968         * 128B. CPUs with 128B L1 cache lines also exist; there a single
1969         * prefetch already covers the relevant part of the header, and
1970         * the second prefetch below is compiled out.
1971         */
1972        prefetch(va);
1973#if L1_CACHE_BYTES < 128
1974        prefetch(va + L1_CACHE_BYTES);
1975#endif
1976
1977        skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
1978                                        HNS3_RX_HEAD_SIZE);
1979        if (unlikely(!skb)) {
1980                netdev_err(netdev, "alloc rx skb fail\n");
1981
1982                u64_stats_update_begin(&ring->syncp);
1983                ring->stats.sw_err_cnt++;
1984                u64_stats_update_end(&ring->syncp);
1985
1986                return -ENOMEM;
1987        }
1988
1989        prefetchw(skb->data);
1990
1991        bnum = 1;
1992        if (length <= HNS3_RX_HEAD_SIZE) {
1993                memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
1994
1995                /* We can reuse buffer as-is, just make sure it is local */
1996                if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
1997                        desc_cb->reuse_flag = 1;
1998                else /* This page cannot be reused so discard it */
1999                        put_page(desc_cb->priv);
2000
2001                ring_ptr_move_fw(ring, next_to_clean);
2002        } else {
2003                u64_stats_update_begin(&ring->syncp);
2004                ring->stats.seg_pkt_cnt++;
2005                u64_stats_update_end(&ring->syncp);
2006
2007                pull_len = hns3_nic_get_headlen(va, l234info,
2008                                                HNS3_RX_HEAD_SIZE);
2009                memcpy(__skb_put(skb, pull_len), va,
2010                       ALIGN(pull_len, sizeof(long)));
2011
2012                hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
2013                ring_ptr_move_fw(ring, next_to_clean);
2014
2015                while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
2016                        desc = &ring->desc[ring->next_to_clean];
2017                        desc_cb = &ring->desc_cb[ring->next_to_clean];
2018                        bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2019                        hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
2020                        ring_ptr_move_fw(ring, next_to_clean);
2021                        bnum++;
2022                }
2023        }
2024
2025        *out_bnum = bnum;
2026
2027        if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
2028                netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
2029                           ((u64 *)desc)[0], ((u64 *)desc)[1]);
2030                u64_stats_update_begin(&ring->syncp);
2031                ring->stats.non_vld_descs++;
2032                u64_stats_update_end(&ring->syncp);
2033
2034                dev_kfree_skb_any(skb);
2035                return -EINVAL;
2036        }
2037
2038        if (unlikely((!desc->rx.pkt_len) ||
2039                     hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
2040                netdev_err(netdev, "truncated pkt\n");
2041                u64_stats_update_begin(&ring->syncp);
2042                ring->stats.err_pkt_len++;
2043                u64_stats_update_end(&ring->syncp);
2044
2045                dev_kfree_skb_any(skb);
2046                return -EFAULT;
2047        }
2048
2049        if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) {
2050                netdev_err(netdev, "L2 error pkt\n");
2051                u64_stats_update_begin(&ring->syncp);
2052                ring->stats.l2_err++;
2053                u64_stats_update_end(&ring->syncp);
2054
2055                dev_kfree_skb_any(skb);
2056                return -EFAULT;
2057        }
2058
2059        u64_stats_update_begin(&ring->syncp);
2060        ring->stats.rx_pkts++;
2061        ring->stats.rx_bytes += skb->len;
2062        u64_stats_update_end(&ring->syncp);
2063
2064        ring->tqp_vector->rx_group.total_bytes += skb->len;
2065
2066        hns3_rx_checksum(ring, skb, desc);
2067        return 0;
2068}
2069
2070static int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget)
2071{
2072#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
2073        struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2074        int recv_pkts, recv_bds, clean_count, err;
2075        int unused_count = hns3_desc_unused(ring);
2076        struct sk_buff *skb = NULL;
2077        int num, bnum = 0;
2078
2079        num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
2080        rmb(); /* Make sure num is read before other descriptor data is touched */
2081
2082        recv_pkts = 0, recv_bds = 0, clean_count = 0;
2083        num -= unused_count;
2084
2085        while (recv_pkts < budget && recv_bds < num) {
2086                /* Reuse or realloc buffers */
2087                if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
2088                        hns3_nic_alloc_rx_buffers(ring,
2089                                                  clean_count + unused_count);
2090                        clean_count = 0;
2091                        unused_count = hns3_desc_unused(ring);
2092                }
2093
2094                /* Poll one pkt */
2095                err = hns3_handle_rx_bd(ring, &skb, &bnum);
2096                if (unlikely(!skb)) /* This fault cannot be repaired */
2097                        goto out;
2098
2099                recv_bds += bnum;
2100                clean_count += bnum;
2101                if (unlikely(err)) { /* skip this erroneous packet */
2102                        recv_pkts++;
2103                        continue;
2104                }
2105
2106                /* Hand the packet up to the network stack */
2107                skb->protocol = eth_type_trans(skb, netdev);
2108                (void)napi_gro_receive(&ring->tqp_vector->napi, skb);
2109
2110                recv_pkts++;
2111        }
2112
2113out:
2114        /* Refill any remaining consumed descriptors before leaving the poll */
2115        if (clean_count + unused_count > 0)
2116                hns3_nic_alloc_rx_buffers(ring,
2117                                          clean_count + unused_count);
2118
2119        return recv_pkts;
2120}
2121
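/* Adaptive interrupt coalescing: estimate the traffic level from the bytes
 * and packets accumulated since the last update and pick a new GL value for
 * this ring group; returns true when the GL setting should be written back
 * to hardware.
 */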
2122static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
2123{
2124#define HNS3_RX_ULTRA_PACKET_RATE 40000
2125        enum hns3_flow_level_range new_flow_level;
2126        struct hns3_enet_tqp_vector *tqp_vector;
2127        int packets_per_secs;
2128        int bytes_per_usecs;
2129        u16 new_int_gl;
2130        int usecs;
2131
2132        if (!ring_group->int_gl)
2133                return false;
2134
2135        if (ring_group->total_packets == 0) {
2136                ring_group->int_gl = HNS3_INT_GL_50K;
2137                ring_group->flow_level = HNS3_FLOW_LOW;
2138                return true;
2139        }
2140
2141        /* Simple throttle rate management
2142         * 0-10MB/s   lower     (50000 ints/s)
2143         * 10-20MB/s   middle    (20000 ints/s)
2144         * 20-1249MB/s high      (18000 ints/s)
2145         * > 40000pps  ultra     (8000 ints/s)
2146         */
2147        new_flow_level = ring_group->flow_level;
2148        new_int_gl = ring_group->int_gl;
2149        tqp_vector = ring_group->ring->tqp_vector;
2150        usecs = (ring_group->int_gl << 1);
2151        bytes_per_usecs = ring_group->total_bytes / usecs;
2152        /* scale to packets per second: 1000000 microseconds per second */
2153        packets_per_secs = ring_group->total_packets * 1000000 / usecs;
2154
2155        switch (new_flow_level) {
2156        case HNS3_FLOW_LOW:
2157                if (bytes_per_usecs > 10)
2158                        new_flow_level = HNS3_FLOW_MID;
2159                break;
2160        case HNS3_FLOW_MID:
2161                if (bytes_per_usecs > 20)
2162                        new_flow_level = HNS3_FLOW_HIGH;
2163                else if (bytes_per_usecs <= 10)
2164                        new_flow_level = HNS3_FLOW_LOW;
2165                break;
2166        case HNS3_FLOW_HIGH:
2167        case HNS3_FLOW_ULTRA:
2168        default:
2169                if (bytes_per_usecs <= 20)
2170                        new_flow_level = HNS3_FLOW_MID;
2171                break;
2172        }
2173
2174        if ((packets_per_secs > HNS3_RX_ULTRA_PACKET_RATE) &&
2175            (&tqp_vector->rx_group == ring_group))
2176                new_flow_level = HNS3_FLOW_ULTRA;
2177
2178        switch (new_flow_level) {
2179        case HNS3_FLOW_LOW:
2180                new_int_gl = HNS3_INT_GL_50K;
2181                break;
2182        case HNS3_FLOW_MID:
2183                new_int_gl = HNS3_INT_GL_20K;
2184                break;
2185        case HNS3_FLOW_HIGH:
2186                new_int_gl = HNS3_INT_GL_18K;
2187                break;
2188        case HNS3_FLOW_ULTRA:
2189                new_int_gl = HNS3_INT_GL_8K;
2190                break;
2191        default:
2192                break;
2193        }
2194
2195        ring_group->total_bytes = 0;
2196        ring_group->total_packets = 0;
2197        ring_group->flow_level = new_flow_level;
2198        if (new_int_gl != ring_group->int_gl) {
2199                ring_group->int_gl = new_int_gl;
2200                return true;
2201        }
2202        return false;
2203}
2204
2205static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
2206{
2207        u16 rx_int_gl, tx_int_gl;
2208        bool rx, tx;
2209
2210        rx = hns3_get_new_int_gl(&tqp_vector->rx_group);
2211        tx = hns3_get_new_int_gl(&tqp_vector->tx_group);
2212        rx_int_gl = tqp_vector->rx_group.int_gl;
2213        tx_int_gl = tqp_vector->tx_group.int_gl;
2214        if (rx && tx) {
2215                if (rx_int_gl > tx_int_gl) {
2216                        tqp_vector->tx_group.int_gl = rx_int_gl;
2217                        tqp_vector->tx_group.flow_level =
2218                                tqp_vector->rx_group.flow_level;
2219                        hns3_set_vector_coalesc_gl(tqp_vector, rx_int_gl);
2220                } else {
2221                        tqp_vector->rx_group.int_gl = tx_int_gl;
2222                        tqp_vector->rx_group.flow_level =
2223                                tqp_vector->tx_group.flow_level;
2224                        hns3_set_vector_coalesc_gl(tqp_vector, tx_int_gl);
2225                }
2226        }
2227}
2228
2229static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
2230{
2231        struct hns3_enet_ring *ring;
2232        int rx_pkt_total = 0;
2233
2234        struct hns3_enet_tqp_vector *tqp_vector =
2235                container_of(napi, struct hns3_enet_tqp_vector, napi);
2236        bool clean_complete = true;
2237        int rx_budget;
2238
2239        /* Since the actual Tx work is minimal, we can give the Tx a larger
2240         * budget and be more aggressive about cleaning up the Tx descriptors.
2241         */
2242        hns3_for_each_ring(ring, tqp_vector->tx_group) {
2243                if (!hns3_clean_tx_ring(ring, budget))
2244                        clean_complete = false;
2245        }
2246
2247        /* make sure the rx ring budget is at least 1 */
2248        rx_budget = max(budget / tqp_vector->num_tqps, 1);
2249
2250        hns3_for_each_ring(ring, tqp_vector->rx_group) {
2251                int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget);
2252
2253                if (rx_cleaned >= rx_budget)
2254                        clean_complete = false;
2255
2256                rx_pkt_total += rx_cleaned;
2257        }
2258
2259        tqp_vector->rx_group.total_packets += rx_pkt_total;
2260
2261        if (!clean_complete)
2262                return budget;
2263
2264        napi_complete(napi);
2265        hns3_update_new_int_gl(tqp_vector);
2266        hns3_mask_vector_irq(tqp_vector, 1);
2267
2268        return rx_pkt_total;
2269}
2270
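/* Build a chain of ring descriptors (TX rings first, then RX rings) that
 * tells the AE layer which rings are serviced by this TQP vector; the chain
 * nodes after the head are allocated with devm and released by
 * hns3_free_vector_ring_chain().
 */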
2271static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2272                                      struct hnae3_ring_chain_node *head)
2273{
2274        struct pci_dev *pdev = tqp_vector->handle->pdev;
2275        struct hnae3_ring_chain_node *cur_chain = head;
2276        struct hnae3_ring_chain_node *chain;
2277        struct hns3_enet_ring *tx_ring;
2278        struct hns3_enet_ring *rx_ring;
2279
2280        tx_ring = tqp_vector->tx_group.ring;
2281        if (tx_ring) {
2282                cur_chain->tqp_index = tx_ring->tqp->tqp_index;
2283                hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2284                             HNAE3_RING_TYPE_TX);
2285
2286                cur_chain->next = NULL;
2287
2288                while (tx_ring->next) {
2289                        tx_ring = tx_ring->next;
2290
2291                        chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
2292                                             GFP_KERNEL);
2293                        if (!chain)
2294                                return -ENOMEM;
2295
2296                        cur_chain->next = chain;
2297                        chain->tqp_index = tx_ring->tqp->tqp_index;
2298                        hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2299                                     HNAE3_RING_TYPE_TX);
2300
2301                        cur_chain = chain;
2302                }
2303        }
2304
2305        rx_ring = tqp_vector->rx_group.ring;
2306        if (!tx_ring && rx_ring) {
2307                cur_chain->next = NULL;
2308                cur_chain->tqp_index = rx_ring->tqp->tqp_index;
2309                hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2310                             HNAE3_RING_TYPE_RX);
2311
2312                rx_ring = rx_ring->next;
2313        }
2314
2315        while (rx_ring) {
2316                chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
2317                if (!chain)
2318                        return -ENOMEM;
2319
2320                cur_chain->next = chain;
2321                chain->tqp_index = rx_ring->tqp->tqp_index;
2322                hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2323                             HNAE3_RING_TYPE_RX);
2324                cur_chain = chain;
2325
2326                rx_ring = rx_ring->next;
2327        }
2328
2329        return 0;
2330}
2331
2332static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2333                                        struct hnae3_ring_chain_node *head)
2334{
2335        struct pci_dev *pdev = tqp_vector->handle->pdev;
2336        struct hnae3_ring_chain_node *chain_tmp, *chain;
2337
2338        chain = head->next;
2339
2340        while (chain) {
2341                chain_tmp = chain->next;
2342                devm_kfree(&pdev->dev, chain);
2343                chain = chain_tmp;
2344        }
2345}
2346
2347static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
2348                                   struct hns3_enet_ring *ring)
2349{
2350        ring->next = group->ring;
2351        group->ring = ring;
2352
2353        group->count++;
2354}
2355
2356static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
2357{
2358        struct hnae3_ring_chain_node vector_ring_chain;
2359        struct hnae3_handle *h = priv->ae_handle;
2360        struct hns3_enet_tqp_vector *tqp_vector;
2361        struct hnae3_vector_info *vector;
2362        struct pci_dev *pdev = h->pdev;
2363        u16 tqp_num = h->kinfo.num_tqps;
2364        u16 vector_num;
2365        int ret = 0;
2366        u16 i;
2367
2368        /* RSS size, number of online CPUs and vector_num should be the same */
2369        /* Should consider 2p/4p systems later */
2370        vector_num = min_t(u16, num_online_cpus(), tqp_num);
2371        vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
2372                              GFP_KERNEL);
2373        if (!vector)
2374                return -ENOMEM;
2375
2376        vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
2377
2378        priv->vector_num = vector_num;
2379        priv->tqp_vector = (struct hns3_enet_tqp_vector *)
2380                devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
2381                             GFP_KERNEL);
2382        if (!priv->tqp_vector)
2383                return -ENOMEM;
2384
2385        for (i = 0; i < tqp_num; i++) {
2386                u16 vector_i = i % vector_num;
2387
2388                tqp_vector = &priv->tqp_vector[vector_i];
2389
2390                hns3_add_ring_to_group(&tqp_vector->tx_group,
2391                                       priv->ring_data[i].ring);
2392
2393                hns3_add_ring_to_group(&tqp_vector->rx_group,
2394                                       priv->ring_data[i + tqp_num].ring);
2395
2396                tqp_vector->idx = vector_i;
2397                tqp_vector->mask_addr = vector[vector_i].io_addr;
2398                tqp_vector->vector_irq = vector[vector_i].vector;
2399                tqp_vector->num_tqps++;
2400
2401                priv->ring_data[i].ring->tqp_vector = tqp_vector;
2402                priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
2403        }
2404
2405        for (i = 0; i < vector_num; i++) {
2406                tqp_vector = &priv->tqp_vector[i];
2407
2408                tqp_vector->rx_group.total_bytes = 0;
2409                tqp_vector->rx_group.total_packets = 0;
2410                tqp_vector->tx_group.total_bytes = 0;
2411                tqp_vector->tx_group.total_packets = 0;
2412                hns3_vector_gl_rl_init(tqp_vector);
2413                tqp_vector->handle = h;
2414
2415                ret = hns3_get_vector_ring_chain(tqp_vector,
2416                                                 &vector_ring_chain);
2417                if (ret)
2418                        goto out;
2419
2420                ret = h->ae_algo->ops->map_ring_to_vector(h,
2421                        tqp_vector->vector_irq, &vector_ring_chain);
2422                if (ret)
2423                        goto out;
2424
2425                hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2426
2427                netif_napi_add(priv->netdev, &tqp_vector->napi,
2428                               hns3_nic_common_poll, NAPI_POLL_WEIGHT);
2429        }
2430
2431out:
2432        devm_kfree(&pdev->dev, vector);
2433        return ret;
2434}
2435
2436static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
2437{
2438        struct hnae3_ring_chain_node vector_ring_chain;
2439        struct hnae3_handle *h = priv->ae_handle;
2440        struct hns3_enet_tqp_vector *tqp_vector;
2441        struct pci_dev *pdev = h->pdev;
2442        int i, ret;
2443
2444        for (i = 0; i < priv->vector_num; i++) {
2445                tqp_vector = &priv->tqp_vector[i];
2446
2447                ret = hns3_get_vector_ring_chain(tqp_vector,
2448                                                 &vector_ring_chain);
2449                if (ret)
2450                        return ret;
2451
2452                ret = h->ae_algo->ops->unmap_ring_from_vector(h,
2453                        tqp_vector->vector_irq, &vector_ring_chain);
2454                if (ret)
2455                        return ret;
2456
2457                hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2458
2459                if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
2460                        (void)irq_set_affinity_hint(
2461                                priv->tqp_vector[i].vector_irq,
2462                                NULL);
2463                        devm_free_irq(&pdev->dev,
2464                                      priv->tqp_vector[i].vector_irq,
2465                                      &priv->tqp_vector[i]);
2466                }
2467
2468                priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
2469
2470                netif_napi_del(&priv->tqp_vector[i].napi);
2471        }
2472
2473        devm_kfree(&pdev->dev, priv->tqp_vector);
2474
2475        return 0;
2476}
2477
2478static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
2479                             int ring_type)
2480{
2481        struct hns3_nic_ring_data *ring_data = priv->ring_data;
2482        int queue_num = priv->ae_handle->kinfo.num_tqps;
2483        struct pci_dev *pdev = priv->ae_handle->pdev;
2484        struct hns3_enet_ring *ring;
2485
2486        ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
2487        if (!ring)
2488                return -ENOMEM;
2489
2490        if (ring_type == HNAE3_RING_TYPE_TX) {
2491                ring_data[q->tqp_index].ring = ring;
2492                ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
2493        } else {
2494                ring_data[q->tqp_index + queue_num].ring = ring;
2495                ring->io_base = q->io_base;
2496        }
2497
2498        hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
2499
2500        ring_data[q->tqp_index].queue_index = q->tqp_index;
2501
2502        ring->tqp = q;
2503        ring->desc = NULL;
2504        ring->desc_cb = NULL;
2505        ring->dev = priv->dev;
2506        ring->desc_dma_addr = 0;
2507        ring->buf_size = q->buf_size;
2508        ring->desc_num = q->desc_num;
2509        ring->next_to_use = 0;
2510        ring->next_to_clean = 0;
2511
2512        return 0;
2513}
2514
2515static int hns3_queue_to_ring(struct hnae3_queue *tqp,
2516                              struct hns3_nic_priv *priv)
2517{
2518        int ret;
2519
2520        ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
2521        if (ret)
2522                return ret;
2523
2524        ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
2525        if (ret)
2526                return ret;
2527
2528        return 0;
2529}
2530
2531static int hns3_get_ring_config(struct hns3_nic_priv *priv)
2532{
2533        struct hnae3_handle *h = priv->ae_handle;
2534        struct pci_dev *pdev = h->pdev;
2535        int i, ret;
2536
2537        priv->ring_data =  devm_kzalloc(&pdev->dev, h->kinfo.num_tqps *
2538                                        sizeof(*priv->ring_data) * 2,
2539                                        GFP_KERNEL);
2540        if (!priv->ring_data)
2541                return -ENOMEM;
2542
2543        for (i = 0; i < h->kinfo.num_tqps; i++) {
2544                ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
2545                if (ret)
2546                        goto err;
2547        }
2548
2549        return 0;
2550err:
2551        devm_kfree(&pdev->dev, priv->ring_data);
2552        return ret;
2553}
2554
2555static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
2556{
2557        int ret;
2558
2559        if (ring->desc_num <= 0 || ring->buf_size <= 0)
2560                return -EINVAL;
2561
2562        ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
2563                                GFP_KERNEL);
2564        if (!ring->desc_cb) {
2565                ret = -ENOMEM;
2566                goto out;
2567        }
2568
2569        ret = hns3_alloc_desc(ring);
2570        if (ret)
2571                goto out_with_desc_cb;
2572
2573        if (!HNAE3_IS_TX_RING(ring)) {
2574                ret = hns3_alloc_ring_buffers(ring);
2575                if (ret)
2576                        goto out_with_desc;
2577        }
2578
2579        return 0;
2580
2581out_with_desc:
2582        hns3_free_desc(ring);
2583out_with_desc_cb:
2584        kfree(ring->desc_cb);
2585        ring->desc_cb = NULL;
2586out:
2587        return ret;
2588}
2589
2590static void hns3_fini_ring(struct hns3_enet_ring *ring)
2591{
2592        hns3_free_desc(ring);
2593        kfree(ring->desc_cb);
2594        ring->desc_cb = NULL;
2595        ring->next_to_clean = 0;
2596        ring->next_to_use = 0;
2597}
2598
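/* Translate an RX/TX buffer size in bytes into the hardware BD size type;
 * unrecognised sizes fall back to the 2048 byte type.
 */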
2599int hns3_buf_size2type(u32 buf_size)
2600{
2601        int bd_size_type;
2602
2603        switch (buf_size) {
2604        case 512:
2605                bd_size_type = HNS3_BD_SIZE_512_TYPE;
2606                break;
2607        case 1024:
2608                bd_size_type = HNS3_BD_SIZE_1024_TYPE;
2609                break;
2610        case 2048:
2611                bd_size_type = HNS3_BD_SIZE_2048_TYPE;
2612                break;
2613        case 4096:
2614                bd_size_type = HNS3_BD_SIZE_4096_TYPE;
2615                break;
2616        default:
2617                bd_size_type = HNS3_BD_SIZE_2048_TYPE;
2618        }
2619
2620        return bd_size_type;
2621}
2622
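/* Program the ring's base address, buffer size type and BD number into the
 * queue registers. The high word of the DMA address is written as
 * (dma >> 31) >> 1, the usual idiom for taking the upper 32 bits without
 * an invalid shift when dma_addr_t is only 32 bits wide.
 */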
2623static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
2624{
2625        dma_addr_t dma = ring->desc_dma_addr;
2626        struct hnae3_queue *q = ring->tqp;
2627
2628        if (!HNAE3_IS_TX_RING(ring)) {
2629                hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
2630                               (u32)dma);
2631                hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
2632                               (u32)((dma >> 31) >> 1));
2633
2634                hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
2635                               hns3_buf_size2type(ring->buf_size));
2636                hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
2637                               ring->desc_num / 8 - 1);
2638
2639        } else {
2640                hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
2641                               (u32)dma);
2642                hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
2643                               (u32)((dma >> 31) >> 1));
2644
2645                hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG,
2646                               hns3_buf_size2type(ring->buf_size));
2647                hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
2648                               ring->desc_num / 8 - 1);
2649        }
2650}
2651
2652static int hns3_init_all_ring(struct hns3_nic_priv *priv)
2653{
2654        struct hnae3_handle *h = priv->ae_handle;
2655        int ring_num = h->kinfo.num_tqps * 2;
2656        int i, j;
2657        int ret;
2658
2659        for (i = 0; i < ring_num; i++) {
2660                ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
2661                if (ret) {
2662                        dev_err(priv->dev,
2663                                "Alloc ring memory fail! ret=%d\n", ret);
2664                        goto out_when_alloc_ring_memory;
2665                }
2666
2667                hns3_init_ring_hw(priv->ring_data[i].ring);
2668
2669                u64_stats_init(&priv->ring_data[i].ring->syncp);
2670        }
2671
2672        return 0;
2673
2674out_when_alloc_ring_memory:
2675        for (j = i - 1; j >= 0; j--)
2676                hns3_fini_ring(priv->ring_data[j].ring);
2677
2678        return -ENOMEM;
2679}
2680
2681static int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
2682{
2683        struct hnae3_handle *h = priv->ae_handle;
2684        int i;
2685
2686        for (i = 0; i < h->kinfo.num_tqps; i++) {
2687                if (h->ae_algo->ops->reset_queue)
2688                        h->ae_algo->ops->reset_queue(h, i);
2689
2690                hns3_fini_ring(priv->ring_data[i].ring);
2691                hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
2692        }
2693
2694        return 0;
2695}
2696
2697/* Set the MAC address if configured, otherwise leave it to the AE driver */
2698static void hns3_init_mac_addr(struct net_device *netdev)
2699{
2700        struct hns3_nic_priv *priv = netdev_priv(netdev);
2701        struct hnae3_handle *h = priv->ae_handle;
2702        u8 mac_addr_temp[ETH_ALEN];
2703
2704        if (h->ae_algo->ops->get_mac_addr) {
2705                h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
2706                ether_addr_copy(netdev->dev_addr, mac_addr_temp);
2707        }
2708
2709        /* Check if the MAC address is valid, if not get a random one */
2710        if (!is_valid_ether_addr(netdev->dev_addr)) {
2711                eth_hw_addr_random(netdev);
2712                dev_warn(priv->dev, "using random MAC address %pM\n",
2713                         netdev->dev_addr);
2714        }
2715
2716        if (h->ae_algo->ops->set_mac_addr)
2717                h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr);
2718
2719}
2720
2721static void hns3_nic_set_priv_ops(struct net_device *netdev)
2722{
2723        struct hns3_nic_priv *priv = netdev_priv(netdev);
2724
2725        if ((netdev->features & NETIF_F_TSO) ||
2726            (netdev->features & NETIF_F_TSO6)) {
2727                priv->ops.fill_desc = hns3_fill_desc_tso;
2728                priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
2729        } else {
2730                priv->ops.fill_desc = hns3_fill_desc;
2731                priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
2732        }
2733}
2734
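/* hnae3 client .init_instance callback: allocate the netdev, set up its
 * features, rings and TQP vectors, then register the netdev with the kernel.
 */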
2735static int hns3_client_init(struct hnae3_handle *handle)
2736{
2737        struct pci_dev *pdev = handle->pdev;
2738        struct hns3_nic_priv *priv;
2739        struct net_device *netdev;
2740        int ret;
2741
2742        netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv),
2743                                   handle->kinfo.num_tqps);
2744        if (!netdev)
2745                return -ENOMEM;
2746
2747        priv = netdev_priv(netdev);
2748        priv->dev = &pdev->dev;
2749        priv->netdev = netdev;
2750        priv->ae_handle = handle;
2751
2752        handle->kinfo.netdev = netdev;
2753        handle->priv = (void *)priv;
2754
2755        hns3_init_mac_addr(netdev);
2756
2757        hns3_set_default_feature(netdev);
2758
2759        netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
2760        netdev->priv_flags |= IFF_UNICAST_FLT;
2761        netdev->netdev_ops = &hns3_nic_netdev_ops;
2762        SET_NETDEV_DEV(netdev, &pdev->dev);
2763        hns3_ethtool_set_ops(netdev);
2764        hns3_nic_set_priv_ops(netdev);
2765
2766        /* Carrier off reporting is important to ethtool even BEFORE open */
2767        netif_carrier_off(netdev);
2768
2769        ret = hns3_get_ring_config(priv);
2770        if (ret) {
2771                ret = -ENOMEM;
2772                goto out_get_ring_cfg;
2773        }
2774
2775        ret = hns3_nic_init_vector_data(priv);
2776        if (ret) {
2777                ret = -ENOMEM;
2778                goto out_init_vector_data;
2779        }
2780
2781        ret = hns3_init_all_ring(priv);
2782        if (ret) {
2783                ret = -ENOMEM;
2784                goto out_init_ring_data;
2785        }
2786
2787        ret = register_netdev(netdev);
2788        if (ret) {
2789                dev_err(priv->dev, "probe register netdev fail!\n");
2790                goto out_reg_netdev_fail;
2791        }
2792
2793        /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */
2794        netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
2795
2796        return ret;
2797
2798out_reg_netdev_fail:
2799out_init_ring_data:
2800        (void)hns3_nic_uninit_vector_data(priv);
2801        priv->ring_data = NULL;
2802out_init_vector_data:
2803out_get_ring_cfg:
2804        priv->ae_handle = NULL;
2805        free_netdev(netdev);
2806        return ret;
2807}
2808
2809static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
2810{
2811        struct net_device *netdev = handle->kinfo.netdev;
2812        struct hns3_nic_priv *priv = netdev_priv(netdev);
2813        int ret;
2814
2815        if (netdev->reg_state != NETREG_UNINITIALIZED)
2816                unregister_netdev(netdev);
2817
2818        ret = hns3_nic_uninit_vector_data(priv);
2819        if (ret)
2820                netdev_err(netdev, "uninit vector error\n");
2821
2822        ret = hns3_uninit_all_ring(priv);
2823        if (ret)
2824                netdev_err(netdev, "uninit ring error\n");
2825
2826        priv->ring_data = NULL;
2827
2828        free_netdev(netdev);
2829}
2830
2831static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
2832{
2833        struct net_device *netdev = handle->kinfo.netdev;
2834
2835        if (!netdev)
2836                return;
2837
2838        if (linkup) {
2839                netif_carrier_on(netdev);
2840                netif_tx_wake_all_queues(netdev);
2841                netdev_info(netdev, "link up\n");
2842        } else {
2843                netif_carrier_off(netdev);
2844                netif_tx_stop_all_queues(netdev);
2845                netdev_info(netdev, "link down\n");
2846        }
2847}
2848
2849const struct hnae3_client_ops client_ops = {
2850        .init_instance = hns3_client_init,
2851        .uninit_instance = hns3_client_uninit,
2852        .link_status_change = hns3_link_status_change,
2853};
2854
2855/* hns3_init_module - Driver registration routine
2856 * hns3_init_module is the first routine called when the driver is
2857 * loaded. All it does is register the hnae3 client and the PCI driver.
2858 */
2859static int __init hns3_init_module(void)
2860{
2861        int ret;
2862
2863        pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
2864        pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
2865
2866        client.type = HNAE3_CLIENT_KNIC;
2867        snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
2868                 hns3_driver_name);
2869
2870        client.ops = &client_ops;
2871
2872        ret = hnae3_register_client(&client);
2873        if (ret)
2874                return ret;
2875
2876        ret = pci_register_driver(&hns3_driver);
2877        if (ret)
2878                hnae3_unregister_client(&client);
2879
2880        return ret;
2881}
2882module_init(hns3_init_module);
2883
2884/* hns3_exit_module - Driver exit cleanup routine
2885 * hns3_exit_module is called just before the driver is removed
2886 * from memory.
2887 */
2888static void __exit hns3_exit_module(void)
2889{
2890        pci_unregister_driver(&hns3_driver);
2891        hnae3_unregister_client(&client);
2892}
2893module_exit(hns3_exit_module);
2894
2895MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
2896MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
2897MODULE_LICENSE("GPL");
2898MODULE_ALIAS("pci:hns-nic");
2899