linux/drivers/net/ethernet/hisilicon/hns/hns_enet.c
   1/*
   2 * Copyright (c) 2014-2015 Hisilicon Limited.
   3 *
   4 * This program is free software; you can redistribute it and/or modify
   5 * it under the terms of the GNU General Public License as published by
   6 * the Free Software Foundation; either version 2 of the License, or
   7 * (at your option) any later version.
   8 */
   9
  10#include <linux/clk.h>
  11#include <linux/cpumask.h>
  12#include <linux/etherdevice.h>
  13#include <linux/if_vlan.h>
  14#include <linux/interrupt.h>
  15#include <linux/io.h>
  16#include <linux/ip.h>
  17#include <linux/ipv6.h>
  18#include <linux/module.h>
  19#include <linux/phy.h>
  20#include <linux/platform_device.h>
  21#include <linux/skbuff.h>
  22
  23#include "hnae.h"
  24#include "hns_enet.h"
  25
  26#define NIC_MAX_Q_PER_VF 16
  27#define HNS_NIC_TX_TIMEOUT (5 * HZ)
  28
  29#define SERVICE_TIMER_HZ (1 * HZ)
  30
  31#define NIC_TX_CLEAN_MAX_NUM 256
  32#define NIC_RX_CLEAN_MAX_NUM 64
  33
  34#define RCB_IRQ_NOT_INITED 0
  35#define RCB_IRQ_INITED 1
  36#define HNS_BUFFER_SIZE_2048 2048
  37
  38#define BD_MAX_SEND_SIZE 8191
  39#define SKB_TMP_LEN(SKB) \
  40        (((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))
  41
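/*
 * Worked example for SKB_TMP_LEN (illustrative values, assuming an
 * untagged IPv4/TCP frame with no TCP options):
 *   transport_header - mac_header = 14 (Ethernet) + 20 (IPv4) = 34
 *   tcp_hdrlen(skb)               = 20
 *   SKB_TMP_LEN(skb)              = 54
 * In fill_v2_desc() below, paylen = skb->len - SKB_TMP_LEN(skb) is the
 * TSO payload length programmed into the descriptor.
 */
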
  42static void fill_v2_desc(struct hnae_ring *ring, void *priv,
  43                         int size, dma_addr_t dma, int frag_end,
  44                         int buf_num, enum hns_desc_type type, int mtu)
  45{
  46        struct hnae_desc *desc = &ring->desc[ring->next_to_use];
  47        struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
  48        struct iphdr *iphdr;
  49        struct ipv6hdr *ipv6hdr;
  50        struct sk_buff *skb;
  51        __be16 protocol;
  52        u8 bn_pid = 0;
  53        u8 rrcfv = 0;
  54        u8 ip_offset = 0;
  55        u8 tvsvsn = 0;
  56        u16 mss = 0;
  57        u8 l4_len = 0;
  58        u16 paylen = 0;
  59
  60        desc_cb->priv = priv;
  61        desc_cb->length = size;
  62        desc_cb->dma = dma;
  63        desc_cb->type = type;
  64
  65        desc->addr = cpu_to_le64(dma);
  66        desc->tx.send_size = cpu_to_le16((u16)size);
  67
  68        /* config bd buffer end */
  69        hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
  70        hnae_set_field(bn_pid, HNSV2_TXD_BUFNUM_M, 0, buf_num - 1);
  71
  72        /* fill port_id in the tx bd for sending management pkts */
  73        hnae_set_field(bn_pid, HNSV2_TXD_PORTID_M,
  74                       HNSV2_TXD_PORTID_S, ring->q->handle->dport_id);
  75
  76        if (type == DESC_TYPE_SKB) {
  77                skb = (struct sk_buff *)priv;
  78
  79                if (skb->ip_summed == CHECKSUM_PARTIAL) {
  80                        skb_reset_mac_len(skb);
  81                        protocol = skb->protocol;
  82                        ip_offset = ETH_HLEN;
  83
  84                        if (protocol == htons(ETH_P_8021Q)) {
  85                                ip_offset += VLAN_HLEN;
  86                                protocol = vlan_get_protocol(skb);
  87                                skb->protocol = protocol;
  88                        }
  89
  90                        if (skb->protocol == htons(ETH_P_IP)) {
  91                                iphdr = ip_hdr(skb);
  92                                hnae_set_bit(rrcfv, HNSV2_TXD_L3CS_B, 1);
  93                                hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);
  94
  95                                /* check for tcp/udp header */
  96                                if (iphdr->protocol == IPPROTO_TCP &&
  97                                    skb_is_gso(skb)) {
  98                                        hnae_set_bit(tvsvsn,
  99                                                     HNSV2_TXD_TSE_B, 1);
 100                                        l4_len = tcp_hdrlen(skb);
 101                                        mss = skb_shinfo(skb)->gso_size;
 102                                        paylen = skb->len - SKB_TMP_LEN(skb);
 103                                }
 104                        } else if (skb->protocol == htons(ETH_P_IPV6)) {
 105                                hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1);
 106                                ipv6hdr = ipv6_hdr(skb);
 107                                hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);
 108
 109                                /* check for tcp/udp header */
 110                                if (ipv6hdr->nexthdr == IPPROTO_TCP &&
 111                                    skb_is_gso(skb) && skb_is_gso_v6(skb)) {
 112                                        hnae_set_bit(tvsvsn,
 113                                                     HNSV2_TXD_TSE_B, 1);
 114                                        l4_len = tcp_hdrlen(skb);
 115                                        mss = skb_shinfo(skb)->gso_size;
 116                                        paylen = skb->len - SKB_TMP_LEN(skb);
 117                                }
 118                        }
 119                        desc->tx.ip_offset = ip_offset;
 120                        desc->tx.tse_vlan_snap_v6_sctp_nth = tvsvsn;
 121                        desc->tx.mss = cpu_to_le16(mss);
 122                        desc->tx.l4_len = l4_len;
 123                        desc->tx.paylen = cpu_to_le16(paylen);
 124                }
 125        }
 126
 127        hnae_set_bit(rrcfv, HNSV2_TXD_FE_B, frag_end);
 128
 129        desc->tx.bn_pid = bn_pid;
 130        desc->tx.ra_ri_cs_fe_vld = rrcfv;
 131
 132        ring_ptr_move_fw(ring, next_to_use);
 133}
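/*
 * Note on the v2 descriptor filled above: for a GSO TCP skb the hardware is
 * handed mss, l4_len and paylen so it can presumably cut the payload into
 * mss-sized segments itself; for ordinary CHECKSUM_PARTIAL packets only the
 * L3/L4 checksum bits are set and those fields stay zero.
 */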
 134
 135static void fill_desc(struct hnae_ring *ring, void *priv,
 136                      int size, dma_addr_t dma, int frag_end,
 137                      int buf_num, enum hns_desc_type type, int mtu)
 138{
 139        struct hnae_desc *desc = &ring->desc[ring->next_to_use];
 140        struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
 141        struct sk_buff *skb;
 142        __be16 protocol;
 143        u32 ip_offset;
 144        u32 asid_bufnum_pid = 0;
 145        u32 flag_ipoffset = 0;
 146
 147        desc_cb->priv = priv;
 148        desc_cb->length = size;
 149        desc_cb->dma = dma;
 150        desc_cb->type = type;
 151
 152        desc->addr = cpu_to_le64(dma);
 153        desc->tx.send_size = cpu_to_le16((u16)size);
 154
 155        /*config bd buffer end */
 156        flag_ipoffset |= 1 << HNS_TXD_VLD_B;
 157
 158        asid_bufnum_pid |= buf_num << HNS_TXD_BUFNUM_S;
 159
 160        if (type == DESC_TYPE_SKB) {
 161                skb = (struct sk_buff *)priv;
 162
 163                if (skb->ip_summed == CHECKSUM_PARTIAL) {
 164                        protocol = skb->protocol;
 165                        ip_offset = ETH_HLEN;
 166
 167                        /*if it is a SW VLAN check the next protocol*/
 168                        if (protocol == htons(ETH_P_8021Q)) {
 169                                ip_offset += VLAN_HLEN;
 170                                protocol = vlan_get_protocol(skb);
 171                                skb->protocol = protocol;
 172                        }
 173
 174                        if (skb->protocol == htons(ETH_P_IP)) {
 175                                flag_ipoffset |= 1 << HNS_TXD_L3CS_B;
 176                                /* check for tcp/udp header */
 177                                flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
 178
 179                        } else if (skb->protocol == htons(ETH_P_IPV6)) {
  180                                /* ipv6 has no l3 checksum; check for L4 header */
 181                                flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
 182                        }
 183
 184                        flag_ipoffset |= ip_offset << HNS_TXD_IPOFFSET_S;
 185                }
 186        }
 187
 188        flag_ipoffset |= frag_end << HNS_TXD_FE_B;
 189
 190        desc->tx.asid_bufnum_pid = cpu_to_le16(asid_bufnum_pid);
 191        desc->tx.flag_ipoffset = cpu_to_le32(flag_ipoffset);
 192
 193        ring_ptr_move_fw(ring, next_to_use);
 194}
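/*
 * Unlike fill_v2_desc(), this v1 descriptor format carries no TSE/mss/paylen
 * fields, so it can only request L3/L4 checksum offload; TSO is handled (if
 * at all) by the v2 path selected in hns_nic_set_features() further down.
 */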
 195
 196static void unfill_desc(struct hnae_ring *ring)
 197{
 198        ring_ptr_move_bw(ring, next_to_use);
 199}
 200
 201static int hns_nic_maybe_stop_tx(
 202        struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
 203{
 204        struct sk_buff *skb = *out_skb;
 205        struct sk_buff *new_skb = NULL;
 206        int buf_num;
 207
 208        /* no. of segments (plus a header) */
 209        buf_num = skb_shinfo(skb)->nr_frags + 1;
 210
 211        if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
 212                if (ring_space(ring) < 1)
 213                        return -EBUSY;
 214
 215                new_skb = skb_copy(skb, GFP_ATOMIC);
 216                if (!new_skb)
 217                        return -ENOMEM;
 218
 219                dev_kfree_skb_any(skb);
 220                *out_skb = new_skb;
 221                buf_num = 1;
 222        } else if (buf_num > ring_space(ring)) {
 223                return -EBUSY;
 224        }
 225
 226        *bnum = buf_num;
 227        return 0;
 228}
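/*
 * Example (illustrative): an skb with 3 page fragments needs
 * nr_frags + 1 = 4 descriptors.  If the ring supports fewer descriptors per
 * packet than that, the skb is linearised with skb_copy() so a single
 * descriptor suffices; otherwise we only make sure the ring has 4 free
 * slots before proceeding.
 */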
 229
 230static int hns_nic_maybe_stop_tso(
 231        struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
 232{
 233        int i;
 234        int size;
 235        int buf_num;
 236        int frag_num;
 237        struct sk_buff *skb = *out_skb;
 238        struct sk_buff *new_skb = NULL;
 239        struct skb_frag_struct *frag;
 240
 241        size = skb_headlen(skb);
 242        buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
 243
 244        frag_num = skb_shinfo(skb)->nr_frags;
 245        for (i = 0; i < frag_num; i++) {
 246                frag = &skb_shinfo(skb)->frags[i];
 247                size = skb_frag_size(frag);
 248                buf_num += (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
 249        }
 250
 251        if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
 252                buf_num = (skb->len + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
 253                if (ring_space(ring) < buf_num)
 254                        return -EBUSY;
  255                /* manually split the send packet */
 256                new_skb = skb_copy(skb, GFP_ATOMIC);
 257                if (!new_skb)
 258                        return -ENOMEM;
 259                dev_kfree_skb_any(skb);
 260                *out_skb = new_skb;
 261
 262        } else if (ring_space(ring) < buf_num) {
 263                return -EBUSY;
 264        }
 265
 266        *bnum = buf_num;
 267        return 0;
 268}
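/*
 * Example (illustrative): with BD_MAX_SEND_SIZE = 8191, a 1000-byte linear
 * area needs 1 BD and a 20000-byte fragment needs
 * DIV_ROUND_UP(20000, 8191) = 3 BDs, so buf_num = 4.  Only if that total
 * exceeds max_desc_num_per_pkt is the skb copied and the count redone
 * against skb->len as a whole.
 */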
 269
 270static void fill_tso_desc(struct hnae_ring *ring, void *priv,
 271                          int size, dma_addr_t dma, int frag_end,
 272                          int buf_num, enum hns_desc_type type, int mtu)
 273{
 274        int frag_buf_num;
 275        int sizeoflast;
 276        int k;
 277
 278        frag_buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
 279        sizeoflast = size % BD_MAX_SEND_SIZE;
 280        sizeoflast = sizeoflast ? sizeoflast : BD_MAX_SEND_SIZE;
 281
  282        /* when the frag size is bigger than the hardware limit, split this frag */
 283        for (k = 0; k < frag_buf_num; k++)
 284                fill_v2_desc(ring, priv,
 285                             (k == frag_buf_num - 1) ?
 286                                        sizeoflast : BD_MAX_SEND_SIZE,
 287                             dma + BD_MAX_SEND_SIZE * k,
 288                             frag_end && (k == frag_buf_num - 1) ? 1 : 0,
 289                             buf_num,
 290                             (type == DESC_TYPE_SKB && !k) ?
 291                                        DESC_TYPE_SKB : DESC_TYPE_PAGE,
 292                             mtu);
 293}
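/*
 * Example (illustrative): a 20000-byte fragment is split into
 * frag_buf_num = 3 descriptors of 8191, 8191 and 20000 % 8191 = 3618 bytes;
 * only the last one carries the frag_end flag, and only the very first
 * buffer of an skb keeps DESC_TYPE_SKB, presumably so the skb is freed
 * exactly once on cleanup.
 */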
 294
 295int hns_nic_net_xmit_hw(struct net_device *ndev,
 296                        struct sk_buff *skb,
 297                        struct hns_nic_ring_data *ring_data)
 298{
 299        struct hns_nic_priv *priv = netdev_priv(ndev);
 300        struct device *dev = priv->dev;
 301        struct hnae_ring *ring = ring_data->ring;
 302        struct netdev_queue *dev_queue;
 303        struct skb_frag_struct *frag;
 304        int buf_num;
 305        int seg_num;
 306        dma_addr_t dma;
 307        int size, next_to_use;
 308        int i;
 309
 310        switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
 311        case -EBUSY:
 312                ring->stats.tx_busy++;
 313                goto out_net_tx_busy;
 314        case -ENOMEM:
 315                ring->stats.sw_err_cnt++;
 316                netdev_err(ndev, "no memory to xmit!\n");
 317                goto out_err_tx_ok;
 318        default:
 319                break;
 320        }
 321
 322        /* no. of segments (plus a header) */
 323        seg_num = skb_shinfo(skb)->nr_frags + 1;
 324        next_to_use = ring->next_to_use;
 325
 326        /* fill the first part */
 327        size = skb_headlen(skb);
 328        dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
 329        if (dma_mapping_error(dev, dma)) {
 330                netdev_err(ndev, "TX head DMA map failed\n");
 331                ring->stats.sw_err_cnt++;
 332                goto out_err_tx_ok;
 333        }
 334        priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
 335                            buf_num, DESC_TYPE_SKB, ndev->mtu);
 336
 337        /* fill the fragments */
 338        for (i = 1; i < seg_num; i++) {
 339                frag = &skb_shinfo(skb)->frags[i - 1];
 340                size = skb_frag_size(frag);
 341                dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
 342                if (dma_mapping_error(dev, dma)) {
 343                        netdev_err(ndev, "TX frag(%d) DMA map failed\n", i);
 344                        ring->stats.sw_err_cnt++;
 345                        goto out_map_frag_fail;
 346                }
 347                priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
 348                                    seg_num - 1 == i ? 1 : 0, buf_num,
 349                                    DESC_TYPE_PAGE, ndev->mtu);
 350        }
 351
  352        /* all buffers of the packet have been translated into descriptors */
 353        dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);
 354        netdev_tx_sent_queue(dev_queue, skb->len);
 355
 356        wmb(); /* commit all data before submit */
 357        assert(skb->queue_mapping < priv->ae_handle->q_num);
 358        hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
 359        ring->stats.tx_pkts++;
 360        ring->stats.tx_bytes += skb->len;
 361
 362        return NETDEV_TX_OK;
 363
 364out_map_frag_fail:
 365
 366        while (ring->next_to_use != next_to_use) {
 367                unfill_desc(ring);
 368                if (ring->next_to_use != next_to_use)
 369                        dma_unmap_page(dev,
 370                                       ring->desc_cb[ring->next_to_use].dma,
 371                                       ring->desc_cb[ring->next_to_use].length,
 372                                       DMA_TO_DEVICE);
 373                else
 374                        dma_unmap_single(dev,
 375                                         ring->desc_cb[next_to_use].dma,
 376                                         ring->desc_cb[next_to_use].length,
 377                                         DMA_TO_DEVICE);
 378        }
 379
 380out_err_tx_ok:
 381
 382        dev_kfree_skb_any(skb);
 383        return NETDEV_TX_OK;
 384
 385out_net_tx_busy:
 386
 387        netif_stop_subqueue(ndev, skb->queue_mapping);
 388
 389        /* Herbert's original patch had:
 390         *  smp_mb__after_netif_stop_queue();
 391         * but since that doesn't exist yet, just open code it.
 392         */
 393        smp_mb();
 394        return NETDEV_TX_BUSY;
 395}
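/*
 * Note on the unwind path above: descriptors are rolled back from
 * next_to_use down to the saved position; intermediate entries were mapped
 * with skb_frag_dma_map() and are released with dma_unmap_page(), while the
 * first (head) buffer was mapped with dma_map_single() and is unmapped
 * accordingly.
 */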
 396
 397/**
 398 * hns_nic_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
 399 * @data: pointer to the start of the headers
  400 * @max_size: total length of section to find headers in
 401 *
 402 * This function is meant to determine the length of headers that will
 403 * be recognized by hardware for LRO, GRO, and RSC offloads.  The main
 404 * motivation of doing this is to only perform one pull for IPv4 TCP
 405 * packets so that we can do basic things like calculating the gso_size
 406 * based on the average data per packet.
 407 **/
 408static unsigned int hns_nic_get_headlen(unsigned char *data, u32 flag,
 409                                        unsigned int max_size)
 410{
 411        unsigned char *network;
 412        u8 hlen;
 413
 414        /* this should never happen, but better safe than sorry */
 415        if (max_size < ETH_HLEN)
 416                return max_size;
 417
 418        /* initialize network frame pointer */
 419        network = data;
 420
 421        /* set first protocol and move network header forward */
 422        network += ETH_HLEN;
 423
 424        /* handle any vlan tag if present */
 425        if (hnae_get_field(flag, HNS_RXD_VLAN_M, HNS_RXD_VLAN_S)
 426                == HNS_RX_FLAG_VLAN_PRESENT) {
 427                if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
 428                        return max_size;
 429
 430                network += VLAN_HLEN;
 431        }
 432
 433        /* handle L3 protocols */
 434        if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
 435                == HNS_RX_FLAG_L3ID_IPV4) {
 436                if ((typeof(max_size))(network - data) >
 437                    (max_size - sizeof(struct iphdr)))
 438                        return max_size;
 439
 440                /* access ihl as a u8 to avoid unaligned access on ia64 */
 441                hlen = (network[0] & 0x0F) << 2;
 442
 443                /* verify hlen meets minimum size requirements */
 444                if (hlen < sizeof(struct iphdr))
 445                        return network - data;
 446
 447                /* record next protocol if header is present */
 448        } else if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
 449                == HNS_RX_FLAG_L3ID_IPV6) {
 450                if ((typeof(max_size))(network - data) >
 451                    (max_size - sizeof(struct ipv6hdr)))
 452                        return max_size;
 453
 454                /* record next protocol */
 455                hlen = sizeof(struct ipv6hdr);
 456        } else {
 457                return network - data;
 458        }
 459
 460        /* relocate pointer to start of L4 header */
 461        network += hlen;
 462
 463        /* finally sort out TCP/UDP */
 464        if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
 465                == HNS_RX_FLAG_L4ID_TCP) {
 466                if ((typeof(max_size))(network - data) >
 467                    (max_size - sizeof(struct tcphdr)))
 468                        return max_size;
 469
 470                /* access doff as a u8 to avoid unaligned access on ia64 */
 471                hlen = (network[12] & 0xF0) >> 2;
 472
 473                /* verify hlen meets minimum size requirements */
 474                if (hlen < sizeof(struct tcphdr))
 475                        return network - data;
 476
 477                network += hlen;
 478        } else if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
 479                == HNS_RX_FLAG_L4ID_UDP) {
 480                if ((typeof(max_size))(network - data) >
 481                    (max_size - sizeof(struct udphdr)))
 482                        return max_size;
 483
 484                network += sizeof(struct udphdr);
 485        }
 486
 487        /* If everything has gone correctly network should be the
 488         * data section of the packet and will be the end of the header.
 489         * If not then it probably represents the end of the last recognized
 490         * header.
 491         */
 492        if ((typeof(max_size))(network - data) < max_size)
 493                return network - data;
 494        else
 495                return max_size;
 496}
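/*
 * Worked example (illustrative): for a packet the descriptor flags mark as
 * VLAN-tagged IPv4/TCP with no IP or TCP options, the returned header
 * length is 14 (Ethernet) + 4 (VLAN) + 20 (IPv4) + 20 (TCP) = 58 bytes,
 * which is what hns_nic_poll_rx_skb() later copies into the skb head as
 * pull_len.
 */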
 497
 498static void hns_nic_reuse_page(struct sk_buff *skb, int i,
 499                               struct hnae_ring *ring, int pull_len,
 500                               struct hnae_desc_cb *desc_cb)
 501{
 502        struct hnae_desc *desc;
 503        int truesize, size;
 504        int last_offset;
 505        bool twobufs;
 506
 507        twobufs = ((PAGE_SIZE < 8192) && hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048);
 508
 509        desc = &ring->desc[ring->next_to_clean];
 510        size = le16_to_cpu(desc->rx.size);
 511
 512        if (twobufs) {
 513                truesize = hnae_buf_size(ring);
 514        } else {
 515                truesize = ALIGN(size, L1_CACHE_BYTES);
 516                last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
 517        }
 518
 519        skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
 520                        size - pull_len, truesize - pull_len);
 521
  522        /* avoid re-using remote pages; the reuse flag defaults to not reused */
 523        if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
 524                return;
 525
 526        if (twobufs) {
 527                /* if we are only owner of page we can reuse it */
 528                if (likely(page_count(desc_cb->priv) == 1)) {
 529                        /* flip page offset to other buffer */
 530                        desc_cb->page_offset ^= truesize;
 531
 532                        desc_cb->reuse_flag = 1;
 533                        /* bump ref count on page before it is given*/
 534                        get_page(desc_cb->priv);
 535                }
 536                return;
 537        }
 538
 539        /* move offset up to the next cache line */
 540        desc_cb->page_offset += truesize;
 541
 542        if (desc_cb->page_offset <= last_offset) {
 543                desc_cb->reuse_flag = 1;
 544                /* bump ref count on page before it is given*/
 545                get_page(desc_cb->priv);
 546        }
 547}
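/*
 * Note on the "twobufs" case above: with 2048-byte buffers and a page size
 * below 8192 (typically 4096) each page holds two receive buffers, so
 * page_offset ^= truesize simply flips between the two halves of the page;
 * with larger pages the offset instead walks forward until last_offset is
 * reached.
 */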
 548
 549static void get_v2rx_desc_bnum(u32 bnum_flag, int *out_bnum)
 550{
 551        *out_bnum = hnae_get_field(bnum_flag,
 552                                   HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S) + 1;
 553}
 554
 555static void get_rx_desc_bnum(u32 bnum_flag, int *out_bnum)
 556{
 557        *out_bnum = hnae_get_field(bnum_flag,
 558                                   HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S);
 559}
 560
 561static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
 562                               struct sk_buff **out_skb, int *out_bnum)
 563{
 564        struct hnae_ring *ring = ring_data->ring;
 565        struct net_device *ndev = ring_data->napi.dev;
 566        struct hns_nic_priv *priv = netdev_priv(ndev);
 567        struct sk_buff *skb;
 568        struct hnae_desc *desc;
 569        struct hnae_desc_cb *desc_cb;
 570        struct ethhdr *eh;
 571        unsigned char *va;
 572        int bnum, length, i;
 573        int pull_len;
 574        u32 bnum_flag;
 575
 576        desc = &ring->desc[ring->next_to_clean];
 577        desc_cb = &ring->desc_cb[ring->next_to_clean];
 578
 579        prefetch(desc);
 580
 581        va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
 582
 583        /* prefetch first cache line of first page */
 584        prefetch(va);
 585#if L1_CACHE_BYTES < 128
 586        prefetch(va + L1_CACHE_BYTES);
 587#endif
 588
 589        skb = *out_skb = napi_alloc_skb(&ring_data->napi,
 590                                        HNS_RX_HEAD_SIZE);
 591        if (unlikely(!skb)) {
 592                netdev_err(ndev, "alloc rx skb fail\n");
 593                ring->stats.sw_err_cnt++;
 594                return -ENOMEM;
 595        }
 596
 597        prefetchw(skb->data);
 598        length = le16_to_cpu(desc->rx.pkt_len);
 599        bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
 600        priv->ops.get_rxd_bnum(bnum_flag, &bnum);
 601        *out_bnum = bnum;
 602
 603        if (length <= HNS_RX_HEAD_SIZE) {
 604                memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
 605
 606                /* we can reuse buffer as-is, just make sure it is local */
 607                if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
 608                        desc_cb->reuse_flag = 1;
 609                else /* this page cannot be reused so discard it */
 610                        put_page(desc_cb->priv);
 611
 612                ring_ptr_move_fw(ring, next_to_clean);
 613
 614                if (unlikely(bnum != 1)) { /* check err*/
 615                        *out_bnum = 1;
 616                        goto out_bnum_err;
 617                }
 618        } else {
 619                ring->stats.seg_pkt_cnt++;
 620
 621                pull_len = hns_nic_get_headlen(va, bnum_flag, HNS_RX_HEAD_SIZE);
 622                memcpy(__skb_put(skb, pull_len), va,
 623                       ALIGN(pull_len, sizeof(long)));
 624
 625                hns_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
 626                ring_ptr_move_fw(ring, next_to_clean);
 627
 628                if (unlikely(bnum >= (int)MAX_SKB_FRAGS)) { /* check err*/
 629                        *out_bnum = 1;
 630                        goto out_bnum_err;
 631                }
 632                for (i = 1; i < bnum; i++) {
 633                        desc = &ring->desc[ring->next_to_clean];
 634                        desc_cb = &ring->desc_cb[ring->next_to_clean];
 635
 636                        hns_nic_reuse_page(skb, i, ring, 0, desc_cb);
 637                        ring_ptr_move_fw(ring, next_to_clean);
 638                }
 639        }
 640
  641        /* exception handling: free the skb and skip over the descriptors */
 642        if (unlikely((!bnum) || (bnum > ring->max_desc_num_per_pkt))) {
 643out_bnum_err:
  644                *out_bnum = *out_bnum ? *out_bnum : 1; /* ntc moved, cannot be 0 */
 645                netdev_err(ndev, "invalid bnum(%d,%d,%d,%d),%016llx,%016llx\n",
 646                           bnum, ring->max_desc_num_per_pkt,
 647                           length, (int)MAX_SKB_FRAGS,
 648                           ((u64 *)desc)[0], ((u64 *)desc)[1]);
 649                ring->stats.err_bd_num++;
 650                dev_kfree_skb_any(skb);
 651                return -EDOM;
 652        }
 653
 654        bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
 655
 656        if (unlikely(!hnae_get_bit(bnum_flag, HNS_RXD_VLD_B))) {
 657                netdev_err(ndev, "no valid bd,%016llx,%016llx\n",
 658                           ((u64 *)desc)[0], ((u64 *)desc)[1]);
 659                ring->stats.non_vld_descs++;
 660                dev_kfree_skb_any(skb);
 661                return -EINVAL;
 662        }
 663
 664        if (unlikely((!desc->rx.pkt_len) ||
 665                     hnae_get_bit(bnum_flag, HNS_RXD_DROP_B))) {
 666                ring->stats.err_pkt_len++;
 667                dev_kfree_skb_any(skb);
 668                return -EFAULT;
 669        }
 670
 671        if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L2E_B))) {
 672                ring->stats.l2_err++;
 673                dev_kfree_skb_any(skb);
 674                return -EFAULT;
 675        }
 676
 677        /* filter out multicast pkt with the same src mac as this port */
 678        eh = eth_hdr(skb);
 679        if (unlikely(is_multicast_ether_addr(eh->h_dest) &&
 680                     ether_addr_equal(ndev->dev_addr, eh->h_source))) {
 681                dev_kfree_skb_any(skb);
 682                return -EFAULT;
 683        }
 684
 685        ring->stats.rx_pkts++;
 686        ring->stats.rx_bytes += skb->len;
 687
 688        if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L3E_B) ||
 689                     hnae_get_bit(bnum_flag, HNS_RXD_L4E_B))) {
 690                ring->stats.l3l4_csum_err++;
 691                return 0;
 692        }
 693
 694        skb->ip_summed = CHECKSUM_UNNECESSARY;
 695
 696        return 0;
 697}
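/*
 * Note on the receive path above: packets no longer than HNS_RX_HEAD_SIZE
 * are copied entirely into the freshly allocated skb so the ring buffer can
 * be reused at once; longer packets only have their headers pulled in and
 * the remaining buffers are attached as page fragments via
 * hns_nic_reuse_page().
 */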
 698
 699static void
 700hns_nic_alloc_rx_buffers(struct hns_nic_ring_data *ring_data, int cleand_count)
 701{
 702        int i, ret;
 703        struct hnae_desc_cb res_cbs;
 704        struct hnae_desc_cb *desc_cb;
 705        struct hnae_ring *ring = ring_data->ring;
 706        struct net_device *ndev = ring_data->napi.dev;
 707
 708        for (i = 0; i < cleand_count; i++) {
 709                desc_cb = &ring->desc_cb[ring->next_to_use];
 710                if (desc_cb->reuse_flag) {
 711                        ring->stats.reuse_pg_cnt++;
 712                        hnae_reuse_buffer(ring, ring->next_to_use);
 713                } else {
 714                        ret = hnae_reserve_buffer_map(ring, &res_cbs);
 715                        if (ret) {
 716                                ring->stats.sw_err_cnt++;
 717                                netdev_err(ndev, "hnae reserve buffer map failed.\n");
 718                                break;
 719                        }
 720                        hnae_replace_buffer(ring, ring->next_to_use, &res_cbs);
 721                }
 722
 723                ring_ptr_move_fw(ring, next_to_use);
 724        }
 725
  726        wmb(); /* make sure all data has been written before submit */
 727        writel_relaxed(i, ring->io_base + RCB_REG_HEAD);
 728}
 729
  730/* pass the received packet up to the network stack via GRO */
 732static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
 733                              struct sk_buff *skb)
 734{
 735        struct net_device *ndev = ring_data->napi.dev;
 736
 737        skb->protocol = eth_type_trans(skb, ndev);
 738        (void)napi_gro_receive(&ring_data->napi, skb);
 739        ndev->last_rx = jiffies;
 740}
 741
 742static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
 743                               int budget, void *v)
 744{
 745        struct hnae_ring *ring = ring_data->ring;
 746        struct sk_buff *skb;
 747        int num, bnum, ex_num;
 748#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
 749        int recv_pkts, recv_bds, clean_count, err;
 750
 751        num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
  752        rmb(); /* make sure num has taken effect before the other data is touched */
 753
 754        recv_pkts = 0, recv_bds = 0, clean_count = 0;
 755recv:
 756        while (recv_pkts < budget && recv_bds < num) {
 757                /* reuse or realloc buffers*/
 758                if (clean_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
 759                        hns_nic_alloc_rx_buffers(ring_data, clean_count);
 760                        clean_count = 0;
 761                }
 762
  763                /* poll one packet */
 764                err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum);
 765                if (unlikely(!skb)) /* this fault cannot be repaired */
 766                        break;
 767
 768                recv_bds += bnum;
 769                clean_count += bnum;
  770                if (unlikely(err)) {  /* skip the erroneous packet */
 771                        recv_pkts++;
 772                        continue;
 773                }
 774
  775                /* hand the packet up to the IP stack */
 776                ((void (*)(struct hns_nic_ring_data *, struct sk_buff *))v)(
 777                                                        ring_data, skb);
 778                recv_pkts++;
 779        }
 780
  781        /* if budget remains, check whether new BDs arrived while polling */
 782        if (recv_pkts < budget) {
 783                ex_num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
 784
 785                if (ex_num > clean_count) {
 786                        num += ex_num - clean_count;
 787                        rmb(); /*complete read rx ring bd number*/
 788                        goto recv;
 789                }
 790        }
 791
  792        /* refill any buffers that are still pending */
 793        if (clean_count > 0)
 794                hns_nic_alloc_rx_buffers(ring_data, clean_count);
 795
 796        return recv_pkts;
 797}
 798
 799static void hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
 800{
 801        struct hnae_ring *ring = ring_data->ring;
 802        int num = 0;
 803
  804        /* re-check the BD count (workaround for a hardware bug) */
 805        num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
 806
 807        if (num > 0) {
 808                ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
 809                        ring_data->ring, 1);
 810
 811                napi_schedule(&ring_data->napi);
 812        }
 813}
 814
 815static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
 816                                            int *bytes, int *pkts)
 817{
 818        struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
 819
 820        (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
 821        (*bytes) += desc_cb->length;
 822        /* desc_cb will be cleaned, after hnae_free_buffer_detach*/
 823        hnae_free_buffer_detach(ring, ring->next_to_clean);
 824
 825        ring_ptr_move_fw(ring, next_to_clean);
 826}
 827
 828static int is_valid_clean_head(struct hnae_ring *ring, int h)
 829{
 830        int u = ring->next_to_use;
 831        int c = ring->next_to_clean;
 832
 833        if (unlikely(h > ring->desc_num))
 834                return 0;
 835
 836        assert(u > 0 && u < ring->desc_num);
 837        assert(c > 0 && c < ring->desc_num);
  838        assert(u != c && h != c); /* must be checked before calling this func */
 839
 840        return u > c ? (h > c && h <= u) : (h > c || h <= u);
 841}
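/*
 * Example (illustrative): with desc_num = 1024, next_to_clean = 1000 and
 * next_to_use = 10 the ring has wrapped, so a hardware head value is
 * accepted only if it is greater than 1000 or at most 10; anything else
 * indicates a corrupted head pointer and the caller counts it in
 * io_err_cnt.
 */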
 842
  843/* netif_tx_lock degrades performance; take it only when necessary */
 844#ifdef CONFIG_NET_POLL_CONTROLLER
 845#define NETIF_TX_LOCK(ndev) netif_tx_lock(ndev)
 846#define NETIF_TX_UNLOCK(ndev) netif_tx_unlock(ndev)
 847#else
 848#define NETIF_TX_LOCK(ndev)
 849#define NETIF_TX_UNLOCK(ndev)
 850#endif
 851/* reclaim all desc in one budget
 852 * return error or number of desc left
 853 */
 854static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
 855                               int budget, void *v)
 856{
 857        struct hnae_ring *ring = ring_data->ring;
 858        struct net_device *ndev = ring_data->napi.dev;
 859        struct netdev_queue *dev_queue;
 860        struct hns_nic_priv *priv = netdev_priv(ndev);
 861        int head;
 862        int bytes, pkts;
 863
 864        NETIF_TX_LOCK(ndev);
 865
 866        head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
 867        rmb(); /* make sure head is ready before touch any data */
 868
 869        if (is_ring_empty(ring) || head == ring->next_to_clean) {
 870                NETIF_TX_UNLOCK(ndev);
 871                return 0; /* no data to poll */
 872        }
 873
 874        if (!is_valid_clean_head(ring, head)) {
 875                netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
 876                           ring->next_to_use, ring->next_to_clean);
 877                ring->stats.io_err_cnt++;
 878                NETIF_TX_UNLOCK(ndev);
 879                return -EIO;
 880        }
 881
 882        bytes = 0;
 883        pkts = 0;
 884        while (head != ring->next_to_clean) {
 885                hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
 886                /* issue prefetch for next Tx descriptor */
 887                prefetch(&ring->desc_cb[ring->next_to_clean]);
 888        }
 889
 890        NETIF_TX_UNLOCK(ndev);
 891
 892        dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
 893        netdev_tx_completed_queue(dev_queue, pkts, bytes);
 894
 895        if (unlikely(priv->link && !netif_carrier_ok(ndev)))
 896                netif_carrier_on(ndev);
 897
 898        if (unlikely(pkts && netif_carrier_ok(ndev) &&
 899                     (ring_space(ring) >= ring->max_desc_num_per_pkt * 2))) {
 900                /* Make sure that anybody stopping the queue after this
 901                 * sees the new next_to_clean.
 902                 */
 903                smp_mb();
 904                if (netif_tx_queue_stopped(dev_queue) &&
 905                    !test_bit(NIC_STATE_DOWN, &priv->state)) {
 906                        netif_tx_wake_queue(dev_queue);
 907                        ring->stats.restart_queue++;
 908                }
 909        }
 910        return 0;
 911}
 912
 913static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
 914{
 915        struct hnae_ring *ring = ring_data->ring;
 916        int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
 917
 918        if (head != ring->next_to_clean) {
 919                ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
 920                        ring_data->ring, 1);
 921
 922                napi_schedule(&ring_data->napi);
 923        }
 924}
 925
 926static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
 927{
 928        struct hnae_ring *ring = ring_data->ring;
 929        struct net_device *ndev = ring_data->napi.dev;
 930        struct netdev_queue *dev_queue;
 931        int head;
 932        int bytes, pkts;
 933
 934        NETIF_TX_LOCK(ndev);
 935
  936        head = ring->next_to_use; /* ntu: software-set ring position */
 937        bytes = 0;
 938        pkts = 0;
 939        while (head != ring->next_to_clean)
 940                hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
 941
 942        NETIF_TX_UNLOCK(ndev);
 943
 944        dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
 945        netdev_tx_reset_queue(dev_queue);
 946}
 947
 948static int hns_nic_common_poll(struct napi_struct *napi, int budget)
 949{
 950        struct hns_nic_ring_data *ring_data =
 951                container_of(napi, struct hns_nic_ring_data, napi);
 952        int clean_complete = ring_data->poll_one(
 953                                ring_data, budget, ring_data->ex_process);
 954
 955        if (clean_complete >= 0 && clean_complete < budget) {
 956                napi_complete(napi);
 957                ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
 958                        ring_data->ring, 0);
 959                if (ring_data->fini_process)
 960                        ring_data->fini_process(ring_data);
 961                return 0;
 962        }
 963
 964        return clean_complete;
 965}
 966
 967static irqreturn_t hns_irq_handle(int irq, void *dev)
 968{
 969        struct hns_nic_ring_data *ring_data = (struct hns_nic_ring_data *)dev;
 970
 971        ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
 972                ring_data->ring, 1);
 973        napi_schedule(&ring_data->napi);
 974
 975        return IRQ_HANDLED;
 976}
 977
 978/**
  979 *hns_nic_adjust_link - adjust network mode by the phy state or new param
 980 *@ndev: net device
 981 */
 982static void hns_nic_adjust_link(struct net_device *ndev)
 983{
 984        struct hns_nic_priv *priv = netdev_priv(ndev);
 985        struct hnae_handle *h = priv->ae_handle;
 986
 987        h->dev->ops->adjust_link(h, ndev->phydev->speed, ndev->phydev->duplex);
 988}
 989
 990/**
 991 *hns_nic_init_phy - init phy
 992 *@ndev: net device
 993 *@h: ae handle
 994 * Return 0 on success, negative on failure
 995 */
 996int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
 997{
 998        struct hns_nic_priv *priv = netdev_priv(ndev);
 999        struct phy_device *phy_dev = NULL;
1000
1001        if (!h->phy_node)
1002                return 0;
1003
1004        if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
1005                phy_dev = of_phy_connect(ndev, h->phy_node,
1006                                         hns_nic_adjust_link, 0, h->phy_if);
1007        else
1008                phy_dev = of_phy_attach(ndev, h->phy_node, 0, h->phy_if);
1009
1010        if (unlikely(!phy_dev) || IS_ERR(phy_dev))
1011                return !phy_dev ? -ENODEV : PTR_ERR(phy_dev);
1012
1013        phy_dev->supported &= h->if_support;
1014        phy_dev->advertising = phy_dev->supported;
1015
1016        if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
1017                phy_dev->autoneg = false;
1018
1019        priv->phy = phy_dev;
1020
1021        return 0;
1022}
1023
1024static int hns_nic_ring_open(struct net_device *netdev, int idx)
1025{
1026        struct hns_nic_priv *priv = netdev_priv(netdev);
1027        struct hnae_handle *h = priv->ae_handle;
1028
1029        napi_enable(&priv->ring_data[idx].napi);
1030
1031        enable_irq(priv->ring_data[idx].ring->irq);
1032        h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 0);
1033
1034        return 0;
1035}
1036
1037static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p)
1038{
1039        struct hns_nic_priv *priv = netdev_priv(ndev);
1040        struct hnae_handle *h = priv->ae_handle;
1041        struct sockaddr *mac_addr = p;
1042        int ret;
1043
1044        if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
1045                return -EADDRNOTAVAIL;
1046
1047        ret = h->dev->ops->set_mac_addr(h, mac_addr->sa_data);
1048        if (ret) {
1049                netdev_err(ndev, "set_mac_address fail, ret=%d!\n", ret);
1050                return ret;
1051        }
1052
1053        memcpy(ndev->dev_addr, mac_addr->sa_data, ndev->addr_len);
1054
1055        return 0;
1056}
1057
1058void hns_nic_update_stats(struct net_device *netdev)
1059{
1060        struct hns_nic_priv *priv = netdev_priv(netdev);
1061        struct hnae_handle *h = priv->ae_handle;
1062
1063        h->dev->ops->update_stats(h, &netdev->stats);
1064}
1065
 1066/* set the mac addr if it is configured, or leave it to the AE driver */
1067static void hns_init_mac_addr(struct net_device *ndev)
1068{
1069        struct hns_nic_priv *priv = netdev_priv(ndev);
1070        struct device_node *node = priv->dev->of_node;
1071        const void *mac_addr_temp;
1072
1073        mac_addr_temp = of_get_mac_address(node);
1074        if (mac_addr_temp && is_valid_ether_addr(mac_addr_temp)) {
1075                memcpy(ndev->dev_addr, mac_addr_temp, ndev->addr_len);
1076        } else {
1077                eth_hw_addr_random(ndev);
1078                dev_warn(priv->dev, "No valid mac, use random mac %pM",
1079                         ndev->dev_addr);
1080        }
1081}
1082
1083static void hns_nic_ring_close(struct net_device *netdev, int idx)
1084{
1085        struct hns_nic_priv *priv = netdev_priv(netdev);
1086        struct hnae_handle *h = priv->ae_handle;
1087
1088        h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 1);
1089        disable_irq(priv->ring_data[idx].ring->irq);
1090
1091        napi_disable(&priv->ring_data[idx].napi);
1092}
1093
1094static void hns_set_irq_affinity(struct hns_nic_priv *priv)
1095{
1096        struct hnae_handle *h = priv->ae_handle;
1097        struct hns_nic_ring_data *rd;
1098        int i;
1099        int cpu;
1100        cpumask_t mask;
1101
 1102        /* different irq balancing for 16-core and 32-core systems */
1103        if (h->q_num == num_possible_cpus()) {
1104                for (i = 0; i < h->q_num * 2; i++) {
1105                        rd = &priv->ring_data[i];
1106                        if (cpu_online(rd->queue_index)) {
1107                                cpumask_clear(&mask);
1108                                cpu = rd->queue_index;
1109                                cpumask_set_cpu(cpu, &mask);
1110                                (void)irq_set_affinity_hint(rd->ring->irq,
1111                                                            &mask);
1112                        }
1113                }
1114        } else {
1115                for (i = 0; i < h->q_num; i++) {
1116                        rd = &priv->ring_data[i];
1117                        if (cpu_online(rd->queue_index * 2)) {
1118                                cpumask_clear(&mask);
1119                                cpu = rd->queue_index * 2;
1120                                cpumask_set_cpu(cpu, &mask);
1121                                (void)irq_set_affinity_hint(rd->ring->irq,
1122                                                            &mask);
1123                        }
1124                }
1125
1126                for (i = h->q_num; i < h->q_num * 2; i++) {
1127                        rd = &priv->ring_data[i];
1128                        if (cpu_online(rd->queue_index * 2 + 1)) {
1129                                cpumask_clear(&mask);
1130                                cpu = rd->queue_index * 2 + 1;
1131                                cpumask_set_cpu(cpu, &mask);
1132                                (void)irq_set_affinity_hint(rd->ring->irq,
1133                                                            &mask);
1134                        }
1135                }
1136        }
1137}
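/*
 * Example of the affinity layout (illustrative): with q_num = 16 queues on a
 * 32-core system, tx ring i is pinned to cpu 2*i and the matching rx ring to
 * cpu 2*i + 1; when q_num equals the number of possible cpus, each ring is
 * simply pinned to the cpu matching its own queue index.
 */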
1138
1139static int hns_nic_init_irq(struct hns_nic_priv *priv)
1140{
1141        struct hnae_handle *h = priv->ae_handle;
1142        struct hns_nic_ring_data *rd;
1143        int i;
1144        int ret;
1145
1146        for (i = 0; i < h->q_num * 2; i++) {
1147                rd = &priv->ring_data[i];
1148
1149                if (rd->ring->irq_init_flag == RCB_IRQ_INITED)
1150                        break;
1151
1152                snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN,
1153                         "%s-%s%d", priv->netdev->name,
1154                         (i < h->q_num ? "tx" : "rx"), rd->queue_index);
1155
1156                rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';
1157
1158                ret = request_irq(rd->ring->irq,
1159                                  hns_irq_handle, 0, rd->ring->ring_name, rd);
1160                if (ret) {
1161                        netdev_err(priv->netdev, "request irq(%d) fail\n",
1162                                   rd->ring->irq);
1163                        return ret;
1164                }
1165                disable_irq(rd->ring->irq);
1166                rd->ring->irq_init_flag = RCB_IRQ_INITED;
1167        }
1168
1169        /*set cpu affinity*/
1170        hns_set_irq_affinity(priv);
1171
1172        return 0;
1173}
1174
1175static int hns_nic_net_up(struct net_device *ndev)
1176{
1177        struct hns_nic_priv *priv = netdev_priv(ndev);
1178        struct hnae_handle *h = priv->ae_handle;
1179        int i, j, k;
1180        int ret;
1181
1182        ret = hns_nic_init_irq(priv);
1183        if (ret != 0) {
1184                netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);
1185                return ret;
1186        }
1187
1188        for (i = 0; i < h->q_num * 2; i++) {
1189                ret = hns_nic_ring_open(ndev, i);
1190                if (ret)
1191                        goto out_has_some_queues;
1192        }
1193
1194        for (k = 0; k < h->q_num; k++)
1195                h->dev->ops->toggle_queue_status(h->qs[k], 1);
1196
1197        ret = h->dev->ops->set_mac_addr(h, ndev->dev_addr);
1198        if (ret)
1199                goto out_set_mac_addr_err;
1200
1201        ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
1202        if (ret)
1203                goto out_start_err;
1204
1205        if (priv->phy)
1206                phy_start(priv->phy);
1207
1208        clear_bit(NIC_STATE_DOWN, &priv->state);
1209        (void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);
1210
1211        return 0;
1212
1213out_start_err:
1214        netif_stop_queue(ndev);
1215out_set_mac_addr_err:
1216        for (k = 0; k < h->q_num; k++)
1217                h->dev->ops->toggle_queue_status(h->qs[k], 0);
1218out_has_some_queues:
1219        for (j = i - 1; j >= 0; j--)
1220                hns_nic_ring_close(ndev, j);
1221
1222        set_bit(NIC_STATE_DOWN, &priv->state);
1223
1224        return ret;
1225}
1226
1227static void hns_nic_net_down(struct net_device *ndev)
1228{
1229        int i;
1230        struct hnae_ae_ops *ops;
1231        struct hns_nic_priv *priv = netdev_priv(ndev);
1232
1233        if (test_and_set_bit(NIC_STATE_DOWN, &priv->state))
1234                return;
1235
1236        (void)del_timer_sync(&priv->service_timer);
1237        netif_tx_stop_all_queues(ndev);
1238        netif_carrier_off(ndev);
1239        netif_tx_disable(ndev);
1240        priv->link = 0;
1241
1242        if (priv->phy)
1243                phy_stop(priv->phy);
1244
1245        ops = priv->ae_handle->dev->ops;
1246
1247        if (ops->stop)
1248                ops->stop(priv->ae_handle);
1249
1250        netif_tx_stop_all_queues(ndev);
1251
1252        for (i = priv->ae_handle->q_num - 1; i >= 0; i--) {
1253                hns_nic_ring_close(ndev, i);
1254                hns_nic_ring_close(ndev, i + priv->ae_handle->q_num);
1255
1256                /* clean tx buffers*/
1257                hns_nic_tx_clr_all_bufs(priv->ring_data + i);
1258        }
1259}
1260
1261void hns_nic_net_reset(struct net_device *ndev)
1262{
1263        struct hns_nic_priv *priv = netdev_priv(ndev);
1264        struct hnae_handle *handle = priv->ae_handle;
1265
1266        while (test_and_set_bit(NIC_STATE_RESETTING, &priv->state))
1267                usleep_range(1000, 2000);
1268
1269        (void)hnae_reinit_handle(handle);
1270
1271        clear_bit(NIC_STATE_RESETTING, &priv->state);
1272}
1273
1274void hns_nic_net_reinit(struct net_device *netdev)
1275{
1276        struct hns_nic_priv *priv = netdev_priv(netdev);
1277
1278        priv->netdev->trans_start = jiffies;
1279        while (test_and_set_bit(NIC_STATE_REINITING, &priv->state))
1280                usleep_range(1000, 2000);
1281
1282        hns_nic_net_down(netdev);
1283        hns_nic_net_reset(netdev);
1284        (void)hns_nic_net_up(netdev);
1285        clear_bit(NIC_STATE_REINITING, &priv->state);
1286}
1287
1288static int hns_nic_net_open(struct net_device *ndev)
1289{
1290        struct hns_nic_priv *priv = netdev_priv(ndev);
1291        struct hnae_handle *h = priv->ae_handle;
1292        int ret;
1293
1294        if (test_bit(NIC_STATE_TESTING, &priv->state))
1295                return -EBUSY;
1296
1297        priv->link = 0;
1298        netif_carrier_off(ndev);
1299
1300        ret = netif_set_real_num_tx_queues(ndev, h->q_num);
1301        if (ret < 0) {
1302                netdev_err(ndev, "netif_set_real_num_tx_queues fail, ret=%d!\n",
1303                           ret);
1304                return ret;
1305        }
1306
1307        ret = netif_set_real_num_rx_queues(ndev, h->q_num);
1308        if (ret < 0) {
1309                netdev_err(ndev,
1310                           "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
1311                return ret;
1312        }
1313
1314        ret = hns_nic_net_up(ndev);
1315        if (ret) {
1316                netdev_err(ndev,
1317                           "hns net up fail, ret=%d!\n", ret);
1318                return ret;
1319        }
1320
1321        return 0;
1322}
1323
1324static int hns_nic_net_stop(struct net_device *ndev)
1325{
1326        hns_nic_net_down(ndev);
1327
1328        return 0;
1329}
1330
1331static void hns_tx_timeout_reset(struct hns_nic_priv *priv);
1332static void hns_nic_net_timeout(struct net_device *ndev)
1333{
1334        struct hns_nic_priv *priv = netdev_priv(ndev);
1335
1336        hns_tx_timeout_reset(priv);
1337}
1338
1339static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
1340                            int cmd)
1341{
1342        struct hns_nic_priv *priv = netdev_priv(netdev);
1343        struct phy_device *phy_dev = priv->phy;
1344
1345        if (!netif_running(netdev))
1346                return -EINVAL;
1347
1348        if (!phy_dev)
1349                return -ENOTSUPP;
1350
1351        return phy_mii_ioctl(phy_dev, ifr, cmd);
1352}
1353
 1354/* used only by netconsole to poll the device without interrupts */
1355#ifdef CONFIG_NET_POLL_CONTROLLER
1356void hns_nic_poll_controller(struct net_device *ndev)
1357{
1358        struct hns_nic_priv *priv = netdev_priv(ndev);
1359        unsigned long flags;
1360        int i;
1361
1362        local_irq_save(flags);
1363        for (i = 0; i < priv->ae_handle->q_num * 2; i++)
1364                napi_schedule(&priv->ring_data[i].napi);
1365        local_irq_restore(flags);
1366}
1367#endif
1368
1369static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
1370                                    struct net_device *ndev)
1371{
1372        struct hns_nic_priv *priv = netdev_priv(ndev);
1373        int ret;
1374
 1375        assert(skb->queue_mapping < priv->ae_handle->q_num);
1376        ret = hns_nic_net_xmit_hw(ndev, skb,
1377                                  &tx_ring_data(priv, skb->queue_mapping));
1378        if (ret == NETDEV_TX_OK) {
1379                ndev->trans_start = jiffies;
1380                ndev->stats.tx_bytes += skb->len;
1381                ndev->stats.tx_packets++;
1382        }
1383        return (netdev_tx_t)ret;
1384}
1385
1386static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
1387{
1388        struct hns_nic_priv *priv = netdev_priv(ndev);
1389        struct hnae_handle *h = priv->ae_handle;
1390        int ret;
1391
1392        /* MTU < 68 is an error and causes problems on some kernels */
1393        if (new_mtu < 68)
1394                return -EINVAL;
1395
1396        if (!h->dev->ops->set_mtu)
1397                return -ENOTSUPP;
1398
1399        if (netif_running(ndev)) {
1400                (void)hns_nic_net_stop(ndev);
1401                msleep(100);
1402
1403                ret = h->dev->ops->set_mtu(h, new_mtu);
1404                if (ret)
1405                        netdev_err(ndev, "set mtu fail, return value %d\n",
1406                                   ret);
1407
1408                if (hns_nic_net_open(ndev))
1409                        netdev_err(ndev, "hns net open fail\n");
1410        } else {
1411                ret = h->dev->ops->set_mtu(h, new_mtu);
1412        }
1413
1414        if (!ret)
1415                ndev->mtu = new_mtu;
1416
1417        return ret;
1418}
1419
1420static int hns_nic_set_features(struct net_device *netdev,
1421                                netdev_features_t features)
1422{
1423        struct hns_nic_priv *priv = netdev_priv(netdev);
1424        struct hnae_handle *h = priv->ae_handle;
1425
1426        switch (priv->enet_ver) {
1427        case AE_VERSION_1:
1428                if (features & (NETIF_F_TSO | NETIF_F_TSO6))
 1429                        netdev_info(netdev, "enet v1 does not support tso!\n");
1430                break;
1431        default:
1432                if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
1433                        priv->ops.fill_desc = fill_tso_desc;
1434                        priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
 1435                        /* The chip only supports 7*4096 */
1436                        netif_set_gso_max_size(netdev, 7 * 4096);
1437                        h->dev->ops->set_tso_stats(h, 1);
1438                } else {
1439                        priv->ops.fill_desc = fill_v2_desc;
1440                        priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
1441                        h->dev->ops->set_tso_stats(h, 0);
1442                }
1443                break;
1444        }
1445        netdev->features = features;
1446        return 0;
1447}
1448
1449static netdev_features_t hns_nic_fix_features(
1450                struct net_device *netdev, netdev_features_t features)
1451{
1452        struct hns_nic_priv *priv = netdev_priv(netdev);
1453
1454        switch (priv->enet_ver) {
1455        case AE_VERSION_1:
1456                features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
1457                                NETIF_F_HW_VLAN_CTAG_FILTER);
1458                break;
1459        default:
1460                break;
1461        }
1462        return features;
1463}
1464
1465/**
 1466 * hns_set_multicast_list - set multicast mac addresses
 1467 * @ndev: net device
 1468 *
1470 * return void
1471 */
1472void hns_set_multicast_list(struct net_device *ndev)
1473{
1474        struct hns_nic_priv *priv = netdev_priv(ndev);
1475        struct hnae_handle *h = priv->ae_handle;
1476        struct netdev_hw_addr *ha = NULL;
1477
1478        if (!h) {
1479                netdev_err(ndev, "hnae handle is null\n");
1480                return;
1481        }
1482
1483        if (h->dev->ops->set_mc_addr) {
1484                netdev_for_each_mc_addr(ha, ndev)
1485                        if (h->dev->ops->set_mc_addr(h, ha->addr))
1486                                netdev_err(ndev, "set multicast fail\n");
1487        }
1488}
1489
1490void hns_nic_set_rx_mode(struct net_device *ndev)
1491{
1492        struct hns_nic_priv *priv = netdev_priv(ndev);
1493        struct hnae_handle *h = priv->ae_handle;
1494
1495        if (h->dev->ops->set_promisc_mode) {
1496                if (ndev->flags & IFF_PROMISC)
1497                        h->dev->ops->set_promisc_mode(h, 1);
1498                else
1499                        h->dev->ops->set_promisc_mode(h, 0);
1500        }
1501
1502        hns_set_multicast_list(ndev);
1503}
1504
1505struct rtnl_link_stats64 *hns_nic_get_stats64(struct net_device *ndev,
1506                                              struct rtnl_link_stats64 *stats)
1507{
1508        int idx = 0;
1509        u64 tx_bytes = 0;
1510        u64 rx_bytes = 0;
1511        u64 tx_pkts = 0;
1512        u64 rx_pkts = 0;
1513        struct hns_nic_priv *priv = netdev_priv(ndev);
1514        struct hnae_handle *h = priv->ae_handle;
1515
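            /* sum the per-ring counters; error stats come from ndev->stats */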
1516        for (idx = 0; idx < h->q_num; idx++) {
1517                tx_bytes += h->qs[idx]->tx_ring.stats.tx_bytes;
1518                tx_pkts += h->qs[idx]->tx_ring.stats.tx_pkts;
1519                rx_bytes += h->qs[idx]->rx_ring.stats.rx_bytes;
1520                rx_pkts += h->qs[idx]->rx_ring.stats.rx_pkts;
1521        }
1522
1523        stats->tx_bytes = tx_bytes;
1524        stats->tx_packets = tx_pkts;
1525        stats->rx_bytes = rx_bytes;
1526        stats->rx_packets = rx_pkts;
1527
1528        stats->rx_errors = ndev->stats.rx_errors;
1529        stats->multicast = ndev->stats.multicast;
1530        stats->rx_length_errors = ndev->stats.rx_length_errors;
1531        stats->rx_crc_errors = ndev->stats.rx_crc_errors;
1532        stats->rx_missed_errors = ndev->stats.rx_missed_errors;
1533
1534        stats->tx_errors = ndev->stats.tx_errors;
1535        stats->rx_dropped = ndev->stats.rx_dropped;
1536        stats->tx_dropped = ndev->stats.tx_dropped;
1537        stats->collisions = ndev->stats.collisions;
1538        stats->rx_over_errors = ndev->stats.rx_over_errors;
1539        stats->rx_frame_errors = ndev->stats.rx_frame_errors;
1540        stats->rx_fifo_errors = ndev->stats.rx_fifo_errors;
1541        stats->tx_aborted_errors = ndev->stats.tx_aborted_errors;
1542        stats->tx_carrier_errors = ndev->stats.tx_carrier_errors;
1543        stats->tx_fifo_errors = ndev->stats.tx_fifo_errors;
1544        stats->tx_heartbeat_errors = ndev->stats.tx_heartbeat_errors;
1545        stats->tx_window_errors = ndev->stats.tx_window_errors;
1546        stats->rx_compressed = ndev->stats.rx_compressed;
1547        stats->tx_compressed = ndev->stats.tx_compressed;
1548
1549        return stats;
1550}
1551
1552static const struct net_device_ops hns_nic_netdev_ops = {
1553        .ndo_open = hns_nic_net_open,
1554        .ndo_stop = hns_nic_net_stop,
1555        .ndo_start_xmit = hns_nic_net_xmit,
1556        .ndo_tx_timeout = hns_nic_net_timeout,
1557        .ndo_set_mac_address = hns_nic_net_set_mac_address,
1558        .ndo_change_mtu = hns_nic_change_mtu,
1559        .ndo_do_ioctl = hns_nic_do_ioctl,
1560        .ndo_set_features = hns_nic_set_features,
1561        .ndo_fix_features = hns_nic_fix_features,
1562        .ndo_get_stats64 = hns_nic_get_stats64,
1563#ifdef CONFIG_NET_POLL_CONTROLLER
1564        .ndo_poll_controller = hns_nic_poll_controller,
1565#endif
1566        .ndo_set_rx_mode = hns_nic_set_rx_mode,
1567};
1568
1569static void hns_nic_update_link_status(struct net_device *netdev)
1570{
1571        struct hns_nic_priv *priv = netdev_priv(netdev);
1572
1573        struct hnae_handle *h = priv->ae_handle;
1574        int state = 1;
1575
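            /* link is up only when both the PHY (if any) and the hw agree */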
1576        if (priv->phy) {
1577                if (!genphy_update_link(priv->phy))
1578                        state = priv->phy->link;
1579                else
1580                        state = 0;
1581        }
1582        state = state && h->dev->ops->get_status(h);
1583
1584        if (state != priv->link) {
1585                if (state) {
1586                        netif_carrier_on(netdev);
1587                        netif_tx_wake_all_queues(netdev);
1588                        netdev_info(netdev, "link up\n");
1589                } else {
1590                        netif_carrier_off(netdev);
1591                        netdev_info(netdev, "link down\n");
1592                }
1593                priv->link = state;
1594        }
1595}
1596
1597/* for dumping key regs */
1598static void hns_nic_dump(struct hns_nic_priv *priv)
1599{
1600        struct hnae_handle *h = priv->ae_handle;
1601        struct hnae_ae_ops *ops = h->dev->ops;
1602        u32 *data, reg_num, i;
1603
1604        if (ops->get_regs_len && ops->get_regs) {
1605                reg_num = ops->get_regs_len(priv->ae_handle);
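                    /* round up so the 4-words-per-line dump stays in bounds */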
1606                reg_num = (reg_num + 3ul) & ~3ul;
1607                data = kcalloc(reg_num, sizeof(u32), GFP_KERNEL);
1608                if (data) {
1609                        ops->get_regs(priv->ae_handle, data);
1610                        for (i = 0; i < reg_num; i += 4)
1611                                pr_info("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
1612                                        i, data[i], data[i + 1],
1613                                        data[i + 2], data[i + 3]);
1614                        kfree(data);
1615                }
1616        }
1617
1618        for (i = 0; i < h->q_num; i++) {
1619                pr_info("tx_queue%d_next_to_clean:%d\n",
1620                        i, h->qs[i]->tx_ring.next_to_clean);
1621                pr_info("tx_queue%d_next_to_use:%d\n",
1622                        i, h->qs[i]->tx_ring.next_to_use);
1623                pr_info("rx_queue%d_next_to_clean:%d\n",
1624                        i, h->qs[i]->rx_ring.next_to_clean);
1625                pr_info("rx_queue%d_next_to_use:%d\n",
1626                        i, h->qs[i]->rx_ring.next_to_use);
1627        }
1628}
1629
1630/* for the reset subtask */
1631static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
1632{
1633        enum hnae_port_type type = priv->ae_handle->port_type;
1634
1635        if (!test_bit(NIC_STATE2_RESET_REQUESTED, &priv->state))
1636                return;
1637        clear_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
1638
1639        /* If we're already down, removing or resetting, just bail */
1640        if (test_bit(NIC_STATE_DOWN, &priv->state) ||
1641            test_bit(NIC_STATE_REMOVING, &priv->state) ||
1642            test_bit(NIC_STATE_RESETTING, &priv->state))
1643                return;
1644
1645        hns_nic_dump(priv);
1646        netdev_info(priv->netdev, "try to reset %s port!\n",
1647                    (type == HNAE_PORT_DEBUG ? "debug" : "service"));
1648
1649        rtnl_lock();
1650        /* put off any impending NetWatchDogTimeout */
1651        priv->netdev->trans_start = jiffies;
1652
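            /* debug ports are re-inited; service ports are only quiesced */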
1653        if (type == HNAE_PORT_DEBUG) {
1654                hns_nic_net_reinit(priv->netdev);
1655        } else {
1656                netif_carrier_off(priv->netdev);
1657                netif_tx_disable(priv->netdev);
1658        }
1659        rtnl_unlock();
1660}
1661
1662/* for completing the service task */
1663static void hns_nic_service_event_complete(struct hns_nic_priv *priv)
1664{
1665        WARN_ON(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state));
1666
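            /* order the service work before clearing NIC_STATE_SERVICE_SCHED */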
1667        smp_mb__before_atomic();
1668        clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
1669}
1670
1671static void hns_nic_service_task(struct work_struct *work)
1672{
1673        struct hns_nic_priv *priv
1674                = container_of(work, struct hns_nic_priv, service_task);
1675        struct hnae_handle *h = priv->ae_handle;
1676
1677        hns_nic_update_link_status(priv->netdev);
1678        h->dev->ops->update_led_status(h);
1679        hns_nic_update_stats(priv->netdev);
1680
1681        hns_nic_reset_subtask(priv);
1682        hns_nic_service_event_complete(priv);
1683}
1684
1685static void hns_nic_task_schedule(struct hns_nic_priv *priv)
1686{
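            /* do not schedule when down, removing or a run is already pending */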
1687        if (!test_bit(NIC_STATE_DOWN, &priv->state) &&
1688            !test_bit(NIC_STATE_REMOVING, &priv->state) &&
1689            !test_and_set_bit(NIC_STATE_SERVICE_SCHED, &priv->state))
1690                (void)schedule_work(&priv->service_task);
1691}
1692
1693static void hns_nic_service_timer(unsigned long data)
1694{
1695        struct hns_nic_priv *priv = (struct hns_nic_priv *)data;
1696
1697        (void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);
1698
1699        hns_nic_task_schedule(priv);
1700}
1701
1702/**
1703 * hns_tx_timeout_reset - initiate reset due to Tx timeout
1704 * @priv: driver private struct
1705 **/
1706static void hns_tx_timeout_reset(struct hns_nic_priv *priv)
1707{
1708        /* Do the reset outside of interrupt context */
1709        if (!test_bit(NIC_STATE_DOWN, &priv->state)) {
1710                set_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
1711                netdev_warn(priv->netdev,
1712                            "initiating reset due to tx timeout(%llu,0x%lx)\n",
1713                            priv->tx_timeout_count, priv->state);
1714                priv->tx_timeout_count++;
1715                hns_nic_task_schedule(priv);
1716        }
1717}
1718
1719static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
1720{
1721        struct hnae_handle *h = priv->ae_handle;
1722        struct hns_nic_ring_data *rd;
1723        bool is_ver1 = AE_IS_VER1(priv->enet_ver);
1724        int i;
1725
1726        if (h->q_num > NIC_MAX_Q_PER_VF) {
1727                netdev_err(priv->netdev, "too many queues (%d)\n", h->q_num);
1728                return -EINVAL;
1729        }
1730
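            /* tx rings occupy [0, q_num), rx rings occupy [q_num, 2 * q_num) */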
1731        priv->ring_data = kzalloc(h->q_num * sizeof(*priv->ring_data) * 2,
1732                                  GFP_KERNEL);
1733        if (!priv->ring_data)
1734                return -ENOMEM;
1735
1736        for (i = 0; i < h->q_num; i++) {
1737                rd = &priv->ring_data[i];
1738                rd->queue_index = i;
1739                rd->ring = &h->qs[i]->tx_ring;
1740                rd->poll_one = hns_nic_tx_poll_one;
1741                rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro : NULL;
1742
1743                netif_napi_add(priv->netdev, &rd->napi,
1744                               hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
1745                rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
1746        }
1747        for (i = h->q_num; i < h->q_num * 2; i++) {
1748                rd = &priv->ring_data[i];
1749                rd->queue_index = i - h->q_num;
1750                rd->ring = &h->qs[i - h->q_num]->rx_ring;
1751                rd->poll_one = hns_nic_rx_poll_one;
1752                rd->ex_process = hns_nic_rx_up_pro;
1753                rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro : NULL;
1754
1755                netif_napi_add(priv->netdev, &rd->napi,
1756                               hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
1757                rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
1758        }
1759
1760        return 0;
1761}
1762
1763static void hns_nic_uninit_ring_data(struct hns_nic_priv *priv)
1764{
1765        struct hnae_handle *h = priv->ae_handle;
1766        int i;
1767
1768        for (i = 0; i < h->q_num * 2; i++) {
1769                netif_napi_del(&priv->ring_data[i].napi);
1770                if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
1771                        (void)irq_set_affinity_hint(
1772                                priv->ring_data[i].ring->irq,
1773                                NULL);
1774                        free_irq(priv->ring_data[i].ring->irq,
1775                                 &priv->ring_data[i]);
1776                }
1777
1778                priv->ring_data[i].ring->irq_init_flag = RCB_IRQ_NOT_INITED;
1779        }
1780        kfree(priv->ring_data);
1781}
1782
1783static void hns_nic_set_priv_ops(struct net_device *netdev)
1784{
1785        struct hns_nic_priv *priv = netdev_priv(netdev);
1786        struct hnae_handle *h = priv->ae_handle;
1787
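            /* pick tx descriptor helpers based on enet version and TSO flags */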
1788        if (AE_IS_VER1(priv->enet_ver)) {
1789                priv->ops.fill_desc = fill_desc;
1790                priv->ops.get_rxd_bnum = get_rx_desc_bnum;
1791                priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
1792        } else {
1793                priv->ops.get_rxd_bnum = get_v2rx_desc_bnum;
1794                if ((netdev->features & NETIF_F_TSO) ||
1795                    (netdev->features & NETIF_F_TSO6)) {
1796                        priv->ops.fill_desc = fill_tso_desc;
1797                        priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
1798                        /* This chip only supports 7*4096 */
1799                        netif_set_gso_max_size(netdev, 7 * 4096);
1800                        h->dev->ops->set_tso_stats(h, 1);
1801                } else {
1802                        priv->ops.fill_desc = fill_v2_desc;
1803                        priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
1804                }
1805        }
1806}
1807
1808static int hns_nic_try_get_ae(struct net_device *ndev)
1809{
1810        struct hns_nic_priv *priv = netdev_priv(ndev);
1811        struct hnae_handle *h;
1812        int ret;
1813
1814        h = hnae_get_handle(&priv->netdev->dev,
1815                            priv->ae_node, priv->port_id, NULL);
1816        if (IS_ERR_OR_NULL(h)) {
1817                ret = -ENODEV;
1818                dev_dbg(priv->dev, "no ae handle yet, register notifier!\n");
1819                goto out;
1820        }
1821        priv->ae_handle = h;
1822
1823        ret = hns_nic_init_phy(ndev, h);
1824        if (ret) {
1825                dev_err(priv->dev, "probe phy device fail!\n");
1826                goto out_init_phy;
1827        }
1828
1829        ret = hns_nic_init_ring_data(priv);
1830        if (ret) {
1831                ret = -ENOMEM;
1832                goto out_init_ring_data;
1833        }
1834
1835        hns_nic_set_priv_ops(ndev);
1836
1837        ret = register_netdev(ndev);
1838        if (ret) {
1839                dev_err(priv->dev, "probe register netdev fail!\n");
1840                goto out_reg_ndev_fail;
1841        }
1842        return 0;
1843
1844out_reg_ndev_fail:
1845        hns_nic_uninit_ring_data(priv);
1846        priv->ring_data = NULL;
1847out_init_phy:
1848out_init_ring_data:
1849        hnae_put_handle(priv->ae_handle);
1850        priv->ae_handle = NULL;
1851out:
1852        return ret;
1853}
1854
1855static int hns_nic_notifier_action(struct notifier_block *nb,
1856                                   unsigned long action, void *data)
1857{
1858        struct hns_nic_priv *priv =
1859                container_of(nb, struct hns_nic_priv, notifier_block);
1860
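            /* the ae device is registered now, so retry acquiring the handle */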
1861        assert(action == HNAE_AE_REGISTER);
1862
1863        if (!hns_nic_try_get_ae(priv->netdev)) {
1864                hnae_unregister_notifier(&priv->notifier_block);
1865                priv->notifier_block.notifier_call = NULL;
1866        }
1867        return 0;
1868}
1869
1870static int hns_nic_dev_probe(struct platform_device *pdev)
1871{
1872        struct device *dev = &pdev->dev;
1873        struct net_device *ndev;
1874        struct hns_nic_priv *priv;
1875        struct device_node *node = dev->of_node;
1876        int ret;
1877
1878        ndev = alloc_etherdev_mq(sizeof(struct hns_nic_priv), NIC_MAX_Q_PER_VF);
1879        if (!ndev)
1880                return -ENOMEM;
1881
1882        platform_set_drvdata(pdev, ndev);
1883
1884        priv = netdev_priv(ndev);
1885        priv->dev = dev;
1886        priv->netdev = ndev;
1887
1888        if (of_device_is_compatible(node, "hisilicon,hns-nic-v1"))
1889                priv->enet_ver = AE_VERSION_1;
1890        else
1891                priv->enet_ver = AE_VERSION_2;
1892
1893        priv->ae_node = (void *)of_parse_phandle(node, "ae-handle", 0);
1894        if (IS_ERR_OR_NULL(priv->ae_node)) {
1895                ret = PTR_ERR(priv->ae_node);
1896                dev_err(dev, "cannot find ae-handle\n");
1897                goto out_read_prop_fail;
1898        }
1899
1900        ret = of_property_read_u32(node, "port-id", &priv->port_id);
1901        if (ret)
1902                goto out_read_prop_fail;
1903
1904        hns_init_mac_addr(ndev);
1905
1906        ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
1907        ndev->priv_flags |= IFF_UNICAST_FLT;
1908        ndev->netdev_ops = &hns_nic_netdev_ops;
1909        hns_ethtool_set_ops(ndev);
1910
1911        ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1912                NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1913                NETIF_F_GRO;
1914        ndev->vlan_features |=
1915                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
1916        ndev->vlan_features |= NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
1917
1918        switch (priv->enet_ver) {
1919        case AE_VERSION_2:
1920                ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
1921                ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1922                        NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1923                        NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6;
1924                break;
1925        default:
1926                break;
1927        }
1928
1929        SET_NETDEV_DEV(ndev, dev);
1930
1931        if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
1932                dev_dbg(dev, "set mask to 64bit\n");
1933        else
1934                dev_err(dev, "set mask to 64bit fail!\n");
1935
1936        /* carrier off reporting is important to ethtool even BEFORE open */
1937        netif_carrier_off(ndev);
1938
1939        setup_timer(&priv->service_timer, hns_nic_service_timer,
1940                    (unsigned long)priv);
1941        INIT_WORK(&priv->service_task, hns_nic_service_task);
1942
1943        set_bit(NIC_STATE_SERVICE_INITED, &priv->state);
1944        clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
1945        set_bit(NIC_STATE_DOWN, &priv->state);
1946
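            /* defer netdev registration until the ae device shows up */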
1947        if (hns_nic_try_get_ae(priv->netdev)) {
1948                priv->notifier_block.notifier_call = hns_nic_notifier_action;
1949                ret = hnae_register_notifier(&priv->notifier_block);
1950                if (ret) {
1951                        dev_err(dev, "register notifier fail!\n");
1952                        goto out_notify_fail;
1953                }
1954                dev_dbg(dev, "no ae handle yet, register notifier!\n");
1955        }
1956
1957        return 0;
1958
1959out_notify_fail:
1960        (void)cancel_work_sync(&priv->service_task);
1961out_read_prop_fail:
1962        free_netdev(ndev);
1963        return ret;
1964}
1965
1966static int hns_nic_dev_remove(struct platform_device *pdev)
1967{
1968        struct net_device *ndev = platform_get_drvdata(pdev);
1969        struct hns_nic_priv *priv = netdev_priv(ndev);
1970
1971        if (ndev->reg_state != NETREG_UNINITIALIZED)
1972                unregister_netdev(ndev);
1973
1974        if (priv->ring_data)
1975                hns_nic_uninit_ring_data(priv);
1976        priv->ring_data = NULL;
1977
1978        if (priv->phy)
1979                phy_disconnect(priv->phy);
1980        priv->phy = NULL;
1981
1982        if (!IS_ERR_OR_NULL(priv->ae_handle))
1983                hnae_put_handle(priv->ae_handle);
1984        priv->ae_handle = NULL;
1985        if (priv->notifier_block.notifier_call)
1986                hnae_unregister_notifier(&priv->notifier_block);
1987        priv->notifier_block.notifier_call = NULL;
1988
1989        set_bit(NIC_STATE_REMOVING, &priv->state);
1990        (void)cancel_work_sync(&priv->service_task);
1991
1992        free_netdev(ndev);
1993        return 0;
1994}
1995
1996static const struct of_device_id hns_enet_of_match[] = {
1997        {.compatible = "hisilicon,hns-nic-v1",},
1998        {.compatible = "hisilicon,hns-nic-v2",},
1999        {},
2000};
2001
2002MODULE_DEVICE_TABLE(of, hns_enet_of_match);
2003
2004static struct platform_driver hns_nic_dev_driver = {
2005        .driver = {
2006                .name = "hns-nic",
2007                .of_match_table = hns_enet_of_match,
2008        },
2009        .probe = hns_nic_dev_probe,
2010        .remove = hns_nic_dev_remove,
2011};
2012
2013module_platform_driver(hns_nic_dev_driver);
2014
2015MODULE_DESCRIPTION("HISILICON HNS Ethernet driver");
2016MODULE_AUTHOR("Hisilicon, Inc.");
2017MODULE_LICENSE("GPL");
2018MODULE_ALIAS("platform:hns-nic");
2019