linux/drivers/net/ethernet/huawei/hinic/hinic_rx.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>
#include <linux/cpumask.h>
#include <linux/if_vlan.h>
#include <asm/barrier.h>

#include "hinic_common.h"
#include "hinic_hw_if.h"
#include "hinic_hw_wqe.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_qp.h"
#include "hinic_hw_dev.h"
#include "hinic_rx.h"
#include "hinic_dev.h"

#define RX_IRQ_NO_PENDING               0
#define RX_IRQ_NO_COALESC               0
#define RX_IRQ_NO_LLI_TIMER             0
#define RX_IRQ_NO_CREDIT                0
#define RX_IRQ_NO_RESEND_TIMER          0
#define HINIC_RX_BUFFER_WRITE           16

#define HINIC_RX_IPV6_PKT               7
#define LRO_PKT_HDR_LEN_IPV4            66
#define LRO_PKT_HDR_LEN_IPV6            86
#define LRO_REPLENISH_THLD              256

#define LRO_PKT_HDR_LEN(cqe)            \
        (HINIC_GET_RX_PKT_TYPE(be32_to_cpu((cqe)->offload_type)) == \
         HINIC_RX_IPV6_PKT ? LRO_PKT_HDR_LEN_IPV6 : LRO_PKT_HDR_LEN_IPV4)

/**
 * hinic_rxq_clean_stats - Clean the statistics of a specific queue
 * @rxq: Logical Rx Queue
 **/
void hinic_rxq_clean_stats(struct hinic_rxq *rxq)
{
        struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;

        u64_stats_update_begin(&rxq_stats->syncp);
        rxq_stats->pkts  = 0;
        rxq_stats->bytes = 0;
        rxq_stats->errors = 0;
        rxq_stats->csum_errors = 0;
        rxq_stats->other_errors = 0;
        u64_stats_update_end(&rxq_stats->syncp);
}

/**
 * hinic_rxq_get_stats - get statistics of Rx Queue
 * @rxq: Logical Rx Queue
 * @stats: return updated stats here
 **/
void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
{
        struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
        unsigned int start;

        u64_stats_update_begin(&stats->syncp);
        do {
                start = u64_stats_fetch_begin(&rxq_stats->syncp);
                stats->pkts = rxq_stats->pkts;
                stats->bytes = rxq_stats->bytes;
                stats->errors = rxq_stats->csum_errors +
                                rxq_stats->other_errors;
                stats->csum_errors = rxq_stats->csum_errors;
                stats->other_errors = rxq_stats->other_errors;
        } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
        u64_stats_update_end(&stats->syncp);
}

/**
 * rxq_stats_init - Initialize the statistics of a specific queue
 * @rxq: Logical Rx Queue
 **/
static void rxq_stats_init(struct hinic_rxq *rxq)
{
        struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;

        u64_stats_init(&rxq_stats->syncp);
        hinic_rxq_clean_stats(rxq);
}

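/**
 * rx_csum - Set the skb checksum status based on the CQE status
 * @rxq: rx queue the pkt was received on
 * @status: CQE status word from the hw
 * @skb: skb to update
 **/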
static void rx_csum(struct hinic_rxq *rxq, u32 status,
                    struct sk_buff *skb)
{
        struct net_device *netdev = rxq->netdev;
        u32 csum_err;

        csum_err = HINIC_RQ_CQE_STATUS_GET(status, CSUM_ERR);

        if (!(netdev->features & NETIF_F_RXCSUM))
                return;

        if (!csum_err) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        } else {
                if (!(csum_err & (HINIC_RX_CSUM_HW_CHECK_NONE |
                        HINIC_RX_CSUM_IPSU_OTHER_ERR)))
                        rxq->rxq_stats.csum_errors++;
                skb->ip_summed = CHECKSUM_NONE;
        }
}

/**
 * rx_alloc_skb - allocate skb and map it to dma address
 * @rxq: rx queue
 * @dma_addr: returned dma address for the skb
 *
 * Return skb
 **/
static struct sk_buff *rx_alloc_skb(struct hinic_rxq *rxq,
                                    dma_addr_t *dma_addr)
{
        struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
        struct hinic_hwdev *hwdev = nic_dev->hwdev;
        struct hinic_hwif *hwif = hwdev->hwif;
        struct pci_dev *pdev = hwif->pdev;
        struct sk_buff *skb;
        dma_addr_t addr;
        int err;

        skb = netdev_alloc_skb_ip_align(rxq->netdev, rxq->rq->buf_sz);
        if (!skb)
                return NULL;

        addr = dma_map_single(&pdev->dev, skb->data, rxq->rq->buf_sz,
                              DMA_FROM_DEVICE);
        err = dma_mapping_error(&pdev->dev, addr);
        if (err) {
                dev_err(&pdev->dev, "Failed to map Rx DMA, err = %d\n", err);
                goto err_rx_map;
        }

        *dma_addr = addr;
        return skb;

err_rx_map:
        dev_kfree_skb_any(skb);
        return NULL;
}

/**
 * rx_unmap_skb - unmap the dma address of the skb
 * @rxq: rx queue
 * @dma_addr: dma address of the skb
 **/
static void rx_unmap_skb(struct hinic_rxq *rxq, dma_addr_t dma_addr)
{
        struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
        struct hinic_hwdev *hwdev = nic_dev->hwdev;
        struct hinic_hwif *hwif = hwdev->hwif;
        struct pci_dev *pdev = hwif->pdev;

        dma_unmap_single(&pdev->dev, dma_addr, rxq->rq->buf_sz,
                         DMA_FROM_DEVICE);
}

/**
 * rx_free_skb - unmap and free skb
 * @rxq: rx queue
 * @skb: skb to free
 * @dma_addr: dma address of the skb
 **/
static void rx_free_skb(struct hinic_rxq *rxq, struct sk_buff *skb,
                        dma_addr_t dma_addr)
{
        rx_unmap_skb(rxq, dma_addr);
        dev_kfree_skb_any(skb);
}

/**
 * rx_alloc_pkts - allocate pkts in rx queue
 * @rxq: rx queue
 *
 * Return number of skbs allocated
 **/
static int rx_alloc_pkts(struct hinic_rxq *rxq)
{
        struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
        struct hinic_rq_wqe *rq_wqe;
        unsigned int free_wqebbs;
        struct hinic_sge sge;
        dma_addr_t dma_addr;
        struct sk_buff *skb;
        u16 prod_idx;
        int i;

        free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);

        /* Limit the allocation chunks */
        if (free_wqebbs > nic_dev->rx_weight)
                free_wqebbs = nic_dev->rx_weight;

        for (i = 0; i < free_wqebbs; i++) {
                skb = rx_alloc_skb(rxq, &dma_addr);
                if (!skb)
                        goto skb_out;

                hinic_set_sge(&sge, dma_addr, skb->len);

                rq_wqe = hinic_rq_get_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
                                          &prod_idx);
                if (!rq_wqe) {
                        rx_free_skb(rxq, skb, dma_addr);
                        goto skb_out;
                }

                hinic_rq_prepare_wqe(rxq->rq, prod_idx, rq_wqe, &sge);

                hinic_rq_write_wqe(rxq->rq, prod_idx, rq_wqe, skb);
        }

skb_out:
        if (i) {
                wmb();  /* write all the wqes before updating the PI */

                hinic_rq_update(rxq->rq, prod_idx);
        }

        return i;
}

/**
 * free_all_rx_skbs - free all skbs in rx queue
 * @rxq: rx queue
 **/
static void free_all_rx_skbs(struct hinic_rxq *rxq)
{
        struct hinic_rq *rq = rxq->rq;
        struct hinic_hw_wqe *hw_wqe;
        struct hinic_sge sge;
        u16 ci;

        while ((hw_wqe = hinic_read_wqe(rq->wq, HINIC_RQ_WQE_SIZE, &ci))) {
                if (IS_ERR(hw_wqe))
                        break;

                hinic_rq_get_sge(rq, &hw_wqe->rq_wqe, ci, &sge);

                hinic_put_wqe(rq->wq, HINIC_RQ_WQE_SIZE);

                rx_free_skb(rxq, rq->saved_skb[ci], hinic_sge_to_dma(&sge));
        }
}

/**
 * rx_recv_jumbo_pkt - Rx handler for jumbo pkt
 * @rxq: rx queue
 * @head_skb: the first skb in the list
 * @left_pkt_len: remaining size of the pkt, excluding the head skb
 * @ci: consumer index
 *
 * Return number of wqes used for the rest of the pkt
 **/
static int rx_recv_jumbo_pkt(struct hinic_rxq *rxq, struct sk_buff *head_skb,
                             unsigned int left_pkt_len, u16 ci)
{
        struct sk_buff *skb, *curr_skb = head_skb;
        struct hinic_rq_wqe *rq_wqe;
        unsigned int curr_len;
        struct hinic_sge sge;
        int num_wqes = 0;

        while (left_pkt_len > 0) {
                rq_wqe = hinic_rq_read_next_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
                                                &skb, &ci);

                num_wqes++;

                hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);

                rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));

                prefetch(skb->data);

                curr_len = (left_pkt_len > HINIC_RX_BUF_SZ) ? HINIC_RX_BUF_SZ :
                            left_pkt_len;

                left_pkt_len -= curr_len;

                __skb_put(skb, curr_len);

                if (curr_skb == head_skb)
                        skb_shinfo(head_skb)->frag_list = skb;
                else
                        curr_skb->next = skb;

                head_skb->len += skb->len;
                head_skb->data_len += skb->len;
                head_skb->truesize += skb->truesize;

                curr_skb = skb;
        }

        return num_wqes;
}

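/**
 * hinic_copy_lp_data - copy a received loopback test pkt into the test buffer
 * @nic_dev: nic device
 * @skb: received loopback pkt
 **/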
static void hinic_copy_lp_data(struct hinic_dev *nic_dev,
                               struct sk_buff *skb)
{
        struct net_device *netdev = nic_dev->netdev;
        u8 *lb_buf = nic_dev->lb_test_rx_buf;
        int lb_len = nic_dev->lb_pkt_len;
        int pkt_offset, frag_len, i;
        void *frag_data = NULL;

        if (nic_dev->lb_test_rx_idx == LP_PKT_CNT) {
                nic_dev->lb_test_rx_idx = 0;
                netif_warn(nic_dev, drv, netdev, "Loopback test warning, received too many test pkts\n");
        }

        if (skb->len != nic_dev->lb_pkt_len) {
                netif_warn(nic_dev, drv, netdev, "Wrong packet length\n");
                nic_dev->lb_test_rx_idx++;
                return;
        }

        pkt_offset = nic_dev->lb_test_rx_idx * lb_len;
        frag_len = (int)skb_headlen(skb);
        memcpy(lb_buf + pkt_offset, skb->data, frag_len);
        pkt_offset += frag_len;
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                frag_data = skb_frag_address(&skb_shinfo(skb)->frags[i]);
                frag_len = (int)skb_frag_size(&skb_shinfo(skb)->frags[i]);
                memcpy((lb_buf + pkt_offset), frag_data, frag_len);
                pkt_offset += frag_len;
        }
        nic_dev->lb_test_rx_idx++;
}

/**
 * rxq_recv - Rx handler
 * @rxq: rx queue
 * @budget: maximum pkts to process
 *
 * Return number of pkts received
 **/
static int rxq_recv(struct hinic_rxq *rxq, int budget)
{
        struct hinic_qp *qp = container_of(rxq->rq, struct hinic_qp, rq);
        struct net_device *netdev = rxq->netdev;
        u64 pkt_len = 0, rx_bytes = 0;
        struct hinic_rq *rq = rxq->rq;
        struct hinic_rq_wqe *rq_wqe;
        struct hinic_dev *nic_dev;
        unsigned int free_wqebbs;
        struct hinic_rq_cqe *cqe;
        int num_wqes, pkts = 0;
        struct hinic_sge sge;
        unsigned int status;
        struct sk_buff *skb;
        u32 offload_type;
        u16 ci, num_lro;
        u16 num_wqe = 0;
        u32 vlan_len;
        u16 vid;

        nic_dev = netdev_priv(netdev);

        while (pkts < budget) {
                num_wqes = 0;

                rq_wqe = hinic_rq_read_wqe(rxq->rq, HINIC_RQ_WQE_SIZE, &skb,
                                           &ci);
                if (!rq_wqe)
                        break;

                /* make sure we read rx_done before packet length */
                dma_rmb();

                cqe = rq->cqe[ci];
                status = be32_to_cpu(cqe->status);
                hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);

                rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));

                rx_csum(rxq, status, skb);

                prefetch(skb->data);

                pkt_len = sge.len;

                if (pkt_len <= HINIC_RX_BUF_SZ) {
                        __skb_put(skb, pkt_len);
                } else {
                        __skb_put(skb, HINIC_RX_BUF_SZ);
                        num_wqes = rx_recv_jumbo_pkt(rxq, skb, pkt_len -
                                                     HINIC_RX_BUF_SZ, ci);
                }

                hinic_rq_put_wqe(rq, ci,
                                 (num_wqes + 1) * HINIC_RQ_WQE_SIZE);

                offload_type = be32_to_cpu(cqe->offload_type);
                vlan_len = be32_to_cpu(cqe->len);
                if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
                    HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type)) {
                        vid = HINIC_GET_RX_VLAN_TAG(vlan_len);
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
                }

                if (unlikely(nic_dev->flags & HINIC_LP_TEST))
                        hinic_copy_lp_data(nic_dev, skb);

                skb_record_rx_queue(skb, qp->q_id);
                skb->protocol = eth_type_trans(skb, rxq->netdev);

                napi_gro_receive(&rxq->napi, skb);

                pkts++;
                rx_bytes += pkt_len;

                num_lro = HINIC_GET_RX_NUM_LRO(status);
                if (num_lro) {
                        rx_bytes += ((num_lro - 1) *
                                     LRO_PKT_HDR_LEN(cqe));

                        num_wqe += (u16)(pkt_len >> rxq->rx_buff_shift) +
                                   ((pkt_len & (rxq->buf_len - 1)) ? 1 : 0);
                }

                cqe->status = 0;

                if (num_wqe >= LRO_REPLENISH_THLD)
                        break;
        }

        free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);
        if (free_wqebbs > HINIC_RX_BUFFER_WRITE)
                rx_alloc_pkts(rxq);

        u64_stats_update_begin(&rxq->rxq_stats.syncp);
        rxq->rxq_stats.pkts += pkts;
        rxq->rxq_stats.bytes += rx_bytes;
        u64_stats_update_end(&rxq->rxq_stats.syncp);

        return pkts;
}

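/**
 * rx_poll - NAPI poll handler for the rx queue
 * @napi: napi instance embedded in the rx queue
 * @budget: maximum number of pkts to process
 *
 * Return number of pkts received, or budget if the queue is not drained yet
 **/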
static int rx_poll(struct napi_struct *napi, int budget)
{
        struct hinic_rxq *rxq = container_of(napi, struct hinic_rxq, napi);
        struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
        struct hinic_rq *rq = rxq->rq;
        int pkts;

        pkts = rxq_recv(rxq, budget);
        if (pkts >= budget)
                return budget;

        napi_complete(napi);

        if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
                hinic_hwdev_set_msix_state(nic_dev->hwdev,
                                           rq->msix_entry,
                                           HINIC_MSIX_ENABLE);

        return pkts;
}

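/**
 * rx_add_napi - register NAPI for the rx queue and enable it
 * @rxq: rx queue
 **/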
static void rx_add_napi(struct hinic_rxq *rxq)
{
        struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);

        netif_napi_add(rxq->netdev, &rxq->napi, rx_poll, nic_dev->rx_weight);
        napi_enable(&rxq->napi);
}

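/**
 * rx_del_napi - disable NAPI for the rx queue and remove it
 * @rxq: rx queue
 **/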
static void rx_del_napi(struct hinic_rxq *rxq)
{
        napi_disable(&rxq->napi);
        netif_napi_del(&rxq->napi);
}

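/**
 * rx_irq - rx queue interrupt handler, defers the work to NAPI
 * @irq: irq number
 * @data: rx queue that raised the interrupt
 *
 * Return IRQ_HANDLED
 **/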
static irqreturn_t rx_irq(int irq, void *data)
{
        struct hinic_rxq *rxq = (struct hinic_rxq *)data;
        struct hinic_rq *rq = rxq->rq;
        struct hinic_dev *nic_dev;

        /* Disable the interrupt until NAPI processing is complete */
        nic_dev = netdev_priv(rxq->netdev);
        if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
                hinic_hwdev_set_msix_state(nic_dev->hwdev,
                                           rq->msix_entry,
                                           HINIC_MSIX_DISABLE);

        hinic_hwdev_msix_cnt_set(nic_dev->hwdev, rq->msix_entry);

        napi_schedule(&rxq->napi);
        return IRQ_HANDLED;
}

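/**
 * rx_request_irq - set up NAPI and the rx interrupt for the queue
 * @rxq: rx queue
 *
 * Return 0 - Success, negative - Failure
 **/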
static int rx_request_irq(struct hinic_rxq *rxq)
{
        struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
        struct hinic_msix_config interrupt_info = {0};
        struct hinic_intr_coal_info *intr_coal = NULL;
        struct hinic_hwdev *hwdev = nic_dev->hwdev;
        struct hinic_rq *rq = rxq->rq;
        struct hinic_qp *qp;
        int err;

        qp = container_of(rq, struct hinic_qp, rq);

        rx_add_napi(rxq);

        hinic_hwdev_msix_set(hwdev, rq->msix_entry,
                             RX_IRQ_NO_PENDING, RX_IRQ_NO_COALESC,
                             RX_IRQ_NO_LLI_TIMER, RX_IRQ_NO_CREDIT,
                             RX_IRQ_NO_RESEND_TIMER);

        intr_coal = &nic_dev->rx_intr_coalesce[qp->q_id];
        interrupt_info.msix_index = rq->msix_entry;
        interrupt_info.coalesce_timer_cnt = intr_coal->coalesce_timer_cfg;
        interrupt_info.pending_cnt = intr_coal->pending_limt;
        interrupt_info.resend_timer_cnt = intr_coal->resend_timer_cfg;

        err = hinic_set_interrupt_cfg(hwdev, &interrupt_info);
        if (err) {
                netif_err(nic_dev, drv, rxq->netdev,
                          "Failed to set RX interrupt coalescing attribute\n");
                goto err_req_irq;
        }

        err = request_irq(rq->irq, rx_irq, 0, rxq->irq_name, rxq);
        if (err)
                goto err_req_irq;

        cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->affinity_mask);
        err = irq_set_affinity_hint(rq->irq, &rq->affinity_mask);
        if (err)
                goto err_irq_affinity;

        return 0;

err_irq_affinity:
        free_irq(rq->irq, rxq);
err_req_irq:
        rx_del_napi(rxq);
        return err;
}

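/**
 * rx_free_irq - release the rx irq and remove NAPI from the queue
 * @rxq: rx queue
 **/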
static void rx_free_irq(struct hinic_rxq *rxq)
{
        struct hinic_rq *rq = rxq->rq;

        irq_set_affinity_hint(rq->irq, NULL);
        free_irq(rq->irq, rxq);
        rx_del_napi(rxq);
}

/**
 * hinic_init_rxq - Initialize the Rx Queue
 * @rxq: Logical Rx Queue
 * @rq: Hardware Rx Queue to connect the Logical queue with
 * @netdev: network device to connect the Logical queue with
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
                   struct net_device *netdev)
{
        struct hinic_qp *qp = container_of(rq, struct hinic_qp, rq);
        int err, pkts;

        rxq->netdev = netdev;
        rxq->rq = rq;
        rxq->buf_len = HINIC_RX_BUF_SZ;
        rxq->rx_buff_shift = ilog2(HINIC_RX_BUF_SZ);

        rxq_stats_init(rxq);

        rxq->irq_name = devm_kasprintf(&netdev->dev, GFP_KERNEL,
                                       "%s_rxq%d", netdev->name, qp->q_id);
        if (!rxq->irq_name)
                return -ENOMEM;

        pkts = rx_alloc_pkts(rxq);
        if (!pkts) {
                err = -ENOMEM;
                goto err_rx_pkts;
        }

        err = rx_request_irq(rxq);
        if (err) {
                netdev_err(netdev, "Failed to request Rx irq\n");
                goto err_req_rx_irq;
        }

        return 0;

err_req_rx_irq:
err_rx_pkts:
        free_all_rx_skbs(rxq);
        devm_kfree(&netdev->dev, rxq->irq_name);
        return err;
}

/**
 * hinic_clean_rxq - Clean the Rx Queue
 * @rxq: Logical Rx Queue
 **/
void hinic_clean_rxq(struct hinic_rxq *rxq)
{
        struct net_device *netdev = rxq->netdev;

        rx_free_irq(rxq);

        free_all_rx_skbs(rxq);
        devm_kfree(&netdev->dev, rxq->irq_name);
}