linux/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
   1/*
   2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 */
  32
  33#include <linux/ip.h>
  34#include <linux/ipv6.h>
  35#include <linux/tcp.h>
  36#include <net/ip6_checksum.h>
  37#include <net/page_pool.h>
  38#include <net/inet_ecn.h>
  39#include "en.h"
  40#include "en/txrx.h"
  41#include "en_tc.h"
  42#include "eswitch.h"
  43#include "en_rep.h"
  44#include "en/rep/tc.h"
  45#include "ipoib/ipoib.h"
  46#include "accel/ipsec.h"
  47#include "fpga/ipsec.h"
  48#include "en_accel/ipsec_rxtx.h"
  49#include "en_accel/tls_rxtx.h"
  50#include "en/xdp.h"
  51#include "en/xsk/rx.h"
  52#include "en/health.h"
  53#include "en/params.h"
  54#include "devlink.h"
  55#include "en/devlink.h"
  56
  57static struct sk_buff *
  58mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
  59                                u16 cqe_bcnt, u32 head_offset, u32 page_idx);
  60static struct sk_buff *
  61mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
  62                                   u16 cqe_bcnt, u32 head_offset, u32 page_idx);
  63static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
  64static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
  65
  66const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic = {
  67        .handle_rx_cqe       = mlx5e_handle_rx_cqe,
  68        .handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
  69};
  70
  71static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
  72{
  73        return config->rx_filter == HWTSTAMP_FILTER_ALL;
  74}
  75
  76static inline void mlx5e_read_cqe_slot(struct mlx5_cqwq *wq,
  77                                       u32 cqcc, void *data)
  78{
  79        u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc);
  80
  81        memcpy(data, mlx5_cqwq_get_wqe(wq, ci), sizeof(struct mlx5_cqe64));
  82}
  83
  84static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
  85                                         struct mlx5_cqwq *wq,
  86                                         u32 cqcc)
  87{
  88        struct mlx5e_cq_decomp *cqd = &rq->cqd;
  89        struct mlx5_cqe64 *title = &cqd->title;
  90
  91        mlx5e_read_cqe_slot(wq, cqcc, title);
  92        cqd->left        = be32_to_cpu(title->byte_cnt);
  93        cqd->wqe_counter = be16_to_cpu(title->wqe_counter);
  94        rq->stats->cqe_compress_blks++;
  95}
  96
  97static inline void mlx5e_read_mini_arr_slot(struct mlx5_cqwq *wq,
  98                                            struct mlx5e_cq_decomp *cqd,
  99                                            u32 cqcc)
 100{
 101        mlx5e_read_cqe_slot(wq, cqcc, cqd->mini_arr);
 102        cqd->mini_arr_idx = 0;
 103}
 104
 105static inline void mlx5e_cqes_update_owner(struct mlx5_cqwq *wq, int n)
 106{
 107        u32 cqcc   = wq->cc;
 108        u8  op_own = mlx5_cqwq_get_ctr_wrap_cnt(wq, cqcc) & 1;
 109        u32 ci     = mlx5_cqwq_ctr2ix(wq, cqcc);
 110        u32 wq_sz  = mlx5_cqwq_get_size(wq);
 111        u32 ci_top = min_t(u32, wq_sz, ci + n);
 112
 113        for (; ci < ci_top; ci++, n--) {
 114                struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
 115
 116                cqe->op_own = op_own;
 117        }
 118
 119        if (unlikely(ci == wq_sz)) {
 120                op_own = !op_own;
 121                for (ci = 0; ci < n; ci++) {
 122                        struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
 123
 124                        cqe->op_own = op_own;
 125                }
 126        }
 127}
 128
 129static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
 130                                        struct mlx5_cqwq *wq,
 131                                        u32 cqcc)
 132{
 133        struct mlx5e_cq_decomp *cqd = &rq->cqd;
 134        struct mlx5_mini_cqe8 *mini_cqe = &cqd->mini_arr[cqd->mini_arr_idx];
 135        struct mlx5_cqe64 *title = &cqd->title;
 136
 137        title->byte_cnt     = mini_cqe->byte_cnt;
 138        title->check_sum    = mini_cqe->checksum;
 139        title->op_own      &= 0xf0;
 140        title->op_own      |= 0x01 & (cqcc >> wq->fbc.log_sz);
 141
 142        /* state bit set implies linked-list striding RQ wq type and
 143         * HW stride index capability supported
 144         */
 145        if (test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)) {
 146                title->wqe_counter = mini_cqe->stridx;
 147                return;
 148        }
 149
 150        /* HW stride index capability not supported */
 151        title->wqe_counter = cpu_to_be16(cqd->wqe_counter);
 152        if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
 153                cqd->wqe_counter += mpwrq_get_cqe_consumed_strides(title);
 154        else
 155                cqd->wqe_counter =
 156                        mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cqd->wqe_counter + 1);
 157}
 158
 159static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
 160                                                struct mlx5_cqwq *wq,
 161                                                u32 cqcc)
 162{
 163        struct mlx5e_cq_decomp *cqd = &rq->cqd;
 164
 165        mlx5e_decompress_cqe(rq, wq, cqcc);
 166        cqd->title.rss_hash_type   = 0;
 167        cqd->title.rss_hash_result = 0;
 168}
 169
 170static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
 171                                             struct mlx5_cqwq *wq,
 172                                             int update_owner_only,
 173                                             int budget_rem)
 174{
 175        struct mlx5e_cq_decomp *cqd = &rq->cqd;
 176        u32 cqcc = wq->cc + update_owner_only;
 177        u32 cqe_count;
 178        u32 i;
 179
 180        cqe_count = min_t(u32, cqd->left, budget_rem);
 181
 182        for (i = update_owner_only; i < cqe_count;
 183             i++, cqd->mini_arr_idx++, cqcc++) {
 184                if (cqd->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE)
 185                        mlx5e_read_mini_arr_slot(wq, cqd, cqcc);
 186
 187                mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
 188                INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
 189                                mlx5e_handle_rx_cqe, rq, &cqd->title);
 190        }
 191        mlx5e_cqes_update_owner(wq, cqcc - wq->cc);
 192        wq->cc = cqcc;
 193        cqd->left -= cqe_count;
 194        rq->stats->cqe_compress_pkts += cqe_count;
 195
 196        return cqe_count;
 197}
 198
 199static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
 200                                              struct mlx5_cqwq *wq,
 201                                              int budget_rem)
 202{
 203        struct mlx5e_cq_decomp *cqd = &rq->cqd;
 204        u32 cc = wq->cc;
 205
 206        mlx5e_read_title_slot(rq, wq, cc);
 207        mlx5e_read_mini_arr_slot(wq, cqd, cc + 1);
 208        mlx5e_decompress_cqe(rq, wq, cc);
 209        INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
 210                        mlx5e_handle_rx_cqe, rq, &cqd->title);
 211        cqd->mini_arr_idx++;
 212
 213        return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1;
 214}
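/* Informal overview of the compressed-CQE path above (a reader's sketch,
 * not authoritative documentation): with CQE compression the device writes
 * one full "title" CQE per session, followed by slots of packed mini CQEs.
 * mlx5e_read_title_slot() caches the title and takes the session length
 * from title->byte_cnt; mlx5e_decompress_cqe() then rebuilds a full CQE in
 * place for each mini CQE, deriving the ownership bit from the consumer
 * counter (cqcc >> log_sz) and the wqe_counter either from the HW stride
 * index (MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX) or from software accounting.
 * mlx5e_decompress_cqes_start() expands the first entry, _cont() walks the
 * rest up to budget_rem (so cqd->left can carry a partially consumed
 * session over to the next NAPI poll), and mlx5e_cqes_update_owner()
 * rewrites op_own on the consumed slots - flipping the bit for the part
 * that wraps past the end of the ring - so the normal CQ polling logic
 * stays valid after wq->cc jumps ahead. cqe_compress_blks counts sessions,
 * cqe_compress_pkts counts expanded CQEs.
 */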
 215
 216static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
 217                                      struct mlx5e_dma_info *dma_info)
 218{
 219        struct mlx5e_page_cache *cache = &rq->page_cache;
 220        u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1);
 221        struct mlx5e_rq_stats *stats = rq->stats;
 222
 223        if (tail_next == cache->head) {
 224                stats->cache_full++;
 225                return false;
 226        }
 227
 228        if (!dev_page_is_reusable(dma_info->page)) {
 229                stats->cache_waive++;
 230                return false;
 231        }
 232
 233        cache->page_cache[cache->tail] = *dma_info;
 234        cache->tail = tail_next;
 235        return true;
 236}
 237
 238static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq,
 239                                      struct mlx5e_dma_info *dma_info)
 240{
 241        struct mlx5e_page_cache *cache = &rq->page_cache;
 242        struct mlx5e_rq_stats *stats = rq->stats;
 243
 244        if (unlikely(cache->head == cache->tail)) {
 245                stats->cache_empty++;
 246                return false;
 247        }
 248
 249        if (page_ref_count(cache->page_cache[cache->head].page) != 1) {
 250                stats->cache_busy++;
 251                return false;
 252        }
 253
 254        *dma_info = cache->page_cache[cache->head];
 255        cache->head = (cache->head + 1) & (MLX5E_CACHE_SIZE - 1);
 256        stats->cache_reuse++;
 257
 258        dma_sync_single_for_device(rq->pdev, dma_info->addr,
 259                                   PAGE_SIZE,
 260                                   DMA_FROM_DEVICE);
 261        return true;
 262}
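/* The page cache above is a small ring indexed with a power-of-two mask
 * (the "& (MLX5E_CACHE_SIZE - 1)" arithmetic assumes MLX5E_CACHE_SIZE is a
 * power of two). Illustrative example, assuming a hypothetical size of 8:
 * tail == 7 gives tail_next == (7 + 1) & 7 == 0, and if head is also 0 the
 * put is rejected as "cache_full", i.e. one slot is always left unused to
 * distinguish full from empty. A put is also waived for pages that are no
 * longer reusable (dev_page_is_reusable()); a get is refused while
 * page_ref_count() != 1, meaning the stack still holds a reference
 * ("cache_busy"). On reuse the DMA mapping is kept, so only
 * dma_sync_single_for_device() is needed before handing the page back to
 * hardware.
 */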
 263
 264static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq,
 265                                        struct mlx5e_dma_info *dma_info)
 266{
 267        if (mlx5e_rx_cache_get(rq, dma_info))
 268                return 0;
 269
 270        dma_info->page = page_pool_dev_alloc_pages(rq->page_pool);
 271        if (unlikely(!dma_info->page))
 272                return -ENOMEM;
 273
 274        dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0,
 275                                      PAGE_SIZE, rq->buff.map_dir);
 276        if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
 277                page_pool_recycle_direct(rq->page_pool, dma_info->page);
 278                dma_info->page = NULL;
 279                return -ENOMEM;
 280        }
 281
 282        return 0;
 283}
 284
 285static inline int mlx5e_page_alloc(struct mlx5e_rq *rq,
 286                                   struct mlx5e_dma_info *dma_info)
 287{
 288        if (rq->xsk_pool)
 289                return mlx5e_xsk_page_alloc_pool(rq, dma_info);
 290        else
 291                return mlx5e_page_alloc_pool(rq, dma_info);
 292}
 293
 294void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info)
 295{
 296        dma_unmap_page(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir);
 297}
 298
 299void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
 300                                struct mlx5e_dma_info *dma_info,
 301                                bool recycle)
 302{
 303        if (likely(recycle)) {
 304                if (mlx5e_rx_cache_put(rq, dma_info))
 305                        return;
 306
 307                mlx5e_page_dma_unmap(rq, dma_info);
 308                page_pool_recycle_direct(rq->page_pool, dma_info->page);
 309        } else {
 310                mlx5e_page_dma_unmap(rq, dma_info);
 311                page_pool_release_page(rq->page_pool, dma_info->page);
 312                put_page(dma_info->page);
 313        }
 314}
 315
 316static inline void mlx5e_page_release(struct mlx5e_rq *rq,
 317                                      struct mlx5e_dma_info *dma_info,
 318                                      bool recycle)
 319{
 320        if (rq->xsk_pool)
 321                /* The `recycle` parameter is ignored, and the page is always
 322                 * put into the Reuse Ring, because there is no way to return
 323                 * the page to the userspace when the interface goes down.
 324                 */
 325                xsk_buff_free(dma_info->xsk);
 326        else
 327                mlx5e_page_release_dynamic(rq, dma_info, recycle);
 328}
 329
 330static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
 331                                    struct mlx5e_wqe_frag_info *frag)
 332{
 333        int err = 0;
 334
 335        if (!frag->offset)
 336                /* On first frag (offset == 0), replenish page (dma_info actually).
 337                 * Other frags that point to the same dma_info (with a different
 338                 * offset) should just use the new one without replenishing again
 339                 * by themselves.
 340                 */
 341                err = mlx5e_page_alloc(rq, frag->di);
 342
 343        return err;
 344}
 345
 346static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
 347                                     struct mlx5e_wqe_frag_info *frag,
 348                                     bool recycle)
 349{
 350        if (frag->last_in_page)
 351                mlx5e_page_release(rq, frag->di, recycle);
 352}
 353
 354static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
 355{
 356        return &rq->wqe.frags[ix << rq->wqe.info.log_num_frags];
 357}
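/* Frag bookkeeping for the legacy RQ: rq->wqe.frags[] is laid out with a
 * fixed stride of (1 << log_num_frags) entries per WQE, so get_frag()
 * returns the first fragment descriptor of WQE 'ix'. Illustrative example
 * (values assumed, not taken from the code): with two frags per WQE,
 * log_num_frags == 1 and WQE 5 owns frags[10] and frags[11]. Only the frag
 * with offset == 0 allocates the page; later frags share the same
 * dma_info, and only the one marked last_in_page releases it.
 */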
 358
 359static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
 360                              u16 ix)
 361{
 362        struct mlx5e_wqe_frag_info *frag = get_frag(rq, ix);
 363        int err;
 364        int i;
 365
 366        for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) {
 367                err = mlx5e_get_rx_frag(rq, frag);
 368                if (unlikely(err))
 369                        goto free_frags;
 370
 371                wqe->data[i].addr = cpu_to_be64(frag->di->addr +
 372                                                frag->offset + rq->buff.headroom);
 373        }
 374
 375        return 0;
 376
 377free_frags:
 378        while (--i >= 0)
 379                mlx5e_put_rx_frag(rq, --frag, true);
 380
 381        return err;
 382}
 383
 384static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
 385                                     struct mlx5e_wqe_frag_info *wi,
 386                                     bool recycle)
 387{
 388        int i;
 389
 390        for (i = 0; i < rq->wqe.info.num_frags; i++, wi++)
 391                mlx5e_put_rx_frag(rq, wi, recycle);
 392}
 393
 394static void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
 395{
 396        struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix);
 397
 398        mlx5e_free_rx_wqe(rq, wi, false);
 399}
 400
 401static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk)
 402{
 403        struct mlx5_wq_cyc *wq = &rq->wqe.wq;
 404        int err;
 405        int i;
 406
 407        if (rq->xsk_pool) {
 408                int pages_desired = wqe_bulk << rq->wqe.info.log_num_frags;
 409
 410                /* Check in advance that we have enough frames, instead of
 411                 * allocating one-by-one, failing and moving frames to the
 412                 * Reuse Ring.
 413                 */
 414                if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, pages_desired)))
 415                        return -ENOMEM;
 416        }
 417
 418        for (i = 0; i < wqe_bulk; i++) {
 419                struct mlx5e_rx_wqe_cyc *wqe = mlx5_wq_cyc_get_wqe(wq, ix + i);
 420
 421                err = mlx5e_alloc_rx_wqe(rq, wqe, ix + i);
 422                if (unlikely(err))
 423                        goto free_wqes;
 424        }
 425
 426        return 0;
 427
 428free_wqes:
 429        while (--i >= 0)
 430                mlx5e_dealloc_rx_wqe(rq, ix + i);
 431
 432        return err;
 433}
 434
 435static inline void
 436mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
 437                   struct mlx5e_dma_info *di, u32 frag_offset, u32 len,
 438                   unsigned int truesize)
 439{
 440        dma_sync_single_for_cpu(rq->pdev,
 441                                di->addr + frag_offset,
 442                                len, DMA_FROM_DEVICE);
 443        page_ref_inc(di->page);
 444        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 445                        di->page, frag_offset, len, truesize);
 446}
 447
 448static inline void
 449mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb,
 450                      struct mlx5e_dma_info *dma_info,
 451                      int offset_from, u32 headlen)
 452{
 453        const void *from = page_address(dma_info->page) + offset_from;
 454        /* Aligning len to sizeof(long) optimizes memcpy performance */
 455        unsigned int len = ALIGN(headlen, sizeof(long));
 456
 457        dma_sync_single_for_cpu(pdev, dma_info->addr + offset_from, len,
 458                                DMA_FROM_DEVICE);
 459        skb_copy_to_linear_data(skb, from, len);
 460}
 461
 462static void
 463mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, bool recycle)
 464{
 465        bool no_xdp_xmit;
 466        struct mlx5e_dma_info *dma_info = wi->umr.dma_info;
 467        int i;
 468
 469        /* A common case for AF_XDP. */
 470        if (bitmap_full(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE))
 471                return;
 472
 473        no_xdp_xmit = bitmap_empty(wi->xdp_xmit_bitmap,
 474                                   MLX5_MPWRQ_PAGES_PER_WQE);
 475
 476        for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++)
 477                if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
 478                        mlx5e_page_release(rq, &dma_info[i], recycle);
 479}
 480
 481static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)
 482{
 483        struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
 484
 485        do {
 486                u16 next_wqe_index = mlx5_wq_ll_get_wqe_next_ix(wq, wq->head);
 487
 488                mlx5_wq_ll_push(wq, next_wqe_index);
 489        } while (--n);
 490
 491        /* ensure wqes are visible to device before updating doorbell record */
 492        dma_wmb();
 493
 494        mlx5_wq_ll_update_db_record(wq);
 495}
 496
 497static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 498{
 499        struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
 500        struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0];
 501        struct mlx5e_icosq *sq = rq->icosq;
 502        struct mlx5_wq_cyc *wq = &sq->wq;
 503        struct mlx5e_umr_wqe *umr_wqe;
 504        u16 pi;
 505        int err;
 506        int i;
 507
 508        /* Check in advance that we have enough frames, instead of allocating
 509         * one-by-one, failing and moving frames to the Reuse Ring.
 510         */
 511        if (rq->xsk_pool &&
 512            unlikely(!xsk_buff_can_alloc(rq->xsk_pool, MLX5_MPWRQ_PAGES_PER_WQE))) {
 513                err = -ENOMEM;
 514                goto err;
 515        }
 516
 517        pi = mlx5e_icosq_get_next_pi(sq, MLX5E_UMR_WQEBBS);
 518        umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
 519        memcpy(umr_wqe, &rq->mpwqe.umr_wqe, offsetof(struct mlx5e_umr_wqe, inline_mtts));
 520
 521        for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) {
 522                err = mlx5e_page_alloc(rq, dma_info);
 523                if (unlikely(err))
 524                        goto err_unmap;
 525                umr_wqe->inline_mtts[i].ptag = cpu_to_be64(dma_info->addr | MLX5_EN_WR);
 526        }
 527
 528        bitmap_zero(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
 529        wi->consumed_strides = 0;
 530
 531        umr_wqe->ctrl.opmod_idx_opcode =
 532                cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
 533                            MLX5_OPCODE_UMR);
 534        umr_wqe->uctrl.xlt_offset =
 535                cpu_to_be16(MLX5_ALIGNED_MTTS_OCTW(MLX5E_REQUIRED_MTTS(ix)));
 536
 537        sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
 538                .wqe_type   = MLX5E_ICOSQ_WQE_UMR_RX,
 539                .num_wqebbs = MLX5E_UMR_WQEBBS,
 540                .umr.rq     = rq,
 541        };
 542
 543        sq->pc += MLX5E_UMR_WQEBBS;
 544
 545        sq->doorbell_cseg = &umr_wqe->ctrl;
 546
 547        return 0;
 548
 549err_unmap:
 550        while (--i >= 0) {
 551                dma_info--;
 552                mlx5e_page_release(rq, dma_info, true);
 553        }
 554
 555err:
 556        rq->stats->buff_alloc_err++;
 557
 558        return err;
 559}
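/* Sketch of the UMR posting done above (descriptive only): each
 * striding-RQ WQE needs its MLX5_MPWRQ_PAGES_PER_WQE pages re-mapped via a
 * UMR work request on the associated ICOSQ. The control segment is copied
 * from the preformatted template in rq->mpwqe.umr_wqe, the inline MTTs are
 * filled with the freshly mapped page addresses tagged with MLX5_EN_WR,
 * and xlt_offset points the translation update at the MTT range of WQE
 * 'ix'. The ICOSQ wqe_info entry records the WQE type and size so that
 * mlx5e_poll_ico_cq() can credit umr_completed on completion, and the
 * doorbell is only armed here (sq->doorbell_cseg); it is actually rung in
 * mlx5e_post_rx_mpwqes() once per bulk.
 */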
 560
 561static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 562{
 563        struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
 564        /* Don't recycle, this function is called on rq/netdev close */
 565        mlx5e_free_rx_mpwqe(rq, wi, false);
 566}
 567
 568INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
 569{
 570        struct mlx5_wq_cyc *wq = &rq->wqe.wq;
 571        u8 wqe_bulk;
 572        int err;
 573
 574        if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
 575                return false;
 576
 577        wqe_bulk = rq->wqe.info.wqe_bulk;
 578
 579        if (mlx5_wq_cyc_missing(wq) < wqe_bulk)
 580                return false;
 581
 582        if (rq->page_pool)
 583                page_pool_nid_changed(rq->page_pool, numa_mem_id());
 584
 585        do {
 586                u16 head = mlx5_wq_cyc_get_head(wq);
 587
 588                err = mlx5e_alloc_rx_wqes(rq, head, wqe_bulk);
 589                if (unlikely(err)) {
 590                        rq->stats->buff_alloc_err++;
 591                        break;
 592                }
 593
 594                mlx5_wq_cyc_push_n(wq, wqe_bulk);
 595        } while (mlx5_wq_cyc_missing(wq) >= wqe_bulk);
 596
 597        /* ensure wqes are visible to device before updating doorbell record */
 598        dma_wmb();
 599
 600        mlx5_wq_cyc_update_db_record(wq);
 601
 602        return !!err;
 603}
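/* Refill loop for the cyclic (legacy) RQ above, in short: nothing is
 * posted unless at least wqe_bulk descriptors are missing, allocations
 * happen in bulks (with an up-front availability check for XSK so frames
 * do not end up in the Reuse Ring), and dma_wmb() orders the WQE writes
 * against the doorbell record update so the device never fetches a
 * half-written descriptor. The function returns !!err, i.e. true when the
 * refill stopped on an allocation failure, which the caller can use to
 * treat the RQ as still busy (an interpretation of the caller's behaviour,
 * not spelled out here).
 */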
 604
 605void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq)
 606{
 607        u16 sqcc;
 608
 609        sqcc = sq->cc;
 610
 611        while (sqcc != sq->pc) {
 612                struct mlx5e_icosq_wqe_info *wi;
 613                u16 ci;
 614
 615                ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
 616                wi = &sq->db.wqe_info[ci];
 617                sqcc += wi->num_wqebbs;
 618#ifdef CONFIG_MLX5_EN_TLS
 619                switch (wi->wqe_type) {
 620                case MLX5E_ICOSQ_WQE_SET_PSV_TLS:
 621                        mlx5e_ktls_handle_ctx_completion(wi);
 622                        break;
 623                case MLX5E_ICOSQ_WQE_GET_PSV_TLS:
 624                        mlx5e_ktls_handle_get_psv_completion(wi, sq);
 625                        break;
 626                }
 627#endif
 628        }
 629        sq->cc = sqcc;
 630}
 631
 632int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
 633{
 634        struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
 635        struct mlx5_cqe64 *cqe;
 636        u16 sqcc;
 637        int i;
 638
 639        if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
 640                return 0;
 641
 642        cqe = mlx5_cqwq_get_cqe(&cq->wq);
 643        if (likely(!cqe))
 644                return 0;
 645
 646        /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
 647         * otherwise a cq overrun may occur
 648         */
 649        sqcc = sq->cc;
 650
 651        i = 0;
 652        do {
 653                u16 wqe_counter;
 654                bool last_wqe;
 655
 656                mlx5_cqwq_pop(&cq->wq);
 657
 658                wqe_counter = be16_to_cpu(cqe->wqe_counter);
 659
 660                do {
 661                        struct mlx5e_icosq_wqe_info *wi;
 662                        u16 ci;
 663
 664                        last_wqe = (sqcc == wqe_counter);
 665
 666                        ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
 667                        wi = &sq->db.wqe_info[ci];
 668                        sqcc += wi->num_wqebbs;
 669
 670                        if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
 671                                netdev_WARN_ONCE(cq->netdev,
 672                                                 "Bad OP in ICOSQ CQE: 0x%x\n",
 673                                                 get_cqe_opcode(cqe));
 674                                mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
 675                                                     (struct mlx5_err_cqe *)cqe);
 676                                mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
 677                                if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
 678                                        queue_work(cq->priv->wq, &sq->recover_work);
 679                                break;
 680                        }
 681
 682                        switch (wi->wqe_type) {
 683                        case MLX5E_ICOSQ_WQE_UMR_RX:
 684                                wi->umr.rq->mpwqe.umr_completed++;
 685                                break;
 686                        case MLX5E_ICOSQ_WQE_NOP:
 687                                break;
 688#ifdef CONFIG_MLX5_EN_TLS
 689                        case MLX5E_ICOSQ_WQE_UMR_TLS:
 690                                break;
 691                        case MLX5E_ICOSQ_WQE_SET_PSV_TLS:
 692                                mlx5e_ktls_handle_ctx_completion(wi);
 693                                break;
 694                        case MLX5E_ICOSQ_WQE_GET_PSV_TLS:
 695                                mlx5e_ktls_handle_get_psv_completion(wi, sq);
 696                                break;
 697#endif
 698                        default:
 699                                netdev_WARN_ONCE(cq->netdev,
 700                                                 "Bad WQE type in ICOSQ WQE info: 0x%x\n",
 701                                                 wi->wqe_type);
 702                        }
 703                } while (!last_wqe);
 704        } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
 705
 706        sq->cc = sqcc;
 707
 708        mlx5_cqwq_update_db_record(&cq->wq);
 709
 710        return i;
 711}
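/* ICOSQ completion handling above, informally: each CQE's wqe_counter
 * names the last WQE of a completed batch, so the inner loop walks
 * wqe_info entries and advances sqcc by num_wqebbs until it catches up
 * (last_wqe). Error CQEs (opcode != MLX5_CQE_REQ) are dumped and kick the
 * recovery worker once via MLX5E_SQ_STATE_RECOVERING. UMR completions only
 * bump rq->mpwqe.umr_completed; the RQ itself is advanced later in
 * mlx5e_post_rx_mpwqes(). The outer loop is bounded by
 * MLX5E_TX_CQ_POLL_BUDGET, and sq->cc is published only after the whole
 * batch has been walked.
 */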
 712
 713INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
 714{
 715        struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
 716        u8  umr_completed = rq->mpwqe.umr_completed;
 717        struct mlx5e_icosq *sq = rq->icosq;
 718        int alloc_err = 0;
 719        u8  missing, i;
 720        u16 head;
 721
 722        if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
 723                return false;
 724
 725        if (umr_completed) {
 726                mlx5e_post_rx_mpwqe(rq, umr_completed);
 727                rq->mpwqe.umr_in_progress -= umr_completed;
 728                rq->mpwqe.umr_completed = 0;
 729        }
 730
 731        missing = mlx5_wq_ll_missing(wq) - rq->mpwqe.umr_in_progress;
 732
 733        if (unlikely(rq->mpwqe.umr_in_progress > rq->mpwqe.umr_last_bulk))
 734                rq->stats->congst_umr++;
 735
 736#define UMR_WQE_BULK (2)
 737        if (likely(missing < UMR_WQE_BULK))
 738                return false;
 739
 740        if (rq->page_pool)
 741                page_pool_nid_changed(rq->page_pool, numa_mem_id());
 742
 743        head = rq->mpwqe.actual_wq_head;
 744        i = missing;
 745        do {
 746                alloc_err = mlx5e_alloc_rx_mpwqe(rq, head);
 747
 748                if (unlikely(alloc_err))
 749                        break;
 750                head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
 751        } while (--i);
 752
 753        rq->mpwqe.umr_last_bulk    = missing - i;
 754        if (sq->doorbell_cseg) {
 755                mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg);
 756                sq->doorbell_cseg = NULL;
 757        }
 758
 759        rq->mpwqe.umr_in_progress += rq->mpwqe.umr_last_bulk;
 760        rq->mpwqe.actual_wq_head   = head;
 761
 762        /* If XSK Fill Ring doesn't have enough frames, report the error, so
 763         * that one of the actions can be performed:
 764         * 1. If need_wakeup is used, signal that the application has to kick
 765         * the driver when it refills the Fill Ring.
 766         * 2. Otherwise, busy poll by rescheduling the NAPI poll.
 767         */
 768        if (unlikely(alloc_err == -ENOMEM && rq->xsk_pool))
 769                return true;
 770
 771        return false;
 772}
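/* MPWQE refill above, in short: UMR WQEs completed on the ICOSQ are first
 * turned into posted RX WQEs (mlx5e_post_rx_mpwqe), then 'missing' counts
 * the free WQEs not already covered by in-flight UMRs. New UMRs are only
 * issued once at least UMR_WQE_BULK of them are needed, and the ICOSQ
 * doorbell is rung once for the whole bulk via sq->doorbell_cseg. The
 * return value is true only in the XSK out-of-frames case, which - per the
 * comment above - lets the caller either rely on need_wakeup or keep
 * rescheduling NAPI until the application refills the Fill Ring.
 */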
 773
 774static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
 775{
 776        u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
 777        u8 tcp_ack     = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
 778                         (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
 779
 780        tcp->check                      = 0;
 781        tcp->psh                        = get_cqe_lro_tcppsh(cqe);
 782
 783        if (tcp_ack) {
 784                tcp->ack                = 1;
 785                tcp->ack_seq            = cqe->lro_ack_seq_num;
 786                tcp->window             = cqe->lro_tcp_win;
 787        }
 788}
 789
 790static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
 791                                 u32 cqe_bcnt)
 792{
 793        struct ethhdr   *eth = (struct ethhdr *)(skb->data);
 794        struct tcphdr   *tcp;
 795        int network_depth = 0;
 796        __wsum check;
 797        __be16 proto;
 798        u16 tot_len;
 799        void *ip_p;
 800
 801        proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
 802
 803        tot_len = cqe_bcnt - network_depth;
 804        ip_p = skb->data + network_depth;
 805
 806        if (proto == htons(ETH_P_IP)) {
 807                struct iphdr *ipv4 = ip_p;
 808
 809                tcp = ip_p + sizeof(struct iphdr);
 810                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 811
 812                ipv4->ttl               = cqe->lro_min_ttl;
 813                ipv4->tot_len           = cpu_to_be16(tot_len);
 814                ipv4->check             = 0;
 815                ipv4->check             = ip_fast_csum((unsigned char *)ipv4,
 816                                                       ipv4->ihl);
 817
 818                mlx5e_lro_update_tcp_hdr(cqe, tcp);
 819                check = csum_partial(tcp, tcp->doff * 4,
 820                                     csum_unfold((__force __sum16)cqe->check_sum));
 821                /* Almost done, don't forget the pseudo header */
 822                tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr,
 823                                               tot_len - sizeof(struct iphdr),
 824                                               IPPROTO_TCP, check);
 825        } else {
 826                u16 payload_len = tot_len - sizeof(struct ipv6hdr);
 827                struct ipv6hdr *ipv6 = ip_p;
 828
 829                tcp = ip_p + sizeof(struct ipv6hdr);
 830                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
 831
 832                ipv6->hop_limit         = cqe->lro_min_ttl;
 833                ipv6->payload_len       = cpu_to_be16(payload_len);
 834
 835                mlx5e_lro_update_tcp_hdr(cqe, tcp);
 836                check = csum_partial(tcp, tcp->doff * 4,
 837                                     csum_unfold((__force __sum16)cqe->check_sum));
 838                /* Almost done, don't forget the pseudo header */
 839                tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len,
 840                                             IPPROTO_TCP, check);
 841        }
 842}
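/* LRO header rewrite above, informally: hardware has already merged the
 * segments, so the single SKB gets its IP and TCP headers patched up:
 * ttl/hop_limit come from the CQE, the total/payload length from the
 * aggregated byte count (cqe_bcnt minus the L2/VLAN depth), PSH/ACK/window/
 * ack_seq from the CQE, and the TCP checksum is recomputed by summing the
 * TCP header seeded with cqe->check_sum (which presumably already covers
 * the payload) and then folding in the pseudo header via
 * csum_tcpudp_magic()/csum_ipv6_magic(). Note the IPv6 branch locates TCP
 * right after sizeof(struct ipv6hdr), i.e. it does not expect extension
 * headers on LRO'ed flows.
 */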
 843
 844static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
 845                                      struct sk_buff *skb)
 846{
 847        u8 cht = cqe->rss_hash_type;
 848        int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 :
 849                 (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 :
 850                                            PKT_HASH_TYPE_NONE;
 851        skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
 852}
 853
 854static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth,
 855                                        __be16 *proto)
 856{
 857        *proto = ((struct ethhdr *)skb->data)->h_proto;
 858        *proto = __vlan_get_protocol(skb, *proto, network_depth);
 859
 860        if (*proto == htons(ETH_P_IP))
 861                return pskb_may_pull(skb, *network_depth + sizeof(struct iphdr));
 862
 863        if (*proto == htons(ETH_P_IPV6))
 864                return pskb_may_pull(skb, *network_depth + sizeof(struct ipv6hdr));
 865
 866        return false;
 867}
 868
 869static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
 870{
 871        int network_depth = 0;
 872        __be16 proto;
 873        void *ip;
 874        int rc;
 875
 876        if (unlikely(!is_last_ethertype_ip(skb, &network_depth, &proto)))
 877                return;
 878
 879        ip = skb->data + network_depth;
 880        rc = ((proto == htons(ETH_P_IP)) ? IP_ECN_set_ce((struct iphdr *)ip) :
 881                                         IP6_ECN_set_ce(skb, (struct ipv6hdr *)ip));
 882
 883        rq->stats->ecn_mark += !!rc;
 884}
 885
 886static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
 887{
 888        void *ip_p = skb->data + network_depth;
 889
 890        return (proto == htons(ETH_P_IP)) ? ((struct iphdr *)ip_p)->protocol :
 891                                            ((struct ipv6hdr *)ip_p)->nexthdr;
 892}
 893
 894#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
 895
 896#define MAX_PADDING 8
 897
 898static void
 899tail_padding_csum_slow(struct sk_buff *skb, int offset, int len,
 900                       struct mlx5e_rq_stats *stats)
 901{
 902        stats->csum_complete_tail_slow++;
 903        skb->csum = csum_block_add(skb->csum,
 904                                   skb_checksum(skb, offset, len, 0),
 905                                   offset);
 906}
 907
 908static void
 909tail_padding_csum(struct sk_buff *skb, int offset,
 910                  struct mlx5e_rq_stats *stats)
 911{
 912        u8 tail_padding[MAX_PADDING];
 913        int len = skb->len - offset;
 914        void *tail;
 915
 916        if (unlikely(len > MAX_PADDING)) {
 917                tail_padding_csum_slow(skb, offset, len, stats);
 918                return;
 919        }
 920
 921        tail = skb_header_pointer(skb, offset, len, tail_padding);
 922        if (unlikely(!tail)) {
 923                tail_padding_csum_slow(skb, offset, len, stats);
 924                return;
 925        }
 926
 927        stats->csum_complete_tail++;
 928        skb->csum = csum_block_add(skb->csum, csum_partial(tail, len, 0), offset);
 929}
 930
 931static void
 932mlx5e_skb_csum_fixup(struct sk_buff *skb, int network_depth, __be16 proto,
 933                     struct mlx5e_rq_stats *stats)
 934{
 935        struct ipv6hdr *ip6;
 936        struct iphdr   *ip4;
 937        int pkt_len;
 938
 939        /* Fixup vlan headers, if any */
 940        if (network_depth > ETH_HLEN)
 941                /* CQE csum is calculated from the IP header and does
 942                 * not cover VLAN headers (if present). This will add
 943                 * the checksum manually.
 944                 */
 945                skb->csum = csum_partial(skb->data + ETH_HLEN,
 946                                         network_depth - ETH_HLEN,
 947                                         skb->csum);
 948
 949        /* Fixup tail padding, if any */
 950        switch (proto) {
 951        case htons(ETH_P_IP):
 952                ip4 = (struct iphdr *)(skb->data + network_depth);
 953                pkt_len = network_depth + ntohs(ip4->tot_len);
 954                break;
 955        case htons(ETH_P_IPV6):
 956                ip6 = (struct ipv6hdr *)(skb->data + network_depth);
 957                pkt_len = network_depth + sizeof(*ip6) + ntohs(ip6->payload_len);
 958                break;
 959        default:
 960                return;
 961        }
 962
 963        if (likely(pkt_len >= skb->len))
 964                return;
 965
 966        tail_padding_csum(skb, pkt_len, stats);
 967}
 968
 969static inline void mlx5e_handle_csum(struct net_device *netdev,
 970                                     struct mlx5_cqe64 *cqe,
 971                                     struct mlx5e_rq *rq,
 972                                     struct sk_buff *skb,
 973                                     bool   lro)
 974{
 975        struct mlx5e_rq_stats *stats = rq->stats;
 976        int network_depth = 0;
 977        __be16 proto;
 978
 979        if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
 980                goto csum_none;
 981
 982        if (lro) {
 983                skb->ip_summed = CHECKSUM_UNNECESSARY;
 984                stats->csum_unnecessary++;
 985                return;
 986        }
 987
 988        /* True when explicitly set via priv flag, or XDP prog is loaded */
 989        if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))
 990                goto csum_unnecessary;
 991
 992        /* CQE csum doesn't cover padding octets in short ethernet
 993         * frames. And the pad field is appended prior to calculating
 994         * and appending the FCS field.
 995         *
 996         * Detecting these padded frames requires to verify and parse
 997         * IP headers, so we simply force all those small frames to be
 998         * CHECKSUM_UNNECESSARY even if they are not padded.
 999         */
1000        if (short_frame(skb->len))
1001                goto csum_unnecessary;
1002
1003        if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
1004                if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
1005                        goto csum_unnecessary;
1006
1007                stats->csum_complete++;
1008                skb->ip_summed = CHECKSUM_COMPLETE;
1009                skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
1010
1011                if (test_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state))
1012                        return; /* CQE csum covers all received bytes */
1013
1014                /* csum might need some fixups ...*/
1015                mlx5e_skb_csum_fixup(skb, network_depth, proto, stats);
1016                return;
1017        }
1018
1019csum_unnecessary:
1020        if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
1021                   (cqe->hds_ip_ext & CQE_L4_OK))) {
1022                skb->ip_summed = CHECKSUM_UNNECESSARY;
1023                if (cqe_is_tunneled(cqe)) {
1024                        skb->csum_level = 1;
1025                        skb->encapsulation = 1;
1026                        stats->csum_unnecessary_inner++;
1027                        return;
1028                }
1029                stats->csum_unnecessary++;
1030                return;
1031        }
1032csum_none:
1033        skb->ip_summed = CHECKSUM_NONE;
1034        stats->csum_none++;
1035}
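/* Checksum decision tree implemented above, for reference:
 *  - RXCSUM disabled: CHECKSUM_NONE.
 *  - LRO aggregated packet: CHECKSUM_UNNECESSARY.
 *  - NO_CSUM_COMPLETE set, short frame, non-IP ethertype or SCTP:
 *    CHECKSUM_UNNECESSARY if the CQE reports L3_OK && L4_OK (with
 *    csum_level = 1 for tunneled packets), otherwise CHECKSUM_NONE.
 *  - plain IPv4/IPv6: CHECKSUM_COMPLETE taken from cqe->check_sum; unless
 *    CSUM_FULL is set, mlx5e_skb_csum_fixup() adds back the VLAN headers
 *    and tail padding that the CQE checksum does not cover.
 */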
1036
1037#define MLX5E_CE_BIT_MASK 0x80
1038
1039static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
1040                                      u32 cqe_bcnt,
1041                                      struct mlx5e_rq *rq,
1042                                      struct sk_buff *skb)
1043{
1044        u8 lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
1045        struct mlx5e_rq_stats *stats = rq->stats;
1046        struct net_device *netdev = rq->netdev;
1047
1048        skb->mac_len = ETH_HLEN;
1049
1050        mlx5e_tls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
1051
1052        if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
1053                mlx5e_ipsec_offload_handle_rx_skb(netdev, skb, cqe);
1054
1055        if (lro_num_seg > 1) {
1056                mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
1057                skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
1058                /* Subtract one since we already counted this as one
1059                 * "regular" packet in mlx5e_complete_rx_cqe()
1060                 */
1061                stats->packets += lro_num_seg - 1;
1062                stats->lro_packets++;
1063                stats->lro_bytes += cqe_bcnt;
1064        }
1065
1066        if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
1067                skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
1068                                                                  rq->clock, get_cqe_ts(cqe));
1069        skb_record_rx_queue(skb, rq->ix);
1070
1071        if (likely(netdev->features & NETIF_F_RXHASH))
1072                mlx5e_skb_set_hash(cqe, skb);
1073
1074        if (cqe_has_vlan(cqe)) {
1075                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1076                                       be16_to_cpu(cqe->vlan_info));
1077                stats->removed_vlan_packets++;
1078        }
1079
1080        skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
1081
1082        mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
1083        /* checking CE bit in cqe - MSB in ml_path field */
1084        if (unlikely(cqe->ml_path & MLX5E_CE_BIT_MASK))
1085                mlx5e_enable_ecn(rq, skb);
1086
1087        skb->protocol = eth_type_trans(skb, netdev);
1088
1089        if (unlikely(mlx5e_skb_is_multicast(skb)))
1090                stats->mcast_packets++;
1091}
1092
1093static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
1094                                         struct mlx5_cqe64 *cqe,
1095                                         u32 cqe_bcnt,
1096                                         struct sk_buff *skb)
1097{
1098        struct mlx5e_rq_stats *stats = rq->stats;
1099
1100        stats->packets++;
1101        stats->bytes += cqe_bcnt;
1102        mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
1103}
1104
1105static inline
1106struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
1107                                       u32 frag_size, u16 headroom,
1108                                       u32 cqe_bcnt)
1109{
1110        struct sk_buff *skb = build_skb(va, frag_size);
1111
1112        if (unlikely(!skb)) {
1113                rq->stats->buff_alloc_err++;
1114                return NULL;
1115        }
1116
1117        skb_reserve(skb, headroom);
1118        skb_put(skb, cqe_bcnt);
1119
1120        return skb;
1121}
1122
1123static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom,
1124                                u32 len, struct xdp_buff *xdp)
1125{
1126        xdp_init_buff(xdp, rq->buff.frame0_sz, &rq->xdp_rxq);
1127        xdp_prepare_buff(xdp, va, headroom, len, false);
1128}
1129
1130static struct sk_buff *
1131mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
1132                          struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
1133{
1134        struct mlx5e_dma_info *di = wi->di;
1135        u16 rx_headroom = rq->buff.headroom;
1136        struct xdp_buff xdp;
1137        struct sk_buff *skb;
1138        void *va, *data;
1139        u32 frag_size;
1140
1141        va             = page_address(di->page) + wi->offset;
1142        data           = va + rx_headroom;
1143        frag_size      = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
1144
1145        dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset,
1146                                      frag_size, DMA_FROM_DEVICE);
1147        net_prefetchw(va); /* xdp_frame data area */
1148        net_prefetch(data);
1149
1150        mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
1151        if (mlx5e_xdp_handle(rq, di, &cqe_bcnt, &xdp))
1152                return NULL; /* page/packet was consumed by XDP */
1153
1154        rx_headroom = xdp.data - xdp.data_hard_start;
1155        frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
1156        skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt);
1157        if (unlikely(!skb))
1158                return NULL;
1159
1160        /* queue up for recycling/reuse */
1161        page_ref_inc(di->page);
1162
1163        return skb;
1164}
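/* Linear RX path above, in short: the whole packet lives in one page
 * fragment, so the payload region is synced for the CPU and handed to XDP
 * first; if the XDP program consumes it (drop, TX or redirect) no SKB is
 * built at all. Otherwise build_skb() wraps the very same page without
 * copying, the headroom is recomputed from xdp.data in case the program
 * moved the packet start (e.g. by adjusting the head), and page_ref_inc()
 * accounts for the reference now owned by the SKB so the page can still go
 * through the driver's recycle path afterwards.
 */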
1165
1166static struct sk_buff *
1167mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
1168                             struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
1169{
1170        struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
1171        struct mlx5e_wqe_frag_info *head_wi = wi;
1172        u16 headlen      = min_t(u32, MLX5E_RX_MAX_HEAD, cqe_bcnt);
1173        u16 frag_headlen = headlen;
1174        u16 byte_cnt     = cqe_bcnt - headlen;
1175        struct sk_buff *skb;
1176
1177        /* XDP is not supported in this configuration, as incoming packets
1178         * might spread among multiple pages.
1179         */
1180        skb = napi_alloc_skb(rq->cq.napi,
1181                             ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
1182        if (unlikely(!skb)) {
1183                rq->stats->buff_alloc_err++;
1184                return NULL;
1185        }
1186
1187        net_prefetchw(skb->data);
1188
1189        while (byte_cnt) {
1190                u16 frag_consumed_bytes =
1191                        min_t(u16, frag_info->frag_size - frag_headlen, byte_cnt);
1192
1193                mlx5e_add_skb_frag(rq, skb, wi->di, wi->offset + frag_headlen,
1194                                   frag_consumed_bytes, frag_info->frag_stride);
1195                byte_cnt -= frag_consumed_bytes;
1196                frag_headlen = 0;
1197                frag_info++;
1198                wi++;
1199        }
1200
1201        /* copy header */
1202        mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset, headlen);
1203        /* skb linear part was allocated with headlen and aligned to long */
1204        skb->tail += headlen;
1205        skb->len  += headlen;
1206
1207        return skb;
1208}
1209
1210static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1211{
1212        struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe;
1213        struct mlx5e_priv *priv = rq->priv;
1214
1215        if (cqe_syndrome_needs_recover(err_cqe->syndrome) &&
1216            !test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state)) {
1217                mlx5e_dump_error_cqe(&rq->cq, rq->rqn, err_cqe);
1218                queue_work(priv->wq, &rq->recover_work);
1219        }
1220}
1221
1222static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1223{
1224        struct mlx5_wq_cyc *wq = &rq->wqe.wq;
1225        struct mlx5e_wqe_frag_info *wi;
1226        struct sk_buff *skb;
1227        u32 cqe_bcnt;
1228        u16 ci;
1229
1230        ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
1231        wi       = get_frag(rq, ci);
1232        cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
1233
1234        if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1235                trigger_report(rq, cqe);
1236                rq->stats->wqe_err++;
1237                goto free_wqe;
1238        }
1239
1240        skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
1241                              mlx5e_skb_from_cqe_linear,
1242                              mlx5e_skb_from_cqe_nonlinear,
1243                              rq, cqe, wi, cqe_bcnt);
1244        if (!skb) {
1245                /* probably for XDP */
1246                if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
1247                        /* do not return page to cache,
1248                         * it will be returned on XDP_TX completion.
1249                         */
1250                        goto wq_cyc_pop;
1251                }
1252                goto free_wqe;
1253        }
1254
1255        mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1256
1257        if (mlx5e_cqe_regb_chain(cqe))
1258                if (!mlx5e_tc_update_skb(cqe, skb)) {
1259                        dev_kfree_skb_any(skb);
1260                        goto free_wqe;
1261                }
1262
1263        napi_gro_receive(rq->cq.napi, skb);
1264
1265free_wqe:
1266        mlx5e_free_rx_wqe(rq, wi, true);
1267wq_cyc_pop:
1268        mlx5_wq_cyc_pop(wq);
1269}
1270
1271#ifdef CONFIG_MLX5_ESWITCH
1272static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1273{
1274        struct net_device *netdev = rq->netdev;
1275        struct mlx5e_priv *priv = netdev_priv(netdev);
1276        struct mlx5e_rep_priv *rpriv  = priv->ppriv;
1277        struct mlx5_eswitch_rep *rep = rpriv->rep;
1278        struct mlx5e_tc_update_priv tc_priv = {};
1279        struct mlx5_wq_cyc *wq = &rq->wqe.wq;
1280        struct mlx5e_wqe_frag_info *wi;
1281        struct sk_buff *skb;
1282        u32 cqe_bcnt;
1283        u16 ci;
1284
1285        ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
1286        wi       = get_frag(rq, ci);
1287        cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
1288
1289        if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1290                rq->stats->wqe_err++;
1291                goto free_wqe;
1292        }
1293
1294        skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
1295                              mlx5e_skb_from_cqe_linear,
1296                              mlx5e_skb_from_cqe_nonlinear,
1297                              rq, cqe, wi, cqe_bcnt);
1298        if (!skb) {
1299                /* probably for XDP */
1300                if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
1301                        /* do not return page to cache,
1302                         * it will be returned on XDP_TX completion.
1303                         */
1304                        goto wq_cyc_pop;
1305                }
1306                goto free_wqe;
1307        }
1308
1309        mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1310
1311        if (rep->vlan && skb_vlan_tag_present(skb))
1312                skb_vlan_pop(skb);
1313
1314        if (unlikely(!mlx5_ipsec_is_rx_flow(cqe) &&
1315                     !mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))) {
1316                dev_kfree_skb_any(skb);
1317                goto free_wqe;
1318        }
1319
1320        napi_gro_receive(rq->cq.napi, skb);
1321
1322        mlx5_rep_tc_post_napi_receive(&tc_priv);
1323
1324free_wqe:
1325        mlx5e_free_rx_wqe(rq, wi, true);
1326wq_cyc_pop:
1327        mlx5_wq_cyc_pop(wq);
1328}
1329
1330static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1331{
1332        u16 cstrides       = mpwrq_get_cqe_consumed_strides(cqe);
1333        u16 wqe_id         = be16_to_cpu(cqe->wqe_id);
1334        struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
1335        u16 stride_ix      = mpwrq_get_cqe_stride_index(cqe);
1336        u32 wqe_offset     = stride_ix << rq->mpwqe.log_stride_sz;
1337        u32 head_offset    = wqe_offset & (PAGE_SIZE - 1);
1338        u32 page_idx       = wqe_offset >> PAGE_SHIFT;
1339        struct mlx5e_tc_update_priv tc_priv = {};
1340        struct mlx5e_rx_wqe_ll *wqe;
1341        struct mlx5_wq_ll *wq;
1342        struct sk_buff *skb;
1343        u16 cqe_bcnt;
1344
1345        wi->consumed_strides += cstrides;
1346
1347        if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1348                trigger_report(rq, cqe);
1349                rq->stats->wqe_err++;
1350                goto mpwrq_cqe_out;
1351        }
1352
1353        if (unlikely(mpwrq_is_filler_cqe(cqe))) {
1354                struct mlx5e_rq_stats *stats = rq->stats;
1355
1356                stats->mpwqe_filler_cqes++;
1357                stats->mpwqe_filler_strides += cstrides;
1358                goto mpwrq_cqe_out;
1359        }
1360
1361        cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
1362
1363        skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
1364                              mlx5e_skb_from_cqe_mpwrq_linear,
1365                              mlx5e_skb_from_cqe_mpwrq_nonlinear,
1366                              rq, wi, cqe_bcnt, head_offset, page_idx);
1367        if (!skb)
1368                goto mpwrq_cqe_out;
1369
1370        mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1371
1372        if (unlikely(!mlx5_ipsec_is_rx_flow(cqe) &&
1373                     !mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))) {
1374                dev_kfree_skb_any(skb);
1375                goto mpwrq_cqe_out;
1376        }
1377
1378        napi_gro_receive(rq->cq.napi, skb);
1379
1380        mlx5_rep_tc_post_napi_receive(&tc_priv);
1381
1382mpwrq_cqe_out:
1383        if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
1384                return;
1385
1386        wq  = &rq->mpwqe.wq;
1387        wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
1388        mlx5e_free_rx_mpwqe(rq, wi, true);
1389        mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
1390}
1391
1392const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep = {
1393        .handle_rx_cqe       = mlx5e_handle_rx_cqe_rep,
1394        .handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep,
1395};
1396#endif
1397
1398static struct sk_buff *
1399mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
1400                                   u16 cqe_bcnt, u32 head_offset, u32 page_idx)
1401{
1402        u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
1403        struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
1404        u32 frag_offset    = head_offset + headlen;
1405        u32 byte_cnt       = cqe_bcnt - headlen;
1406        struct mlx5e_dma_info *head_di = di;
1407        struct sk_buff *skb;
1408
1409        skb = napi_alloc_skb(rq->cq.napi,
1410                             ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
1411        if (unlikely(!skb)) {
1412                rq->stats->buff_alloc_err++;
1413                return NULL;
1414        }
1415
1416        net_prefetchw(skb->data);
1417
1418        if (unlikely(frag_offset >= PAGE_SIZE)) {
1419                di++;
1420                frag_offset -= PAGE_SIZE;
1421        }
1422
1423        while (byte_cnt) {
1424                u32 pg_consumed_bytes =
1425                        min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
1426                unsigned int truesize =
1427                        ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
1428
1429                mlx5e_add_skb_frag(rq, skb, di, frag_offset,
1430                                   pg_consumed_bytes, truesize);
1431                byte_cnt -= pg_consumed_bytes;
1432                frag_offset = 0;
1433                di++;
1434        }
1435        /* copy header */
1436        mlx5e_copy_skb_header(rq->pdev, skb, head_di, head_offset, headlen);
1437        /* skb linear part was allocated with headlen and aligned to long */
1438        skb->tail += headlen;
1439        skb->len  += headlen;
1440
1441        return skb;
1442}
1443
1444static struct sk_buff *
1445mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
1446                                u16 cqe_bcnt, u32 head_offset, u32 page_idx)
1447{
1448        struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
1449        u16 rx_headroom = rq->buff.headroom;
1450        u32 cqe_bcnt32 = cqe_bcnt;
1451        struct xdp_buff xdp;
1452        struct sk_buff *skb;
1453        void *va, *data;
1454        u32 frag_size;
1455
1456        /* Check packet size. Note LRO doesn't use linear SKB */
1457        if (unlikely(cqe_bcnt > rq->hw_mtu)) {
1458                rq->stats->oversize_pkts_sw_drop++;
1459                return NULL;
1460        }
1461
1462        va             = page_address(di->page) + head_offset;
1463        data           = va + rx_headroom;
1464        frag_size      = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
1465
1466        dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset,
1467                                      frag_size, DMA_FROM_DEVICE);
1468        net_prefetchw(va); /* xdp_frame data area */
1469        net_prefetch(data);
1470
1471        mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt32, &xdp);
1472        if (mlx5e_xdp_handle(rq, di, &cqe_bcnt32, &xdp)) {
1473                if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
1474                        __set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
1475                return NULL; /* page/packet was consumed by XDP */
1476        }
1477
1478        rx_headroom = xdp.data - xdp.data_hard_start;
1479        frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
1480        skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32);
1481        if (unlikely(!skb))
1482                return NULL;
1483
1484        /* queue up for recycling/reuse */
1485        page_ref_inc(di->page);
1486
1487        return skb;
1488}
1489
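/* Default CQE handler for a striding RQ (MPWQE). The stride index from the
 * CQE is translated into a page index and an offset within that page; e.g.,
 * assuming 4K pages and 2K strides (log_stride_sz = 11), stride 5 gives
 * wqe_offset = 0x2800, page_idx = 2 and head_offset = 0x800. The WQE is only
 * freed and popped once all of its strides have been consumed.
 */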
1490static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1491{
1492        u16 cstrides       = mpwrq_get_cqe_consumed_strides(cqe);
1493        u16 wqe_id         = be16_to_cpu(cqe->wqe_id);
1494        struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
1495        u16 stride_ix      = mpwrq_get_cqe_stride_index(cqe);
1496        u32 wqe_offset     = stride_ix << rq->mpwqe.log_stride_sz;
1497        u32 head_offset    = wqe_offset & (PAGE_SIZE - 1);
1498        u32 page_idx       = wqe_offset >> PAGE_SHIFT;
1499        struct mlx5e_rx_wqe_ll *wqe;
1500        struct mlx5_wq_ll *wq;
1501        struct sk_buff *skb;
1502        u16 cqe_bcnt;
1503
1504        wi->consumed_strides += cstrides;
1505
1506        if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1507                trigger_report(rq, cqe);
1508                rq->stats->wqe_err++;
1509                goto mpwrq_cqe_out;
1510        }
1511
1512        if (unlikely(mpwrq_is_filler_cqe(cqe))) {
1513                struct mlx5e_rq_stats *stats = rq->stats;
1514
1515                stats->mpwqe_filler_cqes++;
1516                stats->mpwqe_filler_strides += cstrides;
1517                goto mpwrq_cqe_out;
1518        }
1519
1520        cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
1521
1522        skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
1523                              mlx5e_skb_from_cqe_mpwrq_linear,
1524                              mlx5e_skb_from_cqe_mpwrq_nonlinear,
1525                              rq, wi, cqe_bcnt, head_offset, page_idx);
1526        if (!skb)
1527                goto mpwrq_cqe_out;
1528
1529        mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1530
1531        if (mlx5e_cqe_regb_chain(cqe))
1532                if (!mlx5e_tc_update_skb(cqe, skb)) {
1533                        dev_kfree_skb_any(skb);
1534                        goto mpwrq_cqe_out;
1535                }
1536
1537        napi_gro_receive(rq->cq.napi, skb);
1538
1539mpwrq_cqe_out:
1540        if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
1541                return;
1542
1543        wq  = &rq->mpwqe.wq;
1544        wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
1545        mlx5e_free_rx_mpwqe(rq, wi, true);
1546        mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
1547}
1548
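/* NAPI RX poll: drain up to @budget CQEs from the RQ's CQ, expanding
 * compressed CQE sessions on the fly and dispatching each CQE to the RQ's
 * handler, then complete deferred XDP work and update the CQ doorbell record.
 */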
1549int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
1550{
1551        struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
1552        struct mlx5_cqwq *cqwq = &cq->wq;
1553        struct mlx5_cqe64 *cqe;
1554        int work_done = 0;
1555
1556        if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
1557                return 0;
1558
1559        if (rq->cqd.left) {
1560                work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget);
1561                if (work_done >= budget)
1562                        goto out;
1563        }
1564
1565        cqe = mlx5_cqwq_get_cqe(cqwq);
1566        if (!cqe) {
1567                if (unlikely(work_done))
1568                        goto out;
1569                return 0;
1570        }
1571
1572        do {
1573                if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
1574                        work_done +=
1575                                mlx5e_decompress_cqes_start(rq, cqwq,
1576                                                            budget - work_done);
1577                        continue;
1578                }
1579
1580                mlx5_cqwq_pop(cqwq);
1581
1582                INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
1583                                mlx5e_handle_rx_cqe, rq, cqe);
1584        } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));
1585
1586out:
1587        if (rcu_access_pointer(rq->xdp_prog))
1588                mlx5e_xdp_rx_poll_complete(rq);
1589
1590        mlx5_cqwq_update_db_record(cqwq);
1591
1592        /* ensure cq space is freed before enabling more cqes */
1593        wmb();
1594
1595        return work_done;
1596}
1597
1598#ifdef CONFIG_MLX5_CORE_IPOIB
1599
1600#define MLX5_IB_GRH_SGID_OFFSET 8
1601#define MLX5_IB_GRH_DGID_OFFSET 24
1602#define MLX5_GID_SIZE           16
1603
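/* IPoIB RX completion: resolve the (child) netdev from the destination QPN,
 * classify the packet from the GRH DGID, drop HCA-replicated copies of our
 * own multicast sends, then strip the GRH and build the IPoIB pseudo header.
 */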
1604static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
1605                                         struct mlx5_cqe64 *cqe,
1606                                         u32 cqe_bcnt,
1607                                         struct sk_buff *skb)
1608{
1609        struct hwtstamp_config *tstamp;
1610        struct mlx5e_rq_stats *stats;
1611        struct net_device *netdev;
1612        struct mlx5e_priv *priv;
1613        char *pseudo_header;
1614        u32 flags_rqpn;
1615        u32 qpn;
1616        u8 *dgid;
1617        u8 g;
1618
1619        qpn = be32_to_cpu(cqe->sop_drop_qpn) & 0xffffff;
1620        netdev = mlx5i_pkey_get_netdev(rq->netdev, qpn);
1621
1622        /* No mapping present, cannot process the SKB. This might happen if a child
1623         * interface is going down while unprocessed CQEs remain on the parent RQ.
1624         */
1625        if (unlikely(!netdev)) {
1626                /* TODO: add drop counters support */
1627                skb->dev = NULL;
1628                pr_warn_once("Unable to map QPN %u to dev - dropping skb\n", qpn);
1629                return;
1630        }
1631
1632        priv = mlx5i_epriv(netdev);
1633        tstamp = &priv->tstamp;
1634        stats = &priv->channel_stats[rq->ix].rq;
1635
1636        flags_rqpn = be32_to_cpu(cqe->flags_rqpn);
1637        g = (flags_rqpn >> 28) & 3;
1638        dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
1639        if ((!g) || dgid[0] != 0xff)
1640                skb->pkt_type = PACKET_HOST;
1641        else if (memcmp(dgid, netdev->broadcast + 4, MLX5_GID_SIZE) == 0)
1642                skb->pkt_type = PACKET_BROADCAST;
1643        else
1644                skb->pkt_type = PACKET_MULTICAST;
1645
1646        /* Drop packets that this interface itself sent, i.e. multicast
1647         * packets that the HCA has replicated back to us.
1648         */
1649        if (g && (qpn == (flags_rqpn & 0xffffff)) &&
1650            (memcmp(netdev->dev_addr + 4, skb->data + MLX5_IB_GRH_SGID_OFFSET,
1651                    MLX5_GID_SIZE) == 0)) {
1652                skb->dev = NULL;
1653                return;
1654        }
1655
1656        skb_pull(skb, MLX5_IB_GRH_BYTES);
1657
1658        skb->protocol = *((__be16 *)(skb->data));
1659
1660        if (netdev->features & NETIF_F_RXCSUM) {
1661                skb->ip_summed = CHECKSUM_COMPLETE;
1662                skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
1663                stats->csum_complete++;
1664        } else {
1665                skb->ip_summed = CHECKSUM_NONE;
1666                stats->csum_none++;
1667        }
1668
1669        if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
1670                skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
1671                                                                  rq->clock, get_cqe_ts(cqe));
1672        skb_record_rx_queue(skb, rq->ix);
1673
1674        if (likely(netdev->features & NETIF_F_RXHASH))
1675                mlx5e_skb_set_hash(cqe, skb);
1676
1677        /* 20 bytes of IPoIB pseudo header; the 4-byte encap header is already present */
1678        pseudo_header = skb_push(skb, MLX5_IPOIB_PSEUDO_LEN);
1679        memset(pseudo_header, 0, MLX5_IPOIB_PSEUDO_LEN);
1680        skb_reset_mac_header(skb);
1681        skb_pull(skb, MLX5_IPOIB_HARD_LEN);
1682
1683        skb->dev = netdev;
1684
1685        stats->packets++;
1686        stats->bytes += cqe_bcnt;
1687}
1688
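/* IPoIB CQE handler for the cyclic RQ. mlx5i_complete_rx_cqe() clears
 * skb->dev to signal a drop, in which case the skb is freed here instead of
 * being handed to napi_gro_receive().
 */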
1689static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1690{
1691        struct mlx5_wq_cyc *wq = &rq->wqe.wq;
1692        struct mlx5e_wqe_frag_info *wi;
1693        struct sk_buff *skb;
1694        u32 cqe_bcnt;
1695        u16 ci;
1696
1697        ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
1698        wi       = get_frag(rq, ci);
1699        cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
1700
1701        if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1702                rq->stats->wqe_err++;
1703                goto wq_free_wqe;
1704        }
1705
1706        skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
1707                              mlx5e_skb_from_cqe_linear,
1708                              mlx5e_skb_from_cqe_nonlinear,
1709                              rq, cqe, wi, cqe_bcnt);
1710        if (!skb)
1711                goto wq_free_wqe;
1712
1713        mlx5i_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1714        if (unlikely(!skb->dev)) {
1715                dev_kfree_skb_any(skb);
1716                goto wq_free_wqe;
1717        }
1718        napi_gro_receive(rq->cq.napi, skb);
1719
1720wq_free_wqe:
1721        mlx5e_free_rx_wqe(rq, wi, true);
1722        mlx5_wq_cyc_pop(wq);
1723}
1724
1725const struct mlx5e_rx_handlers mlx5i_rx_handlers = {
1726        .handle_rx_cqe       = mlx5i_handle_rx_cqe,
1727        .handle_rx_cqe_mpwqe = NULL, /* Not supported */
1728};
1729#endif /* CONFIG_MLX5_CORE_IPOIB */
1730
1731#ifdef CONFIG_MLX5_EN_IPSEC
1732
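/* Cyclic-RQ CQE handler used when FPGA (Innova) IPsec offload is active: the
 * skb is passed through mlx5e_ipsec_handle_rx_skb() before the RX completion
 * is finalized and the packet is delivered via GRO.
 */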
1733static void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1734{
1735        struct mlx5_wq_cyc *wq = &rq->wqe.wq;
1736        struct mlx5e_wqe_frag_info *wi;
1737        struct sk_buff *skb;
1738        u32 cqe_bcnt;
1739        u16 ci;
1740
1741        ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
1742        wi       = get_frag(rq, ci);
1743        cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
1744
1745        if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1746                rq->stats->wqe_err++;
1747                goto wq_free_wqe;
1748        }
1749
1750        skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
1751                              mlx5e_skb_from_cqe_linear,
1752                              mlx5e_skb_from_cqe_nonlinear,
1753                              rq, cqe, wi, cqe_bcnt);
1754        if (unlikely(!skb)) /* a DROP, skip the page-reuse checks */
1755                goto wq_free_wqe;
1756
1757        skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb, &cqe_bcnt);
1758        if (unlikely(!skb))
1759                goto wq_free_wqe;
1760
1761        mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1762        napi_gro_receive(rq->cq.napi, skb);
1763
1764wq_free_wqe:
1765        mlx5e_free_rx_wqe(rq, wi, true);
1766        mlx5_wq_cyc_pop(wq);
1767}
1768
1769#endif /* CONFIG_MLX5_EN_IPSEC */
1770
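/* Select the RQ datapath callbacks (skb builder, WQE post/dealloc and CQE
 * handler) according to the WQ type, XSK mode and whether received packets
 * are eligible for a linear SKB. Fails with -EINVAL if no CQE handler is
 * provided by the profile or if MPWQE is combined with FPGA IPsec.
 */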
1771int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk)
1772{
1773        struct net_device *netdev = rq->netdev;
1774        struct mlx5_core_dev *mdev = rq->mdev;
1775        struct mlx5e_priv *priv = rq->priv;
1776
1777        switch (rq->wq_type) {
1778        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
1779                rq->mpwqe.skb_from_cqe_mpwrq = xsk ?
1780                        mlx5e_xsk_skb_from_cqe_mpwrq_linear :
1781                        mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ?
1782                                mlx5e_skb_from_cqe_mpwrq_linear :
1783                                mlx5e_skb_from_cqe_mpwrq_nonlinear;
1784                rq->post_wqes = mlx5e_post_rx_mpwqes;
1785                rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
1786
1787                rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
1788                if (mlx5_fpga_is_ipsec_device(mdev)) {
1789                        netdev_err(netdev, "MPWQE RQ with Innova IPSec offload not supported\n");
1790                        return -EINVAL;
1791                }
1792                if (!rq->handle_rx_cqe) {
1793                        netdev_err(netdev, "RX handler of MPWQE RQ is not set\n");
1794                        return -EINVAL;
1795                }
1796                break;
1797        default: /* MLX5_WQ_TYPE_CYCLIC */
1798                rq->wqe.skb_from_cqe = xsk ?
1799                        mlx5e_xsk_skb_from_cqe_linear :
1800                        mlx5e_rx_is_linear_skb(params, NULL) ?
1801                                mlx5e_skb_from_cqe_linear :
1802                                mlx5e_skb_from_cqe_nonlinear;
1803                rq->post_wqes = mlx5e_post_rx_wqes;
1804                rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
1805
1806#ifdef CONFIG_MLX5_EN_IPSEC
1807                if ((mlx5_fpga_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) &&
1808                    priv->ipsec)
1809                        rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
1810                else
1811#endif
1812                        rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe;
1813                if (!rq->handle_rx_cqe) {
1814                        netdev_err(netdev, "RX handler of RQ is not set\n");
1815                        return -EINVAL;
1816                }
1817        }
1818
1819        return 0;
1820}
1821
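/* CQE handler for the trap RQ: rebuild the skb (always non-linear), report it
 * to devlink with the trap id taken from the CQE flow tag, then free it
 * instead of delivering it to the network stack.
 */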
1822static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1823{
1824        struct mlx5e_priv *priv = netdev_priv(rq->netdev);
1825        struct mlx5_wq_cyc *wq = &rq->wqe.wq;
1826        struct mlx5e_wqe_frag_info *wi;
1827        struct devlink_port *dl_port;
1828        struct sk_buff *skb;
1829        u32 cqe_bcnt;
1830        u16 trap_id;
1831        u16 ci;
1832
1833        trap_id  = get_cqe_flow_tag(cqe);
1834        ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
1835        wi       = get_frag(rq, ci);
1836        cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
1837
1838        if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1839                rq->stats->wqe_err++;
1840                goto free_wqe;
1841        }
1842
1843        skb = mlx5e_skb_from_cqe_nonlinear(rq, cqe, wi, cqe_bcnt);
1844        if (!skb)
1845                goto free_wqe;
1846
1847        mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1848        skb_push(skb, ETH_HLEN);
1849
1850        dl_port = mlx5e_devlink_get_dl_port(priv);
1851        mlx5_devlink_trap_report(rq->mdev, trap_id, skb, dl_port);
1852        dev_kfree_skb_any(skb);
1853
1854free_wqe:
1855        mlx5e_free_rx_wqe(rq, wi, false);
1856        mlx5_wq_cyc_pop(wq);
1857}
1858
1859void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params)
1860{
1861        rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(params, NULL) ?
1862                               mlx5e_skb_from_cqe_linear :
1863                               mlx5e_skb_from_cqe_nonlinear;
1864        rq->post_wqes = mlx5e_post_rx_wqes;
1865        rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
1866        rq->handle_rx_cqe = mlx5e_trap_handle_rx_cqe;
1867}
1868