linux/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019 Mellanox Technologies. */

#ifndef __MLX5_EN_TXRX_H___
#define __MLX5_EN_TXRX_H___

#include "en.h"
#include <linux/indirect_call_wrapper.h>

#define MLX5E_TX_WQE_EMPTY_DS_COUNT (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)

#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))

#define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)

static inline
ktime_t mlx5e_cqe_ts_to_ns(cqe_ts_to_ns func, struct mlx5_clock *clock, u64 cqe_ts)
{
        return INDIRECT_CALL_2(func, mlx5_real_time_cyc2time, mlx5_timecounter_cyc2time,
                               clock, cqe_ts);
}
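
/* Illustrative sketch (not part of the driver): converting a CQE timestamp
 * to nanoseconds on the RX completion path. "rq" is a hypothetical mlx5e_rq,
 * and get_cqe_ts() is assumed to extract the raw timestamp from the CQE:
 *
 *	ktime_t ts = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time, rq->clock,
 *					get_cqe_ts(cqe));
 *
 * INDIRECT_CALL_2() lets the two common clock backends (real-time and
 * free-running timecounter) be called directly, avoiding a retpoline-
 * penalized indirect call on the hot path.
 */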

enum mlx5e_icosq_wqe_type {
        MLX5E_ICOSQ_WQE_NOP,
        MLX5E_ICOSQ_WQE_UMR_RX,
        MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR,
#ifdef CONFIG_MLX5_EN_TLS
        MLX5E_ICOSQ_WQE_UMR_TLS,
        MLX5E_ICOSQ_WQE_SET_PSV_TLS,
        MLX5E_ICOSQ_WQE_GET_PSV_TLS,
#endif
};

/* General */
static inline bool mlx5e_skb_is_multicast(struct sk_buff *skb)
{
        return skb->pkt_type == PACKET_MULTICAST || skb->pkt_type == PACKET_BROADCAST;
}

void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
int mlx5e_poll_ico_cq(struct mlx5e_cq *cq);

/* RX */
void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct page *page);
void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, struct page *page, bool recycle);
INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq));
INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq));
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);

/* TX */
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);

static inline bool
mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
{
        return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
}
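
/* Worked example (illustrative): mlx5e_wqc_has_room_for() operates on
 * free-running 16-bit counters. With a ring of 8 WQEBBs, cc = 3 and pc = 9,
 * ctr2ix(wq, cc - pc) == (3 - 9) & 7 == 2 free WQEBBs, so a request for
 * n = 2 fits but n = 3 does not. The (cc == pc) term covers the completely
 * empty ring, where the masked subtraction would alias to 0 free entries.
 */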

static inline void *mlx5e_fetch_wqe(struct mlx5_wq_cyc *wq, u16 pi, size_t wqe_size)
{
        void *wqe;

        wqe = mlx5_wq_cyc_get_wqe(wq, pi);
        memset(wqe, 0, wqe_size);

        return wqe;
}

#define MLX5E_TX_FETCH_WQE(sq, pi) \
        ((struct mlx5e_tx_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_tx_wqe)))

static inline struct mlx5e_tx_wqe *
mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
        u16                         pi   = mlx5_wq_cyc_ctr2ix(wq, *pc);
        struct mlx5e_tx_wqe        *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
        struct mlx5_wqe_ctrl_seg   *cseg = &wqe->ctrl;

        memset(cseg, 0, sizeof(*cseg));

        cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
        cseg->qpn_ds           = cpu_to_be32((sqn << 8) | 0x01);

        (*pc)++;

        return wqe;
}

static inline struct mlx5e_tx_wqe *
mlx5e_post_nop_fence(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
        u16                         pi   = mlx5_wq_cyc_ctr2ix(wq, *pc);
        struct mlx5e_tx_wqe        *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
        struct mlx5_wqe_ctrl_seg   *cseg = &wqe->ctrl;

        memset(cseg, 0, sizeof(*cseg));

        cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
        cseg->qpn_ds           = cpu_to_be32((sqn << 8) | 0x01);
        cseg->fm_ce_se         = MLX5_FENCE_MODE_INITIATOR_SMALL;

        (*pc)++;

        return wqe;
}

struct mlx5e_tx_wqe_info {
        struct sk_buff *skb;
        u32 num_bytes;
        u8 num_wqebbs;
        u8 num_dma;
        u8 num_fifo_pkts;
#ifdef CONFIG_MLX5_EN_TLS
        struct page *resync_dump_frag_page;
#endif
};

static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
{
        struct mlx5_wq_cyc *wq = &sq->wq;
        u16 pi, contig_wqebbs;

        pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
        contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
        if (unlikely(contig_wqebbs < size)) {
                struct mlx5e_tx_wqe_info *wi, *edge_wi;

                wi = &sq->db.wqe_info[pi];
                edge_wi = wi + contig_wqebbs;

                /* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
                for (; wi < edge_wi; wi++) {
                        *wi = (struct mlx5e_tx_wqe_info) {
                                .num_wqebbs = 1,
                        };
                        mlx5e_post_nop(wq, sq->sqn, &sq->pc);
                }
                sq->stats->nop += contig_wqebbs;

                pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
        }

        return pi;
}
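
/* Illustrative sketch (not part of the driver): the usual pairing on the
 * send path. The size in WQEBBs (2 here) is hypothetical:
 *
 *	u16 pi = mlx5e_txqsq_get_next_pi(sq, 2);
 *	struct mlx5e_tx_wqe *wqe = MLX5E_TX_FETCH_WQE(sq, pi);
 *
 * After this call, "pi" is guaranteed to have at least 2 contiguous WQEBBs
 * before the end of the current WQ fragment, so the WQE never wraps.
 */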

static inline u16 mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
        return be16_to_cpu(cqe->shampo.header_entry_index) & (rq->mpwqe.shampo->hd_per_wq - 1);
}

struct mlx5e_shampo_umr {
        u16 len;
};

struct mlx5e_icosq_wqe_info {
        u8 wqe_type;
        u8 num_wqebbs;

        /* Auxiliary data for different wqe types. */
        union {
                struct {
                        struct mlx5e_rq *rq;
                } umr;
                struct mlx5e_shampo_umr shampo;
#ifdef CONFIG_MLX5_EN_TLS
                struct {
                        struct mlx5e_ktls_offload_context_rx *priv_rx;
                } tls_set_params;
                struct {
                        struct mlx5e_ktls_rx_resync_buf *buf;
                } tls_get_params;
#endif
        };
};

void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq);

static inline u16 mlx5e_icosq_get_next_pi(struct mlx5e_icosq *sq, u16 size)
{
        struct mlx5_wq_cyc *wq = &sq->wq;
        u16 pi, contig_wqebbs;

        pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
        contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
        if (unlikely(contig_wqebbs < size)) {
                struct mlx5e_icosq_wqe_info *wi, *edge_wi;

                wi = &sq->db.wqe_info[pi];
                edge_wi = wi + contig_wqebbs;

                /* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
                for (; wi < edge_wi; wi++) {
                        *wi = (struct mlx5e_icosq_wqe_info) {
                                .wqe_type   = MLX5E_ICOSQ_WQE_NOP,
                                .num_wqebbs = 1,
                        };
                        mlx5e_post_nop(wq, sq->sqn, &sq->pc);
                }

                pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
        }

        return pi;
}

static inline void
mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
                struct mlx5_wqe_ctrl_seg *ctrl)
{
        ctrl->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
        /* ensure wqe is visible to device before updating doorbell record */
        dma_wmb();

        *wq->db = cpu_to_be32(pc);

        /* ensure doorbell record is visible to device before ringing the
         * doorbell
         */
        wmb();

        mlx5_write64((__be32 *)ctrl, uar_map);
}
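
/* Illustrative sketch (not part of the driver): ringing the doorbell after a
 * WQE has been fully written, typically at the end of a burst when
 * netdev_xmit_more() is false. "wi" is a hypothetical wqe_info entry:
 *
 *	sq->pc += wi->num_wqebbs;
 *	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
 *
 * The dma_wmb()/wmb() pair above orders WQE contents -> doorbell record ->
 * doorbell MMIO write, which is what makes the WQE safe for the device to
 * fetch.
 */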

static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
{
        struct mlx5_core_cq *mcq;

        mcq = &cq->mcq;
        mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
}

static inline struct mlx5e_sq_dma *
mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
{
        return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
}

static inline void
mlx5e_dma_push(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size,
               enum mlx5e_dma_map_type map_type)
{
        struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);

        dma->addr = addr;
        dma->size = size;
        dma->type = map_type;
}

static inline
struct sk_buff **mlx5e_skb_fifo_get(struct mlx5e_skb_fifo *fifo, u16 i)
{
        return &fifo->fifo[i & fifo->mask];
}

static inline
void mlx5e_skb_fifo_push(struct mlx5e_skb_fifo *fifo, struct sk_buff *skb)
{
        struct sk_buff **skb_item = mlx5e_skb_fifo_get(fifo, (*fifo->pc)++);

        *skb_item = skb;
}

static inline
struct sk_buff *mlx5e_skb_fifo_pop(struct mlx5e_skb_fifo *fifo)
{
        return *mlx5e_skb_fifo_get(fifo, (*fifo->cc)++);
}
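
/* Illustrative sketch (not part of the driver): the SKB fifo is a plain
 * producer/consumer ring; a push on the xmit side is matched by a pop on
 * the completion side, in order:
 *
 *	mlx5e_skb_fifo_push(&sq->db.skb_fifo, skb);	// xmit path
 *	...
 *	skb = mlx5e_skb_fifo_pop(&sq->db.skb_fifo);	// completion path
 *
 * The mask-based indexing relies on the fifo size being a power of two.
 */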

static inline void
mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
{
        switch (dma->type) {
        case MLX5E_DMA_MAP_SINGLE:
                dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
                break;
        case MLX5E_DMA_MAP_PAGE:
                dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
                break;
        default:
                WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
        }
}
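
/* Illustrative sketch (not part of the driver): on TX completion, the DMA
 * fifo entries recorded by mlx5e_dma_push() are consumed and unmapped in
 * FIFO order; "dma_fifo_cc" is the consumer counter:
 *
 *	for (i = 0; i < wi->num_dma; i++) {
 *		struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_cc++);
 *
 *		mlx5e_tx_dma_unmap(sq->pdev, dma);
 *	}
 */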

void mlx5e_sq_xmit_simple(struct mlx5e_txqsq *sq, struct sk_buff *skb, bool xmit_more);
void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq);

static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs)
{
        return session->ds_count == max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS;
}

static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
{
        if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
                mlx5_wq_ll_reset(&rq->mpwqe.wq);
                rq->mpwqe.actual_wq_head = 0;
        } else {
                mlx5_wq_cyc_reset(&rq->wqe.wq);
        }
}

static inline void mlx5e_dump_error_cqe(struct mlx5e_cq *cq, u32 qn,
                                        struct mlx5_err_cqe *err_cqe)
{
        struct mlx5_cqwq *wq = &cq->wq;
        u32 ci;

        ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);

        netdev_err(cq->netdev,
                   "Error cqe on cqn 0x%x, ci 0x%x, qn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
                   cq->mcq.cqn, ci, qn,
                   get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
                   err_cqe->syndrome, err_cqe->vendor_err_synd);
        mlx5_dump_err_cqe(cq->mdev, err_cqe);
}

static inline u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq)
{
        switch (rq->wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                return mlx5_wq_ll_get_size(&rq->mpwqe.wq);
        default:
                return mlx5_wq_cyc_get_size(&rq->wqe.wq);
        }
}

static inline u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq)
{
        switch (rq->wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                return rq->mpwqe.wq.cur_sz;
        default:
                return rq->wqe.wq.cur_sz;
        }
}

static inline u16 mlx5e_rqwq_get_head(struct mlx5e_rq *rq)
{
        switch (rq->wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                return mlx5_wq_ll_get_head(&rq->mpwqe.wq);
        default:
                return mlx5_wq_cyc_get_head(&rq->wqe.wq);
        }
}

static inline u16 mlx5e_rqwq_get_wqe_counter(struct mlx5e_rq *rq)
{
        switch (rq->wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                return mlx5_wq_ll_get_counter(&rq->mpwqe.wq);
        default:
                return mlx5_wq_cyc_get_counter(&rq->wqe.wq);
        }
}

/* SW parser related functions */

struct mlx5e_swp_spec {
        __be16 l3_proto;
        u8 l4_proto;
        u8 is_tun;
        __be16 tun_l3_proto;
        u8 tun_l4_proto;
};

static inline void mlx5e_eseg_swp_offsets_add_vlan(struct mlx5_wqe_eth_seg *eseg)
{
        /* SWP offsets are in 2-byte words */
        eseg->swp_outer_l3_offset += VLAN_HLEN / 2;
        eseg->swp_outer_l4_offset += VLAN_HLEN / 2;
        eseg->swp_inner_l3_offset += VLAN_HLEN / 2;
        eseg->swp_inner_l4_offset += VLAN_HLEN / 2;
}

static inline void
mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
                   struct mlx5e_swp_spec *swp_spec)
{
        /* SWP offsets are in 2-byte words */
        eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
        if (swp_spec->l3_proto == htons(ETH_P_IPV6))
                eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
        if (swp_spec->l4_proto) {
                eseg->swp_outer_l4_offset = skb_transport_offset(skb) / 2;
                if (swp_spec->l4_proto == IPPROTO_UDP)
                        eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
        }

        if (swp_spec->is_tun) {
                eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
                if (swp_spec->tun_l3_proto == htons(ETH_P_IPV6))
                        eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
        } else { /* typically for ipsec when xfrm mode != XFRM_MODE_TUNNEL */
                eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
                if (swp_spec->l3_proto == htons(ETH_P_IPV6))
                        eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
        }
        switch (swp_spec->tun_l4_proto) {
        case IPPROTO_UDP:
                eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
                fallthrough;
        case IPPROTO_TCP:
                eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
                break;
        }
}
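
/* Illustrative sketch (not part of the driver): filling a SWP spec for a
 * UDP-encapsulated IPv4-in-IPv4 packet carrying inner TCP. The field values
 * are hypothetical:
 *
 *	struct mlx5e_swp_spec swp_spec = {
 *		.l3_proto	= htons(ETH_P_IP),
 *		.l4_proto	= IPPROTO_UDP,
 *		.is_tun		= true,
 *		.tun_l3_proto	= htons(ETH_P_IP),
 *		.tun_l4_proto	= IPPROTO_TCP,
 *	};
 *
 *	mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
 */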

#define MLX5E_STOP_ROOM(wqebbs) ((wqebbs) * 2 - 1)

static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_size)
{
        /* A WQE must not cross the page boundary, hence two conditions:
         * 1. Its size must not exceed the page size.
         * 2. If the WQE size is X, and the space remaining in a page is less
         *    than X, this space needs to be padded with NOPs. So, one WQE of
         *    size X may require up to X-1 WQEBBs of padding, which makes the
         *    stop room X-1 + X = 2*X - 1.
         * WQE size is also limited by the hardware limit.
         */
        WARN_ONCE(wqe_size > mlx5e_get_max_sq_wqebbs(mdev),
                  "wqe_size %u is greater than max SQ WQEBBs %u",
                  wqe_size, mlx5e_get_max_sq_wqebbs(mdev));

        return MLX5E_STOP_ROOM(wqe_size);
}
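
/* Worked example (illustrative): for a WQE of wqe_size = 4 WQEBBs, up to 3
 * WQEBBs of NOP padding may be needed at a fragment edge, so
 * MLX5E_STOP_ROOM(4) reserves 4 * 2 - 1 = 7 WQEBBs of stop room.
 */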

static inline u16 mlx5e_stop_room_for_max_wqe(struct mlx5_core_dev *mdev)
{
        return MLX5E_STOP_ROOM(mlx5e_get_max_sq_wqebbs(mdev));
}

static inline bool mlx5e_icosq_can_post_wqe(struct mlx5e_icosq *sq, u16 wqe_size)
{
        u16 room = sq->reserved_room;

        WARN_ONCE(wqe_size > sq->max_sq_wqebbs,
                  "wqe_size %u is greater than max SQ WQEBBs %u",
                  wqe_size, sq->max_sq_wqebbs);

        room += MLX5E_STOP_ROOM(wqe_size);

        return mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room);
}
#endif