linux/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <net/dsfield.h>
#include "en.h"
#include "ipoib/ipoib.h"
#include "en_accel/ipsec_rxtx.h"
#include "lib/clock.h"

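/* Editorial note, not in the original source: the SQ is stopped once fewer
 * than MLX5E_SQ_STOP_ROOM WQEBBs remain, i.e. room for one maximal WQE plus
 * the NOPs used to pad out to the ring edge in mlx5e_txwqe_complete().
 */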
#define MLX5E_SQ_NOPS_ROOM  MLX5_SEND_WQE_MAX_WQEBBS
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
                            MLX5E_SQ_NOPS_ROOM)

static inline void mlx5e_tx_dma_unmap(struct device *pdev,
                                      struct mlx5e_sq_dma *dma)
{
        switch (dma->type) {
        case MLX5E_DMA_MAP_SINGLE:
                dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
                break;
        case MLX5E_DMA_MAP_PAGE:
                dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
                break;
        default:
                WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
        }
}

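/* Per-SQ DMA fifo: every mapping created at xmit time (linear headroom or
 * page fragment) is recorded here so the completion path can unmap it
 * without inspecting the skb.
 */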
static inline void mlx5e_dma_push(struct mlx5e_txqsq *sq,
                                  dma_addr_t addr,
                                  u32 size,
                                  enum mlx5e_dma_map_type map_type)
{
        u32 i = sq->dma_fifo_pc & sq->dma_fifo_mask;

        sq->db.dma_fifo[i].addr = addr;
        sq->db.dma_fifo[i].size = size;
        sq->db.dma_fifo[i].type = map_type;
        sq->dma_fifo_pc++;
}

static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
{
        return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
}

static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
{
        int i;

        for (i = 0; i < num_dma; i++) {
                struct mlx5e_sq_dma *last_pushed_dma =
                        mlx5e_dma_get(sq, --sq->dma_fifo_pc);

                mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
        }
}

#ifdef CONFIG_MLX5_CORE_EN_DCB
static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb)
{
        int dscp_cp = 0;

        if (skb->protocol == htons(ETH_P_IP))
                dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
        else if (skb->protocol == htons(ETH_P_IPV6))
                dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;

        return priv->dcbx_dp.dscp2prio[dscp_cp];
}
#endif

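/* Map an skb to a TX queue: the channel comes from the stack's fallback
 * hash, the traffic class from DSCP (when the trust state is DSCP) or
 * from the VLAN PCP bits otherwise.
 */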
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
                       void *accel_priv, select_queue_fallback_t fallback)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        int channel_ix = fallback(dev, skb);
        u16 num_channels;
        int up = 0;

        if (!netdev_get_num_tc(dev))
                return channel_ix;

#ifdef CONFIG_MLX5_CORE_EN_DCB
        if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
                up = mlx5e_get_dscp_up(priv, skb);
        else
#endif
                if (skb_vlan_tag_present(skb))
                        up = skb->vlan_tci >> VLAN_PRIO_SHIFT;

        /* channel_ix can be larger than num_channels since
         * dev->num_real_tx_queues = num_channels * num_tc
         */
        num_channels = priv->channels.params.num_channels;
        if (channel_ix >= num_channels)
                channel_ix = reciprocal_scale(channel_ix, num_channels);

        return priv->channel_tc2txq[channel_ix][up];
}

static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
{
#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)

        return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
}

static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
{
        struct flow_keys keys;

        if (skb_transport_header_was_set(skb))
                return skb_transport_offset(skb);
        else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
                return keys.control.thoff;
        else
                return mlx5e_skb_l2_header_offset(skb);
}

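/* Number of header bytes that must be copied into the WQE inline header to
 * satisfy the device's minimum inline requirement for the configured mode;
 * capped at skb->len.
 */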
static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
                                        struct sk_buff *skb)
{
        u16 hlen;

        switch (mode) {
        case MLX5_INLINE_MODE_NONE:
                return 0;
        case MLX5_INLINE_MODE_TCP_UDP:
                hlen = eth_get_headlen(skb->data, skb_headlen(skb));
                if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
                        hlen += VLAN_HLEN;
                break;
        case MLX5_INLINE_MODE_IP:
                /* When transport header is set to zero, it means no transport
                 * header. When transport header is set to 0xff's, it means
                 * transport header wasn't set.
                 */
                if (skb_transport_offset(skb)) {
                        hlen = mlx5e_skb_l3_header_offset(skb);
                        break;
                }
                /* fall through */
        case MLX5_INLINE_MODE_L2:
        default:
                hlen = mlx5e_skb_l2_header_offset(skb);
        }
        return min_t(u16, hlen, skb->len);
}

static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
                                            unsigned int *skb_len,
                                            unsigned int len)
{
        *skb_len -= len;
        *skb_data += len;
}

static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs,
                                     unsigned char **skb_data,
                                     unsigned int *skb_len)
{
        struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
        int cpy1_sz = 2 * ETH_ALEN;
        int cpy2_sz = ihs - cpy1_sz;

        memcpy(vhdr, *skb_data, cpy1_sz);
        mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy1_sz);
        vhdr->h_vlan_proto = skb->vlan_proto;
        vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
        memcpy(&vhdr->h_vlan_encapsulated_proto, *skb_data, cpy2_sz);
        mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy2_sz);
}

static inline void
mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
{
        if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
                eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
                if (skb->encapsulation) {
                        eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
                                          MLX5_ETH_WQE_L4_INNER_CSUM;
                        sq->stats.csum_partial_inner++;
                } else {
                        eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
                        sq->stats.csum_partial++;
                }
        } else
                sq->stats.csum_none++;
}

static inline u16
mlx5e_txwqe_build_eseg_gso(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                           struct mlx5_wqe_eth_seg *eseg, unsigned int *num_bytes)
{
        u16 ihs;

        eseg->mss    = cpu_to_be16(skb_shinfo(skb)->gso_size);

        if (skb->encapsulation) {
                ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
                sq->stats.tso_inner_packets++;
                sq->stats.tso_inner_bytes += skb->len - ihs;
        } else {
                ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
                sq->stats.tso_packets++;
                sq->stats.tso_bytes += skb->len - ihs;
        }

        *num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
        return ihs;
}

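/* DMA-map the remaining linear data and every page fragment, filling one
 * data segment per mapping and recording each mapping in the DMA fifo.
 * Returns the number of mappings, or -ENOMEM on a mapping failure.
 */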
static inline int
mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                        unsigned char *skb_data, u16 headlen,
                        struct mlx5_wqe_data_seg *dseg)
{
        dma_addr_t dma_addr = 0;
        u8 num_dma          = 0;
        int i;

        if (headlen) {
                dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
                                          DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
                        return -ENOMEM;

                dseg->addr       = cpu_to_be64(dma_addr);
                dseg->lkey       = sq->mkey_be;
                dseg->byte_count = cpu_to_be32(headlen);

                mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
                num_dma++;
                dseg++;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
                int fsz = skb_frag_size(frag);

                dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
                                            DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
                        return -ENOMEM;

                dseg->addr       = cpu_to_be64(dma_addr);
                dseg->lkey       = sq->mkey_be;
                dseg->byte_count = cpu_to_be32(fsz);

                mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
                num_dma++;
                dseg++;
        }

        return num_dma;
}

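/* Finalize a TX WQE: fill the wqe_info and control segment, account the
 * bytes for BQL, advance the producer counter, stop the queue when room
 * runs low, ring the doorbell (unless more skbs are pending via xmit_more)
 * and pad the end of the ring with NOPs so no WQE wraps around.
 */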
static inline void
mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                     u8 opcode, u16 ds_cnt, u32 num_bytes, u8 num_dma,
                     struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg)
{
        struct mlx5_wq_cyc *wq = &sq->wq;
        u16 pi;

        wi->num_bytes = num_bytes;
        wi->num_dma = num_dma;
        wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
        wi->skb = skb;

        cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
        cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);

        netdev_tx_sent_queue(sq->txq, num_bytes);

        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

        sq->pc += wi->num_wqebbs;
        if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM))) {
                netif_tx_stop_queue(sq->txq);
                sq->stats.stopped++;
        }

        if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
                mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);

        /* fill sq edge with nops to avoid wqe wrap around */
        while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
                sq->db.wqe_info[pi].skb = NULL;
                mlx5e_post_nop(wq, sq->sqn, &sq->pc);
                sq->stats.nop++;
        }
}

static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                                 struct mlx5e_tx_wqe *wqe, u16 pi)
{
        struct mlx5e_tx_wqe_info *wi   = &sq->db.wqe_info[pi];

        struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
        struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;

        unsigned char *skb_data = skb->data;
        unsigned int skb_len = skb->len;
        u8  opcode = MLX5_OPCODE_SEND;
        unsigned int num_bytes;
        int num_dma;
        u16 headlen;
        u16 ds_cnt;
        u16 ihs;

        mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);

        if (skb_is_gso(skb)) {
                opcode = MLX5_OPCODE_LSO;
                ihs = mlx5e_txwqe_build_eseg_gso(sq, skb, eseg, &num_bytes);
                sq->stats.packets += skb_shinfo(skb)->gso_segs;
        } else {
                ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
                num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
                sq->stats.packets++;
        }
        sq->stats.bytes += num_bytes;
        sq->stats.xmit_more += skb->xmit_more;

        ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
        if (ihs) {
                if (skb_vlan_tag_present(skb)) {
                        mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs, &skb_data, &skb_len);
                        ihs += VLAN_HLEN;
                        sq->stats.added_vlan_packets++;
                } else {
                        memcpy(eseg->inline_hdr.start, skb_data, ihs);
                        mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
                }
                eseg->inline_hdr.sz = cpu_to_be16(ihs);
                ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS);
        } else if (skb_vlan_tag_present(skb)) {
                eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
                if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD))
                        eseg->insert.type |= cpu_to_be16(MLX5_ETH_WQE_SVLAN);
                eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
                sq->stats.added_vlan_packets++;
        }

        headlen = skb_len - skb->data_len;
        num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
                                          (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
        if (unlikely(num_dma < 0))
                goto dma_unmap_wqe_err;

        mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
                             num_bytes, num_dma, wi, cseg);

        return NETDEV_TX_OK;

dma_unmap_wqe_err:
        sq->stats.dropped++;
        mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);

        dev_kfree_skb_any(skb);

        return NETDEV_TX_OK;
}

netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_txqsq *sq = priv->txq2sq[skb_get_queue_mapping(skb)];
        struct mlx5_wq_cyc *wq = &sq->wq;
        u16 pi = sq->pc & wq->sz_m1;
        struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);

        memset(wqe, 0, sizeof(*wqe));

#ifdef CONFIG_MLX5_EN_IPSEC
        if (sq->state & BIT(MLX5E_SQ_STATE_IPSEC)) {
                skb = mlx5e_ipsec_handle_tx_skb(dev, wqe, skb);
                if (unlikely(!skb))
                        return NETDEV_TX_OK;
        }
#endif

        return mlx5e_sq_xmit(sq, skb, wqe, pi);
}

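/* TX completion handler: process up to MLX5E_TX_CQ_POLL_BUDGET CQEs,
 * unmapping DMA, delivering HW timestamps and freeing skbs, then update
 * BQL and wake the queue if enough room has been freed. Returns true when
 * the budget was exhausted, i.e. more completions may still be pending.
 */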
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
        struct mlx5e_txqsq *sq;
        struct mlx5_cqe64 *cqe;
        u32 dma_fifo_cc;
        u32 nbytes;
        u16 npkts;
        u16 sqcc;
        int i;

        sq = container_of(cq, struct mlx5e_txqsq, cq);

        if (unlikely(!MLX5E_TEST_BIT(sq->state, MLX5E_SQ_STATE_ENABLED)))
                return false;

        cqe = mlx5_cqwq_get_cqe(&cq->wq);
        if (!cqe)
                return false;

        npkts = 0;
        nbytes = 0;

        /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
         * otherwise a cq overrun may occur
         */
        sqcc = sq->cc;

        /* avoid dirtying sq cache line every cqe */
        dma_fifo_cc = sq->dma_fifo_cc;

        i = 0;
        do {
                u16 wqe_counter;
                bool last_wqe;

                mlx5_cqwq_pop(&cq->wq);

                wqe_counter = be16_to_cpu(cqe->wqe_counter);

                do {
                        struct mlx5e_tx_wqe_info *wi;
                        struct sk_buff *skb;
                        u16 ci;
                        int j;

                        last_wqe = (sqcc == wqe_counter);

                        ci = sqcc & sq->wq.sz_m1;
                        wi = &sq->db.wqe_info[ci];
                        skb = wi->skb;

                        if (unlikely(!skb)) { /* nop */
                                sqcc++;
                                continue;
                        }

                        if (unlikely(skb_shinfo(skb)->tx_flags &
                                     SKBTX_HW_TSTAMP)) {
                                struct skb_shared_hwtstamps hwts = {};

                                hwts.hwtstamp =
                                        mlx5_timecounter_cyc2time(sq->clock,
                                                                  get_cqe_ts(cqe));
                                skb_tstamp_tx(skb, &hwts);
                        }

                        for (j = 0; j < wi->num_dma; j++) {
                                struct mlx5e_sq_dma *dma =
                                        mlx5e_dma_get(sq, dma_fifo_cc++);

                                mlx5e_tx_dma_unmap(sq->pdev, dma);
                        }

                        npkts++;
                        nbytes += wi->num_bytes;
                        sqcc += wi->num_wqebbs;
                        napi_consume_skb(skb, napi_budget);
                } while (!last_wqe);

        } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));

        mlx5_cqwq_update_db_record(&cq->wq);

        /* ensure cq space is freed before enabling more cqes */
        wmb();

        sq->dma_fifo_cc = dma_fifo_cc;
        sq->cc = sqcc;

        netdev_tx_completed_queue(sq->txq, npkts, nbytes);

        if (netif_tx_queue_stopped(sq->txq) &&
            mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM)) {
                netif_tx_wake_queue(sq->txq);
                sq->stats.wake++;
        }

        return (i == MLX5E_TX_CQ_POLL_BUDGET);
}

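/* Release descriptors still outstanding when the SQ is torn down: unmap
 * their DMA and free the skbs for everything between cc and pc.
 */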
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
{
        struct mlx5e_tx_wqe_info *wi;
        struct sk_buff *skb;
        u16 ci;
        int i;

        while (sq->cc != sq->pc) {
                ci = sq->cc & sq->wq.sz_m1;
                wi = &sq->db.wqe_info[ci];
                skb = wi->skb;

                if (!skb) { /* nop */
                        sq->cc++;
                        continue;
                }

                for (i = 0; i < wi->num_dma; i++) {
                        struct mlx5e_sq_dma *dma =
                                mlx5e_dma_get(sq, sq->dma_fifo_cc++);

                        mlx5e_tx_dma_unmap(sq->pdev, dma);
                }

                dev_kfree_skb_any(skb);
                sq->cc += wi->num_wqebbs;
        }
}

#ifdef CONFIG_MLX5_CORE_IPOIB

struct mlx5_wqe_eth_pad {
        u8 rsvd0[16];
};

struct mlx5i_tx_wqe {
        struct mlx5_wqe_ctrl_seg     ctrl;
        struct mlx5_wqe_datagram_seg datagram;
        struct mlx5_wqe_eth_pad      pad;
        struct mlx5_wqe_eth_seg      eth;
};

static inline void
mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey,
                           struct mlx5_wqe_datagram_seg *dseg)
{
        memcpy(&dseg->av, av, sizeof(struct mlx5_av));
        dseg->av.dqp_dct = cpu_to_be32(dqpn | MLX5_EXTENDED_UD_AV);
        dseg->av.key.qkey.qkey = cpu_to_be32(dqkey);
}

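/* IPoIB TX: same flow as mlx5e_sq_xmit(), but the WQE also carries a
 * datagram segment with the destination address vector, QPN and Q_Key.
 */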
netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                          struct mlx5_av *av, u32 dqpn, u32 dqkey)
{
        struct mlx5_wq_cyc       *wq   = &sq->wq;
        u16                       pi   = sq->pc & wq->sz_m1;
        struct mlx5i_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
        struct mlx5e_tx_wqe_info *wi   = &sq->db.wqe_info[pi];

        struct mlx5_wqe_ctrl_seg     *cseg = &wqe->ctrl;
        struct mlx5_wqe_datagram_seg *datagram = &wqe->datagram;
        struct mlx5_wqe_eth_seg      *eseg = &wqe->eth;

        unsigned char *skb_data = skb->data;
        unsigned int skb_len = skb->len;
        u8  opcode = MLX5_OPCODE_SEND;
        unsigned int num_bytes;
        int num_dma;
        u16 headlen;
        u16 ds_cnt;
        u16 ihs;

        memset(wqe, 0, sizeof(*wqe));

        mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);

        mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);

        if (skb_is_gso(skb)) {
                opcode = MLX5_OPCODE_LSO;
                ihs = mlx5e_txwqe_build_eseg_gso(sq, skb, eseg, &num_bytes);
                sq->stats.packets += skb_shinfo(skb)->gso_segs;
        } else {
                ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
                num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
                sq->stats.packets++;
        }

        sq->stats.bytes += num_bytes;
        sq->stats.xmit_more += skb->xmit_more;

        ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
        if (ihs) {
                memcpy(eseg->inline_hdr.start, skb_data, ihs);
                mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
                eseg->inline_hdr.sz = cpu_to_be16(ihs);
                ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS);
        }

        headlen = skb_len - skb->data_len;
        num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
                                          (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
        if (unlikely(num_dma < 0))
                goto dma_unmap_wqe_err;

        mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
                             num_bytes, num_dma, wi, cseg);

        return NETDEV_TX_OK;

dma_unmap_wqe_err:
        sq->stats.dropped++;
        mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);

        dev_kfree_skb_any(skb);

        return NETDEV_TX_OK;
}

#endif