linux/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include "en.h"
#include "ipoib.h"

#define MLX5E_SQ_NOPS_ROOM  MLX5_SEND_WQE_MAX_WQEBBS
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
                            MLX5E_SQ_NOPS_ROOM)

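/* Undo a single TX DMA mapping according to how it was created:
 * dma_map_single() for the linear part, dma_map_page() for page frags.
 */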
static inline void mlx5e_tx_dma_unmap(struct device *pdev,
                                      struct mlx5e_sq_dma *dma)
{
        switch (dma->type) {
        case MLX5E_DMA_MAP_SINGLE:
                dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
                break;
        case MLX5E_DMA_MAP_PAGE:
                dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
                break;
        default:
                WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
        }
}

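/* Record a DMA mapping in the SQ's dma fifo so it can be unmapped later,
 * on completion (mlx5e_poll_tx_cq), on error unwind or on SQ teardown.
 */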
static inline void mlx5e_dma_push(struct mlx5e_txqsq *sq,
                                  dma_addr_t addr,
                                  u32 size,
                                  enum mlx5e_dma_map_type map_type)
{
        u32 i = sq->dma_fifo_pc & sq->dma_fifo_mask;

        sq->db.dma_fifo[i].addr = addr;
        sq->db.dma_fifo[i].size = size;
        sq->db.dma_fifo[i].type = map_type;
        sq->dma_fifo_pc++;
}

static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
{
        return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
}

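/* Error unwind: unmap the last num_dma mappings pushed for a WQE whose
 * construction failed, walking the dma fifo backwards.
 */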
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
{
        int i;

        for (i = 0; i < num_dma; i++) {
                struct mlx5e_sq_dma *last_pushed_dma =
                        mlx5e_dma_get(sq, --sq->dma_fifo_pc);

                mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
        }
}

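/* ndo_select_queue: map an skb to a TX queue.  The stack's fallback picks a
 * queue index; when multiple TCs are configured it is reduced to a channel
 * index and combined with the 802.1Q priority via channel_tc2txq.
 */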
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
                       void *accel_priv, select_queue_fallback_t fallback)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        int channel_ix = fallback(dev, skb);
        u16 num_channels;
        int up = 0;

        if (!netdev_get_num_tc(dev))
                return channel_ix;

        if (skb_vlan_tag_present(skb))
                up = skb->vlan_tci >> VLAN_PRIO_SHIFT;

        /* channel_ix can be larger than num_channels since
         * dev->num_real_tx_queues = num_channels * num_tc
         */
        num_channels = priv->channels.params.num_channels;
        if (channel_ix >= num_channels)
                channel_ix = reciprocal_scale(channel_ix, num_channels);

        return priv->channel_tc2txq[channel_ix][up];
}

static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
{
#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)

        return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
}

static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
{
        struct flow_keys keys;

        if (skb_transport_header_was_set(skb))
                return skb_transport_offset(skb);
        else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
                return keys.control.thoff;
        else
                return mlx5e_skb_l2_header_offset(skb);
}

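/* Compute how many header bytes must be copied inline into the WQE's eth
 * segment, according to the SQ's minimum inline mode
 * (none / L2 / IP / TCP_UDP) required by the device.
 */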
static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
                                                 struct sk_buff *skb)
{
        int hlen;

        switch (mode) {
        case MLX5_INLINE_MODE_NONE:
                return 0;
        case MLX5_INLINE_MODE_TCP_UDP:
                hlen = eth_get_headlen(skb->data, skb_headlen(skb));
                if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
                        hlen += VLAN_HLEN;
                return hlen;
        case MLX5_INLINE_MODE_IP:
                /* When transport header is set to zero, it means no transport
                 * header. When transport header is set to 0xff's, it means
                 * transport header wasn't set.
                 */
                if (skb_transport_offset(skb))
                        return mlx5e_skb_l3_header_offset(skb);
                /* fall through */
        case MLX5_INLINE_MODE_L2:
        default:
                return mlx5e_skb_l2_header_offset(skb);
        }
}

static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
                                            unsigned int *skb_len,
                                            unsigned int len)
{
        *skb_len -= len;
        *skb_data += len;
}

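/* Build the inline header while inserting an 802.1Q tag in software:
 * copy the MAC addresses, write the VLAN proto/TCI, then copy the rest of
 * the ihs header bytes; skb_data/skb_len are advanced past what was inlined.
 */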
static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs,
                                     unsigned char **skb_data,
                                     unsigned int *skb_len)
{
        struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
        int cpy1_sz = 2 * ETH_ALEN;
        int cpy2_sz = ihs - cpy1_sz;

        memcpy(vhdr, *skb_data, cpy1_sz);
        mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy1_sz);
        vhdr->h_vlan_proto = skb->vlan_proto;
        vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
        memcpy(&vhdr->h_vlan_encapsulated_proto, *skb_data, cpy2_sz);
        mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy2_sz);
}

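/* Set checksum offload flags in the eth segment: outer L3 always, plus
 * inner L3/L4 for encapsulated packets, or outer L4 otherwise.  Packets
 * without CHECKSUM_PARTIAL are counted as csum_none.
 */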
static inline void
mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
{
        if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
                eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
                if (skb->encapsulation) {
                        eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
                                          MLX5_ETH_WQE_L4_INNER_CSUM;
                        sq->stats.csum_partial_inner++;
                } else {
                        eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
                }
        } else
                sq->stats.csum_none++;
}

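/* Fill the eth segment for TSO: program the MSS, compute the inline header
 * size (through the end of the (inner) TCP header) and the total wire
 * bytes, accounting for the headers replicated in every segment:
 * num_bytes = skb->len + (gso_segs - 1) * ihs.
 */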
static inline u16
mlx5e_txwqe_build_eseg_gso(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                           struct mlx5_wqe_eth_seg *eseg, unsigned int *num_bytes)
{
        u16 ihs;

        eseg->mss    = cpu_to_be16(skb_shinfo(skb)->gso_size);

        if (skb->encapsulation) {
                ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
                sq->stats.tso_inner_packets++;
                sq->stats.tso_inner_bytes += skb->len - ihs;
        } else {
                ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
                sq->stats.tso_packets++;
                sq->stats.tso_bytes += skb->len - ihs;
        }

        *num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
        return ihs;
}

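/* DMA-map what is left of the linear part (headlen) and every page frag,
 * fill one data segment per mapping and record each mapping in the dma
 * fifo.  Returns the number of mappings pushed, or -ENOMEM on a mapping
 * error (the caller unwinds via mlx5e_dma_unmap_wqe_err).
 */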
static inline int
mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                        unsigned char *skb_data, u16 headlen,
                        struct mlx5_wqe_data_seg *dseg)
{
        dma_addr_t dma_addr = 0;
        u8 num_dma          = 0;
        int i;

        if (headlen) {
                dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
                                          DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
                        return -ENOMEM;

                dseg->addr       = cpu_to_be64(dma_addr);
                dseg->lkey       = sq->mkey_be;
                dseg->byte_count = cpu_to_be32(headlen);

                mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
                num_dma++;
                dseg++;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
                int fsz = skb_frag_size(frag);

                dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
                                     DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
                        return -ENOMEM;

                dseg->addr       = cpu_to_be64(dma_addr);
                dseg->lkey       = sq->mkey_be;
                dseg->byte_count = cpu_to_be32(fsz);

                mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
                num_dma++;
                dseg++;
        }

        return num_dma;
}

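/* Finish posting a WQE: fill the ctrl segment (opcode, producer counter,
 * sqn and DS count), update BQL accounting, stop the queue if there is no
 * room left for a maximum-size WQE plus NOPs, ring the doorbell unless more
 * skbs are pending (xmit_more), and pad the end of the ring with NOPs so a
 * WQE never wraps around the work queue.
 */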
static inline void
mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                     u8 opcode, u16 ds_cnt, u32 num_bytes, u8 num_dma,
                     struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg)
{
        struct mlx5_wq_cyc *wq = &sq->wq;
        u16 pi;

        wi->num_bytes = num_bytes;
        wi->num_dma = num_dma;
        wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
        wi->skb = skb;

        cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
        cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);

        netdev_tx_sent_queue(sq->txq, num_bytes);

        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

        sq->pc += wi->num_wqebbs;
        if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM))) {
                netif_tx_stop_queue(sq->txq);
                sq->stats.stopped++;
        }

        if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
                mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);

        /* fill sq edge with nops to avoid wqe wrap around */
        while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
                sq->db.wqe_info[pi].skb = NULL;
                mlx5e_post_nop(wq, sq->sqn, &sq->pc);
                sq->stats.nop++;
        }
}

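/* Main Ethernet transmit path for one skb.  The WQE is a ctrl segment plus
 * an eth segment (optionally carrying the inline headers and/or a
 * HW-inserted VLAN tag) followed by one data segment per DMA mapping;
 * ds_cnt counts 16-byte (MLX5_SEND_WQE_DS) units.
 */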
static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb)
{
        struct mlx5_wq_cyc       *wq   = &sq->wq;

        u16 pi = sq->pc & wq->sz_m1;
        struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
        struct mlx5e_tx_wqe_info *wi   = &sq->db.wqe_info[pi];

        struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
        struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;

        unsigned char *skb_data = skb->data;
        unsigned int skb_len = skb->len;
        u8  opcode = MLX5_OPCODE_SEND;
        unsigned int num_bytes;
        int num_dma;
        u16 headlen;
        u16 ds_cnt;
        u16 ihs;

        memset(wqe, 0, sizeof(*wqe));

        mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);

        if (skb_is_gso(skb)) {
                opcode = MLX5_OPCODE_LSO;
                ihs = mlx5e_txwqe_build_eseg_gso(sq, skb, eseg, &num_bytes);
                sq->stats.packets += skb_shinfo(skb)->gso_segs;
        } else {
                ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
                num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
                sq->stats.packets++;
        }
        sq->stats.bytes += num_bytes;
        sq->stats.xmit_more += skb->xmit_more;

        ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
        if (ihs) {
                if (skb_vlan_tag_present(skb)) {
                        mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs, &skb_data, &skb_len);
                        ihs += VLAN_HLEN;
                } else {
                        memcpy(eseg->inline_hdr.start, skb_data, ihs);
                        mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
                }
                eseg->inline_hdr.sz = cpu_to_be16(ihs);
                ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS);
        } else if (skb_vlan_tag_present(skb)) {
                eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
                eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
        }

        headlen = skb_len - skb->data_len;
        num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
                                          (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
        if (unlikely(num_dma < 0))
                goto dma_unmap_wqe_err;

        mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
                             num_bytes, num_dma, wi, cseg);

        return NETDEV_TX_OK;

dma_unmap_wqe_err:
        sq->stats.dropped++;
        mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);

        dev_kfree_skb_any(skb);

        return NETDEV_TX_OK;
}

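/* ndo_start_xmit entry point: look up the SQ from the queue mapping chosen
 * by mlx5e_select_queue() and post the skb on it.
 */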
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_txqsq *sq = priv->txq2sq[skb_get_queue_mapping(skb)];

        return mlx5e_sq_xmit(sq, skb);
}

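/* NAPI TX completion handler: consume up to MLX5E_TX_CQ_POLL_BUDGET CQEs.
 * Each CQE reports a wqe_counter; every WQE up to and including it is
 * completed, so its DMA mappings are released, a HW timestamp is delivered
 * when requested and the skb is freed.  BQL is updated and the queue woken
 * once there is room again.  Returns true if the whole budget was used
 * (i.e. more work may be pending).
 */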
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
        struct mlx5e_txqsq *sq;
        u32 dma_fifo_cc;
        u32 nbytes;
        u16 npkts;
        u16 sqcc;
        int i;

        sq = container_of(cq, struct mlx5e_txqsq, cq);

        if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
                return false;

        npkts = 0;
        nbytes = 0;

        /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
         * otherwise a cq overrun may occur
         */
        sqcc = sq->cc;

        /* avoid dirtying sq cache line every cqe */
        dma_fifo_cc = sq->dma_fifo_cc;

        for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
                struct mlx5_cqe64 *cqe;
                u16 wqe_counter;
                bool last_wqe;

                cqe = mlx5e_get_cqe(cq);
                if (!cqe)
                        break;

                mlx5_cqwq_pop(&cq->wq);

                wqe_counter = be16_to_cpu(cqe->wqe_counter);

                do {
                        struct mlx5e_tx_wqe_info *wi;
                        struct sk_buff *skb;
                        u16 ci;
                        int j;

                        last_wqe = (sqcc == wqe_counter);

                        ci = sqcc & sq->wq.sz_m1;
                        wi = &sq->db.wqe_info[ci];
                        skb = wi->skb;

                        if (unlikely(!skb)) { /* nop */
                                sqcc++;
                                continue;
                        }

                        if (unlikely(skb_shinfo(skb)->tx_flags &
                                     SKBTX_HW_TSTAMP)) {
                                struct skb_shared_hwtstamps hwts = {};

                                mlx5e_fill_hwstamp(sq->tstamp,
                                                   get_cqe_ts(cqe), &hwts);
                                skb_tstamp_tx(skb, &hwts);
                        }

                        for (j = 0; j < wi->num_dma; j++) {
                                struct mlx5e_sq_dma *dma =
                                        mlx5e_dma_get(sq, dma_fifo_cc++);

                                mlx5e_tx_dma_unmap(sq->pdev, dma);
                        }

                        npkts++;
                        nbytes += wi->num_bytes;
                        sqcc += wi->num_wqebbs;
                        napi_consume_skb(skb, napi_budget);
                } while (!last_wqe);
        }

        mlx5_cqwq_update_db_record(&cq->wq);

        /* ensure cq space is freed before enabling more cqes */
        wmb();

        sq->dma_fifo_cc = dma_fifo_cc;
        sq->cc = sqcc;

        netdev_tx_completed_queue(sq->txq, npkts, nbytes);

        if (netif_tx_queue_stopped(sq->txq) &&
            mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM)) {
                netif_tx_wake_queue(sq->txq);
                sq->stats.wake++;
        }

        return (i == MLX5E_TX_CQ_POLL_BUDGET);
}

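/* Reclaim every WQE still outstanding on an SQ that is being drained:
 * unmap its DMA and free its skb (NOP slots are simply skipped).
 */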
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
{
        struct mlx5e_tx_wqe_info *wi;
        struct sk_buff *skb;
        u16 ci;
        int i;

        while (sq->cc != sq->pc) {
                ci = sq->cc & sq->wq.sz_m1;
                wi = &sq->db.wqe_info[ci];
                skb = wi->skb;

                if (!skb) { /* nop */
                        sq->cc++;
                        continue;
                }

                for (i = 0; i < wi->num_dma; i++) {
                        struct mlx5e_sq_dma *dma =
                                mlx5e_dma_get(sq, sq->dma_fifo_cc++);

                        mlx5e_tx_dma_unmap(sq->pdev, dma);
                }

                dev_kfree_skb_any(skb);
                sq->cc += wi->num_wqebbs;
        }
}

#ifdef CONFIG_MLX5_CORE_IPOIB

struct mlx5_wqe_eth_pad {
        u8 rsvd0[16];
};

struct mlx5i_tx_wqe {
        struct mlx5_wqe_ctrl_seg     ctrl;
        struct mlx5_wqe_datagram_seg datagram;
        struct mlx5_wqe_eth_pad      pad;
        struct mlx5_wqe_eth_seg      eth;
};

static inline void
mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey,
                           struct mlx5_wqe_datagram_seg *dseg)
{
        memcpy(&dseg->av, av, sizeof(struct mlx5_av));
        dseg->av.dqp_dct = cpu_to_be32(dqpn | MLX5_EXTENDED_UD_AV);
        dseg->av.key.qkey.qkey = cpu_to_be32(dqkey);
}

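/* IPoIB transmit path: same flow as mlx5e_sq_xmit(), but the WQE also
 * carries a datagram (address vector) segment for the destination QP and
 * there is no VLAN handling.
 */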
netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                          struct mlx5_av *av, u32 dqpn, u32 dqkey)
{
        struct mlx5_wq_cyc       *wq   = &sq->wq;
        u16                       pi   = sq->pc & wq->sz_m1;
        struct mlx5i_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
        struct mlx5e_tx_wqe_info *wi   = &sq->db.wqe_info[pi];

        struct mlx5_wqe_ctrl_seg     *cseg = &wqe->ctrl;
        struct mlx5_wqe_datagram_seg *datagram = &wqe->datagram;
        struct mlx5_wqe_eth_seg      *eseg = &wqe->eth;

        unsigned char *skb_data = skb->data;
        unsigned int skb_len = skb->len;
        u8  opcode = MLX5_OPCODE_SEND;
        unsigned int num_bytes;
        int num_dma;
        u16 headlen;
        u16 ds_cnt;
        u16 ihs;

        memset(wqe, 0, sizeof(*wqe));

        mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);

        mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);

        if (skb_is_gso(skb)) {
                opcode = MLX5_OPCODE_LSO;
                ihs = mlx5e_txwqe_build_eseg_gso(sq, skb, eseg, &num_bytes);
        } else {
                ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
                num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
        }

        ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
        if (ihs) {
                memcpy(eseg->inline_hdr.start, skb_data, ihs);
                mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
                eseg->inline_hdr.sz = cpu_to_be16(ihs);
                ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS);
        }

        headlen = skb_len - skb->data_len;
        num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
                                          (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
        if (unlikely(num_dma < 0))
                goto dma_unmap_wqe_err;

        mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
                             num_bytes, num_dma, wi, cseg);

        return NETDEV_TX_OK;

dma_unmap_wqe_err:
        sq->stats.dropped++;
        mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);

        dev_kfree_skb_any(skb);

        return NETDEV_TX_OK;
}

#endif