linux/drivers/infiniband/hw/hfi1/ipoib_tx.c
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright(c) 2020 Intel Corporation.
 *
 */

/*
 * This file contains HFI1 support for IPOIB SDMA functionality
 */

#include <linux/log2.h>
#include <linux/circ_buf.h>

#include "sdma.h"
#include "verbs.h"
#include "trace_ibhdrs.h"
#include "ipoib.h"
#include "trace_tx.h"

/*
 * Convenience helpers for the circular tx ring.  The ring size is a
 * power of two, so masking with (size - 1) implements index wrap-around
 * and CIRC_PREV(0, size) yields size - 1 via two's complement.
 */
#define CIRC_ADD(val, add, size) (((val) + (add)) & ((size) - 1))
#define CIRC_NEXT(val, size) CIRC_ADD(val, 1, size)
#define CIRC_PREV(val, size) CIRC_ADD(val, -1, size)

/**
 * struct ipoib_txreq - IPOIB transmit descriptor
 * @txreq: sdma transmit request
 * @sdma_hdr: 9b ib headers
 * @sdma_status: status returned by sdma engine
 * @priv: ipoib netdev private data
 * @txq: txq on which skb was output
 * @skb: skb to send
 */
struct ipoib_txreq {
        struct sdma_txreq           txreq;
        struct hfi1_sdma_header     sdma_hdr;
        int                         sdma_status;
        struct hfi1_ipoib_dev_priv *priv;
        struct hfi1_ipoib_txq      *txq;
        struct sk_buff             *skb;
};

struct ipoib_txparms {
        struct hfi1_devdata        *dd;
        struct rdma_ah_attr        *ah_attr;
        struct hfi1_ibport         *ibp;
        struct hfi1_ipoib_txq      *txq;
        union hfi1_ipoib_flow       flow;
        u32                         dqpn;
        u8                          hdr_dwords;
        u8                          entropy;
};

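/*
 * sent_txreqs and complete_txreqs are free-running counters; their
 * unsigned difference is the number of txreqs still in flight, which
 * remains correct even after the counters wrap.
 */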
static u64 hfi1_ipoib_txreqs(const u64 sent, const u64 completed)
{
        return sent - completed;
}

static u64 hfi1_ipoib_used(struct hfi1_ipoib_txq *txq)
{
        return hfi1_ipoib_txreqs(txq->sent_txreqs,
                                 atomic64_read(&txq->complete_txreqs));
}

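/*
 * A txq can be stopped for two independent reasons: the completion ring
 * filling up (ring_full) and the sdma engine running out of descriptors
 * (no_desc).  @stops counts the outstanding reasons, so the subqueue is
 * stopped on the first reason and only woken once every reason has
 * cleared.
 */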
static void hfi1_ipoib_stop_txq(struct hfi1_ipoib_txq *txq)
{
        trace_hfi1_txq_stop(txq);
        if (atomic_inc_return(&txq->stops) == 1)
                netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
}

static void hfi1_ipoib_wake_txq(struct hfi1_ipoib_txq *txq)
{
        trace_hfi1_txq_wake(txq);
        if (atomic_dec_and_test(&txq->stops))
                netif_wake_subqueue(txq->priv->netdev, txq->q_idx);
}

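/*
 * High and low watermarks for the number of outstanding txreqs: the
 * queue is stopped when use reaches the high watermark and is not
 * restarted until it drains below the low watermark (half full), which
 * gives the flow control some hysteresis.
 */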
static uint hfi1_ipoib_ring_hwat(struct hfi1_ipoib_txq *txq)
{
        return min_t(uint, txq->priv->netdev->tx_queue_len,
                     txq->tx_ring.max_items - 1);
}

static uint hfi1_ipoib_ring_lwat(struct hfi1_ipoib_txq *txq)
{
        return min_t(uint, txq->priv->netdev->tx_queue_len,
                     txq->tx_ring.max_items) >> 1;
}

static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq)
{
        ++txq->sent_txreqs;
        if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq) &&
            !atomic_xchg(&txq->ring_full, 1)) {
                trace_hfi1_txq_full(txq);
                hfi1_ipoib_stop_txq(txq);
        }
}

static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
{
        struct net_device *dev = txq->priv->netdev;

        /* If shutting down just return as queue state is irrelevant */
        if (unlikely(dev->reg_state != NETREG_REGISTERED))
                return;

        /*
         * When the queue has been drained to less than half full it will be
         * restarted.
         * The size of the txreq ring is fixed at initialization.
         * The tx queue len can be adjusted upward while the interface is
         * running, and can become large enough to overflow the txreq ring.
         * Use the minimum of the current tx_queue_len and the ring's max
         * txreqs to protect against ring overflow.
         */
        if (hfi1_ipoib_used(txq) < hfi1_ipoib_ring_lwat(txq) &&
            atomic_xchg(&txq->ring_full, 0)) {
                trace_hfi1_txq_xmit_unstopped(txq);
                hfi1_ipoib_wake_txq(txq);
        }
}

static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget)
{
        struct hfi1_ipoib_dev_priv *priv = tx->priv;

        if (likely(!tx->sdma_status)) {
                dev_sw_netstats_tx_add(priv->netdev, 1, tx->skb->len);
        } else {
                ++priv->netdev->stats.tx_errors;
                dd_dev_warn(priv->dd,
                            "%s: Status = 0x%x pbc 0x%llx txq = %d sde = %d\n",
                            __func__, tx->sdma_status,
                            le64_to_cpu(tx->sdma_hdr.pbc), tx->txq->q_idx,
                            tx->txq->sde->this_idx);
        }

        napi_consume_skb(tx->skb, budget);
        sdma_txclean(priv->dd, &tx->txreq);
        kmem_cache_free(priv->txreq_cache, tx);
}

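/*
 * Consumer side of the single-producer/single-consumer tx completion
 * ring.  The acquire load of head pairs with the release store in
 * hfi1_ipoib_add_tx() so a slot's contents are visible before it is
 * freed, and the release store of tail publishes the freed slots back
 * to the producer.
 */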
static int hfi1_ipoib_drain_tx_ring(struct hfi1_ipoib_txq *txq, int budget)
{
        struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring;
        unsigned long head;
        unsigned long tail;
        unsigned int max_tx;
        int work_done;
        int tx_count;

        spin_lock_bh(&tx_ring->consumer_lock);

        /* Read index before reading contents at that index. */
        head = smp_load_acquire(&tx_ring->head);
        tail = tx_ring->tail;
        max_tx = tx_ring->max_items;

        work_done = min_t(int, CIRC_CNT(head, tail, max_tx), budget);

        for (tx_count = work_done; tx_count; tx_count--) {
                hfi1_ipoib_free_tx(tx_ring->items[tail], budget);
                tail = CIRC_NEXT(tail, max_tx);
        }

        atomic64_add(work_done, &txq->complete_txreqs);

        /* Finished freeing tx items so store the tail value. */
        smp_store_release(&tx_ring->tail, tail);

        spin_unlock_bh(&tx_ring->consumer_lock);

        hfi1_ipoib_check_queue_stopped(txq);

        return work_done;
}

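/*
 * napi poll callback for the tx completion ring.  Completing fewer
 * entries than the budget lets napi go idle until the producer
 * schedules it again.
 */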
static int hfi1_ipoib_process_tx_ring(struct napi_struct *napi, int budget)
{
        struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(napi->dev);
        struct hfi1_ipoib_txq *txq = &priv->txqs[napi - priv->tx_napis];

        int work_done = hfi1_ipoib_drain_tx_ring(txq, budget);

        if (work_done < budget)
                napi_complete_done(napi, work_done);

        return work_done;
}

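/*
 * Producer side of the tx completion ring, called from the sdma
 * completion handler.  The release store of head pairs with the acquire
 * load in hfi1_ipoib_drain_tx_ring() so napi only sees fully written
 * slots.
 */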
static void hfi1_ipoib_add_tx(struct ipoib_txreq *tx)
{
        struct hfi1_ipoib_circ_buf *tx_ring = &tx->txq->tx_ring;
        unsigned long head;
        unsigned long tail;
        size_t max_tx;

        spin_lock(&tx_ring->producer_lock);

        head = tx_ring->head;
        tail = READ_ONCE(tx_ring->tail);
        max_tx = tx_ring->max_items;

        if (likely(CIRC_SPACE(head, tail, max_tx))) {
                tx_ring->items[head] = tx;

                /* Finish storing txreq before incrementing head. */
                smp_store_release(&tx_ring->head, CIRC_ADD(head, 1, max_tx));
                napi_schedule_irqoff(tx->txq->napi);
        } else {
                struct hfi1_ipoib_txq *txq = tx->txq;
                struct hfi1_ipoib_dev_priv *priv = tx->priv;

                /* Ring was full */
                hfi1_ipoib_free_tx(tx, 0);
                atomic64_inc(&txq->complete_txreqs);
                dd_dev_dbg(priv->dd, "txq %d full.\n", txq->q_idx);
        }

        spin_unlock(&tx_ring->producer_lock);
}

static void hfi1_ipoib_sdma_complete(struct sdma_txreq *txreq, int status)
{
        struct ipoib_txreq *tx = container_of(txreq, struct ipoib_txreq, txreq);

        tx->sdma_status = status;

        hfi1_ipoib_add_tx(tx);
}

static int hfi1_ipoib_build_ulp_payload(struct ipoib_txreq *tx,
                                        struct ipoib_txparms *txp)
{
        struct hfi1_devdata *dd = txp->dd;
        struct sdma_txreq *txreq = &tx->txreq;
        struct sk_buff *skb = tx->skb;
        int ret = 0;
        int i;

        if (skb_headlen(skb)) {
                ret = sdma_txadd_kvaddr(dd, txreq, skb->data, skb_headlen(skb));
                if (unlikely(ret))
                        return ret;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                ret = sdma_txadd_page(dd,
                                      txreq,
                                      skb_frag_page(frag),
                                      frag->bv_offset,
                                      skb_frag_size(frag));
                if (unlikely(ret))
                        break;
        }

        return ret;
}

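/*
 * Build the sdma descriptor list for one packet: the pbc plus 9B IB
 * header held in the txreq itself, followed by the skb linear data and
 * its page fragments.
 */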
static int hfi1_ipoib_build_tx_desc(struct ipoib_txreq *tx,
                                    struct ipoib_txparms *txp)
{
        struct hfi1_devdata *dd = txp->dd;
        struct sdma_txreq *txreq = &tx->txreq;
        struct hfi1_sdma_header *sdma_hdr = &tx->sdma_hdr;
        u16 pkt_bytes =
                sizeof(sdma_hdr->pbc) + (txp->hdr_dwords << 2) + tx->skb->len;
        int ret;

        ret = sdma_txinit(txreq, 0, pkt_bytes, hfi1_ipoib_sdma_complete);
        if (unlikely(ret))
                return ret;

        /* add pbc + headers */
        ret = sdma_txadd_kvaddr(dd,
                                txreq,
                                sdma_hdr,
                                sizeof(sdma_hdr->pbc) + (txp->hdr_dwords << 2));
        if (unlikely(ret))
                return ret;

        /* add the ulp payload */
        return hfi1_ipoib_build_ulp_payload(tx, txp);
}

static void hfi1_ipoib_build_ib_tx_headers(struct ipoib_txreq *tx,
                                           struct ipoib_txparms *txp)
{
        struct hfi1_ipoib_dev_priv *priv = tx->priv;
        struct hfi1_sdma_header *sdma_hdr = &tx->sdma_hdr;
        struct sk_buff *skb = tx->skb;
        struct hfi1_pportdata *ppd = ppd_from_ibp(txp->ibp);
        struct rdma_ah_attr *ah_attr = txp->ah_attr;
        struct ib_other_headers *ohdr;
        struct ib_grh *grh;
        u16 dwords;
        u16 slid;
        u16 dlid;
        u16 lrh0;
        u32 bth0;
        u32 sqpn = (u32)(priv->netdev->dev_addr[1] << 16 |
                         priv->netdev->dev_addr[2] << 8 |
                         priv->netdev->dev_addr[3]);
        u16 payload_dwords;
        u8 pad_cnt;

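        /*
         * -skb->len & 3 is the number of pad bytes (0-3) that rounds the
         * payload up to a 4-byte boundary, e.g. len == 5 -> pad_cnt == 3.
         */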
        pad_cnt = -skb->len & 3;

        /* Includes ICRC */
        payload_dwords = ((skb->len + pad_cnt) >> 2) + SIZE_OF_CRC;

        /* header size in dwords LRH+BTH+DETH = (8+12+8)/4. */
        txp->hdr_dwords = 7;

        if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
                grh = &sdma_hdr->hdr.ibh.u.l.grh;
                txp->hdr_dwords +=
                        hfi1_make_grh(txp->ibp,
                                      grh,
                                      rdma_ah_read_grh(ah_attr),
                                      txp->hdr_dwords - LRH_9B_DWORDS,
                                      payload_dwords);
                lrh0 = HFI1_LRH_GRH;
                ohdr = &sdma_hdr->hdr.ibh.u.l.oth;
        } else {
                lrh0 = HFI1_LRH_BTH;
                ohdr = &sdma_hdr->hdr.ibh.u.oth;
        }

        lrh0 |= (rdma_ah_get_sl(ah_attr) & 0xf) << 4;
        lrh0 |= (txp->flow.sc5 & 0xf) << 12;

        dlid = opa_get_lid(rdma_ah_get_dlid(ah_attr), 9B);
        if (dlid == be16_to_cpu(IB_LID_PERMISSIVE)) {
                slid = be16_to_cpu(IB_LID_PERMISSIVE);
        } else {
                u16 lid = (u16)ppd->lid;

                if (lid) {
                        lid |= rdma_ah_get_path_bits(ah_attr) &
                                ((1 << ppd->lmc) - 1);
                        slid = lid;
                } else {
                        slid = be16_to_cpu(IB_LID_PERMISSIVE);
                }
        }

        /* Includes ICRC */
        dwords = txp->hdr_dwords + payload_dwords;

        /* Build the lrh */
        sdma_hdr->hdr.hdr_type = HFI1_PKT_TYPE_9B;
        hfi1_make_ib_hdr(&sdma_hdr->hdr.ibh, lrh0, dwords, dlid, slid);

        /*
         * Build the bth.  bth2 carries a packet sequence number derived
         * from the free-running send counter.
         */
        bth0 = (IB_OPCODE_UD_SEND_ONLY << 24) | (pad_cnt << 20) | priv->pkey;

        ohdr->bth[0] = cpu_to_be32(bth0);
        ohdr->bth[1] = cpu_to_be32(txp->dqpn);
        ohdr->bth[2] = cpu_to_be32(mask_psn((u32)txp->txq->sent_txreqs));

        /* Build the deth */
        ohdr->u.ud.deth[0] = cpu_to_be32(priv->qkey);
        ohdr->u.ud.deth[1] = cpu_to_be32((txp->entropy <<
                                          HFI1_IPOIB_ENTROPY_SHIFT) | sqpn);

        /* Construct the pbc. */
        sdma_hdr->pbc =
                cpu_to_le64(create_pbc(ppd,
                                       ib_is_sc5(txp->flow.sc5) <<
                                                              PBC_DC_INFO_SHIFT,
                                       0,
                                       sc_to_vlt(priv->dd, txp->flow.sc5),
                                       dwords - SIZE_OF_CRC +
                                                (sizeof(sdma_hdr->pbc) >> 2)));
}

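/*
 * Allocate and build a txreq.  If this skb's flow (sc5/tx queue pair)
 * differs from the last one used on this txq, re-select the sdma engine
 * so a given flow maps consistently onto one engine.
 */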
static struct ipoib_txreq *hfi1_ipoib_send_dma_common(struct net_device *dev,
                                                      struct sk_buff *skb,
                                                      struct ipoib_txparms *txp)
{
        struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
        struct ipoib_txreq *tx;
        int ret;

        tx = kmem_cache_alloc_node(priv->txreq_cache,
                                   GFP_ATOMIC,
                                   priv->dd->node);
        if (unlikely(!tx))
                return ERR_PTR(-ENOMEM);

        /* so that we can test if the sdma descriptors are there */
        tx->txreq.num_desc = 0;
        tx->priv = priv;
        tx->txq = txp->txq;
        tx->skb = skb;
        INIT_LIST_HEAD(&tx->txreq.list);

        hfi1_ipoib_build_ib_tx_headers(tx, txp);

        ret = hfi1_ipoib_build_tx_desc(tx, txp);
        if (likely(!ret)) {
                if (txp->txq->flow.as_int != txp->flow.as_int) {
                        txp->txq->flow.tx_queue = txp->flow.tx_queue;
                        txp->txq->flow.sc5 = txp->flow.sc5;
                        txp->txq->sde =
                                sdma_select_engine_sc(priv->dd,
                                                      txp->flow.tx_queue,
                                                      txp->flow.sc5);
                        trace_hfi1_flow_switch(txp->txq);
                }

                return tx;
        }

        sdma_txclean(priv->dd, &tx->txreq);
        kmem_cache_free(priv->txreq_cache, tx);

        return ERR_PTR(ret);
}

static int hfi1_ipoib_submit_tx_list(struct net_device *dev,
                                     struct hfi1_ipoib_txq *txq)
{
        int ret;
        u16 count_out;

        ret = sdma_send_txlist(txq->sde,
                               iowait_get_ib_work(&txq->wait),
                               &txq->tx_list,
                               &count_out);
        if (likely(!ret) || ret == -EBUSY || ret == -ECOMM)
                return ret;

        dd_dev_warn(txq->priv->dd, "cannot send skb tx list, err %d.\n", ret);

        return ret;
}

static int hfi1_ipoib_flush_tx_list(struct net_device *dev,
                                    struct hfi1_ipoib_txq *txq)
{
        int ret = 0;

        if (!list_empty(&txq->tx_list)) {
                /* Flush the current list */
                ret = hfi1_ipoib_submit_tx_list(dev, txq);

                if (unlikely(ret))
                        if (ret != -EBUSY)
                                ++dev->stats.tx_carrier_errors;
        }

        return ret;
}

static int hfi1_ipoib_submit_tx(struct hfi1_ipoib_txq *txq,
                                struct ipoib_txreq *tx)
{
        int ret;

        ret = sdma_send_txreq(txq->sde,
                              iowait_get_ib_work(&txq->wait),
                              &tx->txreq,
                              txq->pkts_sent);
        if (likely(!ret)) {
                txq->pkts_sent = true;
                iowait_starve_clear(txq->pkts_sent, &txq->wait);
        }

        return ret;
}

static int hfi1_ipoib_send_dma_single(struct net_device *dev,
                                      struct sk_buff *skb,
                                      struct ipoib_txparms *txp)
{
        struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
        struct hfi1_ipoib_txq *txq = txp->txq;
        struct ipoib_txreq *tx;
        int ret;

        tx = hfi1_ipoib_send_dma_common(dev, skb, txp);
        if (IS_ERR(tx)) {
                int ret = PTR_ERR(tx);

                dev_kfree_skb_any(skb);

                if (ret == -ENOMEM)
                        ++dev->stats.tx_errors;
                else
                        ++dev->stats.tx_carrier_errors;

                return NETDEV_TX_OK;
        }

        ret = hfi1_ipoib_submit_tx(txq, tx);
        if (likely(!ret)) {
tx_ok:
                trace_sdma_output_ibhdr(tx->priv->dd,
                                        &tx->sdma_hdr.hdr,
                                        ib_is_sc5(txp->flow.sc5));
                hfi1_ipoib_check_queue_depth(txq);
                return NETDEV_TX_OK;
        }

        txq->pkts_sent = false;

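        /*
         * -EBUSY and -ECOMM mean the sdma layer now owns the txreq (it
         * was queued for a later submit or for flushing), so it must not
         * be freed here; report success to the stack.
         */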
        if (ret == -EBUSY || ret == -ECOMM)
                goto tx_ok;

        sdma_txclean(priv->dd, &tx->txreq);
        dev_kfree_skb_any(skb);
        kmem_cache_free(priv->txreq_cache, tx);
        ++dev->stats.tx_carrier_errors;

        return NETDEV_TX_OK;
}

static int hfi1_ipoib_send_dma_list(struct net_device *dev,
                                    struct sk_buff *skb,
                                    struct ipoib_txparms *txp)
{
        struct hfi1_ipoib_txq *txq = txp->txq;
        struct ipoib_txreq *tx;

        /* Has the flow changed? */
        if (txq->flow.as_int != txp->flow.as_int) {
                int ret;

                trace_hfi1_flow_flush(txq);
                ret = hfi1_ipoib_flush_tx_list(dev, txq);
                if (unlikely(ret)) {
                        if (ret == -EBUSY)
                                ++dev->stats.tx_dropped;
                        dev_kfree_skb_any(skb);
                        return NETDEV_TX_OK;
                }
        }
        tx = hfi1_ipoib_send_dma_common(dev, skb, txp);
        if (IS_ERR(tx)) {
                int ret = PTR_ERR(tx);

                dev_kfree_skb_any(skb);

                if (ret == -ENOMEM)
                        ++dev->stats.tx_errors;
                else
                        ++dev->stats.tx_carrier_errors;

                return NETDEV_TX_OK;
        }

        list_add_tail(&tx->txreq.list, &txq->tx_list);

        hfi1_ipoib_check_queue_depth(txq);

        trace_sdma_output_ibhdr(tx->priv->dd,
                                &tx->sdma_hdr.hdr,
                                ib_is_sc5(txp->flow.sc5));

        if (!netdev_xmit_more())
                (void)hfi1_ipoib_flush_tx_list(dev, txq);

        return NETDEV_TX_OK;
}

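/*
 * Derive a flow entropy byte for the deth: XOR the first four bytes of
 * the transport header (the port numbers for TCP/UDP) so that distinct
 * flows spread across sdma engines; otherwise fall back to the tx queue
 * index.
 */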
static u8 hfi1_ipoib_calc_entropy(struct sk_buff *skb)
{
        if (skb_transport_header_was_set(skb)) {
                u8 *hdr = (u8 *)skb_transport_header(skb);

                return (hdr[0] ^ hdr[1] ^ hdr[2] ^ hdr[3]);
        }

        return (u8)skb_get_queue_mapping(skb);
}

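/*
 * hfi1_ipoib_send - ipoib send dispatch
 *
 * Batch onto the txq's tx_list when the stack indicates more packets
 * are coming (netdev_xmit_more()) or a previous batch is still pending;
 * otherwise submit the packet directly to the sdma engine.
 */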
int hfi1_ipoib_send(struct net_device *dev,
                    struct sk_buff *skb,
                    struct ib_ah *address,
                    u32 dqpn)
{
        struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
        struct ipoib_txparms txp;
        struct rdma_netdev *rn = netdev_priv(dev);

        if (unlikely(skb->len > rn->mtu + HFI1_IPOIB_ENCAP_LEN)) {
                dd_dev_warn(priv->dd, "packet len %d (> %d) too long to send, dropping\n",
                            skb->len,
                            rn->mtu + HFI1_IPOIB_ENCAP_LEN);
                ++dev->stats.tx_dropped;
                ++dev->stats.tx_errors;
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        txp.dd = priv->dd;
        txp.ah_attr = &ibah_to_rvtah(address)->attr;
        txp.ibp = to_iport(priv->device, priv->port_num);
        txp.txq = &priv->txqs[skb_get_queue_mapping(skb)];
        txp.dqpn = dqpn;
        txp.flow.sc5 = txp.ibp->sl_to_sc[rdma_ah_get_sl(txp.ah_attr)];
        txp.flow.tx_queue = (u8)skb_get_queue_mapping(skb);
        txp.entropy = hfi1_ipoib_calc_entropy(skb);

        if (netdev_xmit_more() || !list_empty(&txp.txq->tx_list))
                return hfi1_ipoib_send_dma_list(dev, skb, &txp);

        return hfi1_ipoib_send_dma_single(dev, skb, &txp);
}

/*
 * hfi1_ipoib_sdma_sleep - ipoib sdma sleep function
 *
 * This function gets called from sdma_send_txreq() when there are not
 * enough sdma descriptors available to send the packet.  It adds the Tx
 * queue's wait structure to the sdma engine's dmawait list so the queue
 * is woken when descriptors become available.
 *
 * Returns -EAGAIN if the sdma engine made progress in the meantime (the
 * submit should be retried), -EBUSY once the wait has been queued, or
 * -EINVAL if the netdev is unregistering.
 */
static int hfi1_ipoib_sdma_sleep(struct sdma_engine *sde,
                                 struct iowait_work *wait,
                                 struct sdma_txreq *txreq,
                                 uint seq,
                                 bool pkts_sent)
{
        struct hfi1_ipoib_txq *txq =
                container_of(wait->iow, struct hfi1_ipoib_txq, wait);

        write_seqlock(&sde->waitlock);

        if (likely(txq->priv->netdev->reg_state == NETREG_REGISTERED)) {
                if (sdma_progress(sde, seq, txreq)) {
                        write_sequnlock(&sde->waitlock);
                        return -EAGAIN;
                }

                if (list_empty(&txreq->list))
                        /* came from non-list submit */
                        list_add_tail(&txreq->list, &txq->tx_list);
                if (list_empty(&txq->wait.list)) {
                        struct hfi1_ibport *ibp = &sde->ppd->ibport_data;

                        if (!atomic_xchg(&txq->no_desc, 1)) {
                                trace_hfi1_txq_queued(txq);
                                hfi1_ipoib_stop_txq(txq);
                        }
                        ibp->rvp.n_dmawait++;
                        iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
                }

                write_sequnlock(&sde->waitlock);
                return -EBUSY;
        }

        write_sequnlock(&sde->waitlock);
        return -EINVAL;
}

/*
 * hfi1_ipoib_sdma_wakeup - ipoib sdma wakeup function
 *
 * This function gets called when SDMA descriptors become available and
 * the Tx queue's wait structure was previously added to the sdma
 * engine's dmawait list.
 */
static void hfi1_ipoib_sdma_wakeup(struct iowait *wait, int reason)
{
        struct hfi1_ipoib_txq *txq =
                container_of(wait, struct hfi1_ipoib_txq, wait);

        trace_hfi1_txq_wakeup(txq);
        if (likely(txq->priv->netdev->reg_state == NETREG_REGISTERED))
                iowait_schedule(wait, system_highpri_wq, WORK_CPU_UNBOUND);
}

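/*
 * Work item scheduled by hfi1_ipoib_sdma_wakeup(): resubmit the
 * deferred tx_list and, on success, clear the no_desc stop reason.
 */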
static void hfi1_ipoib_flush_txq(struct work_struct *work)
{
        struct iowait_work *ioww =
                container_of(work, struct iowait_work, iowork);
        struct iowait *wait = iowait_ioww_to_iow(ioww);
        struct hfi1_ipoib_txq *txq =
                container_of(wait, struct hfi1_ipoib_txq, wait);
        struct net_device *dev = txq->priv->netdev;

        if (likely(dev->reg_state == NETREG_REGISTERED) &&
            likely(!hfi1_ipoib_flush_tx_list(dev, txq)))
                if (atomic_xchg(&txq->no_desc, 0))
                        hfi1_ipoib_wake_txq(txq);
}

int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
{
        struct net_device *dev = priv->netdev;
        char buf[HFI1_IPOIB_TXREQ_NAME_LEN];
        unsigned long tx_ring_size;
        int i;

        /*
         * The ring holds one entry fewer than tx_ring_size (CIRC_SPACE
         * semantics), so round tx_queue_len + 1 up to the next power of
         * two to hold at least tx_queue_len entries.
         */
        tx_ring_size = roundup_pow_of_two((unsigned long)dev->tx_queue_len + 1);

        snprintf(buf, sizeof(buf), "hfi1_%u_ipoib_txreq_cache", priv->dd->unit);
        priv->txreq_cache = kmem_cache_create(buf,
                                              sizeof(struct ipoib_txreq),
                                              0,
                                              0,
                                              NULL);
        if (!priv->txreq_cache)
                return -ENOMEM;

        priv->tx_napis = kcalloc_node(dev->num_tx_queues,
                                      sizeof(struct napi_struct),
                                      GFP_KERNEL,
                                      priv->dd->node);
        if (!priv->tx_napis)
                goto free_txreq_cache;

        priv->txqs = kcalloc_node(dev->num_tx_queues,
                                  sizeof(struct hfi1_ipoib_txq),
                                  GFP_KERNEL,
                                  priv->dd->node);
        if (!priv->txqs)
                goto free_tx_napis;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct hfi1_ipoib_txq *txq = &priv->txqs[i];

                iowait_init(&txq->wait,
                            0,
                            hfi1_ipoib_flush_txq,
                            NULL,
                            hfi1_ipoib_sdma_sleep,
                            hfi1_ipoib_sdma_wakeup,
                            NULL,
                            NULL);
                txq->priv = priv;
                txq->sde = NULL;
                INIT_LIST_HEAD(&txq->tx_list);
                atomic64_set(&txq->complete_txreqs, 0);
                atomic_set(&txq->stops, 0);
                atomic_set(&txq->ring_full, 0);
                atomic_set(&txq->no_desc, 0);
                txq->q_idx = i;
                txq->flow.tx_queue = 0xff;
                txq->flow.sc5 = 0xff;
                txq->pkts_sent = false;

                netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
                                             priv->dd->node);

                txq->tx_ring.items =
                        kcalloc_node(tx_ring_size,
                                     sizeof(struct ipoib_txreq *),
                                     GFP_KERNEL, priv->dd->node);
                if (!txq->tx_ring.items)
                        goto free_txqs;

                spin_lock_init(&txq->tx_ring.producer_lock);
                spin_lock_init(&txq->tx_ring.consumer_lock);
                txq->tx_ring.max_items = tx_ring_size;

                txq->napi = &priv->tx_napis[i];
                netif_tx_napi_add(dev, txq->napi,
                                  hfi1_ipoib_process_tx_ring,
                                  NAPI_POLL_WEIGHT);
        }

        return 0;

free_txqs:
        for (i--; i >= 0; i--) {
                struct hfi1_ipoib_txq *txq = &priv->txqs[i];

                netif_napi_del(txq->napi);
                kfree(txq->tx_ring.items);
        }

        kfree(priv->txqs);
        priv->txqs = NULL;

free_tx_napis:
        kfree(priv->tx_napis);
        priv->tx_napis = NULL;

free_txreq_cache:
        kmem_cache_destroy(priv->txreq_cache);
        priv->txreq_cache = NULL;
        return -ENOMEM;
}

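/*
 * Free any txreqs still parked on the txq's tx_list at teardown,
 * accounting each as completed so the used count reaches zero.
 */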
static void hfi1_ipoib_drain_tx_list(struct hfi1_ipoib_txq *txq)
{
        struct sdma_txreq *txreq;
        struct sdma_txreq *txreq_tmp;
        atomic64_t *complete_txreqs = &txq->complete_txreqs;

        list_for_each_entry_safe(txreq, txreq_tmp, &txq->tx_list, list) {
                struct ipoib_txreq *tx =
                        container_of(txreq, struct ipoib_txreq, txreq);

                list_del(&txreq->list);
                sdma_txclean(txq->priv->dd, &tx->txreq);
                dev_kfree_skb_any(tx->skb);
                kmem_cache_free(txq->priv->txreq_cache, tx);
                atomic64_inc(complete_txreqs);
        }

        if (hfi1_ipoib_used(txq))
                dd_dev_warn(txq->priv->dd,
                            "txq %d not empty found %llu requests\n",
                            txq->q_idx,
                            hfi1_ipoib_txreqs(txq->sent_txreqs,
                                              atomic64_read(complete_txreqs)));
}

void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv)
{
        int i;

        for (i = 0; i < priv->netdev->num_tx_queues; i++) {
                struct hfi1_ipoib_txq *txq = &priv->txqs[i];

                iowait_cancel_work(&txq->wait);
                iowait_sdma_drain(&txq->wait);
                hfi1_ipoib_drain_tx_list(txq);
                netif_napi_del(txq->napi);
                (void)hfi1_ipoib_drain_tx_ring(txq, txq->tx_ring.max_items);
                kfree(txq->tx_ring.items);
        }

        kfree(priv->txqs);
        priv->txqs = NULL;

        kfree(priv->tx_napis);
        priv->tx_napis = NULL;

        kmem_cache_destroy(priv->txreq_cache);
        priv->txreq_cache = NULL;
}

void hfi1_ipoib_napi_tx_enable(struct net_device *dev)
{
        struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
        int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct hfi1_ipoib_txq *txq = &priv->txqs[i];

                napi_enable(txq->napi);
        }
}

void hfi1_ipoib_napi_tx_disable(struct net_device *dev)
{
        struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
        int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct hfi1_ipoib_txq *txq = &priv->txqs[i];

                napi_disable(txq->napi);
                (void)hfi1_ipoib_drain_tx_ring(txq, txq->tx_ring.max_items);
        }
}

void hfi1_ipoib_tx_timeout(struct net_device *dev, unsigned int q)
{
        struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
        struct hfi1_ipoib_txq *txq = &priv->txqs[q];
        u64 completed = atomic64_read(&txq->complete_txreqs);

        dd_dev_info(priv->dd, "timeout txq %p q %u stopped %u stops %d no_desc %d ring_full %d\n",
                    txq, q,
                    __netif_subqueue_stopped(dev, txq->q_idx),
                    atomic_read(&txq->stops),
                    atomic_read(&txq->no_desc),
                    atomic_read(&txq->ring_full));
        dd_dev_info(priv->dd, "sde %p engine %u\n",
                    txq->sde,
                    txq->sde ? txq->sde->this_idx : 0);
        dd_dev_info(priv->dd, "flow %x\n", txq->flow.as_int);
        dd_dev_info(priv->dd, "sent %llu completed %llu used %llu\n",
                    txq->sent_txreqs, completed, hfi1_ipoib_used(txq));
        dd_dev_info(priv->dd, "tx_queue_len %u max_items %lu\n",
                    dev->tx_queue_len, txq->tx_ring.max_items);
        dd_dev_info(priv->dd, "head %lu tail %lu\n",
                    txq->tx_ring.head, txq->tx_ring.tail);
        dd_dev_info(priv->dd, "wait queued %u\n",
                    !list_empty(&txq->wait.list));
        dd_dev_info(priv->dd, "tx_list empty %u\n",
                    list_empty(&txq->tx_list));
}