linux/drivers/infiniband/hw/hfi1/ruc.c
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 */

#include <linux/spinlock.h>

#include "hfi.h"
#include "mad.h"
#include "qp.h"
#include "verbs_txreq.h"
#include "trace.h"

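/*
 * gid_ok - sanity check a GID
 *
 * A GID is acceptable when its interface ID matches the expected GUID
 * and its subnet prefix is either the given prefix or the default
 * prefix, which covers peers without an assigned subnet prefix.
 */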
static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
{
        return (gid->global.interface_id == id &&
                (gid->global.subnet_prefix == gid_prefix ||
                 gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
}

/*
 * hfi1_ruc_check_hdr - validate an incoming packet header against the QP
 *
 * This should be called with the QP r_lock held.
 *
 * The s_lock will be acquired around the hfi1_migrate_qp() call.
 *
 * Return 1 if the packet should be dropped, 0 otherwise.
 */
int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_packet *packet)
{
        __be64 guid;
        unsigned long flags;
        struct rvt_qp *qp = packet->qp;
        u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
        u32 dlid = packet->dlid;
        u32 slid = packet->slid;
        u32 sl = packet->sl;
        bool migrated = packet->migrated;
        u16 pkey = packet->pkey;

        if (qp->s_mig_state == IB_MIG_ARMED && migrated) {
                /* alternate path: validate against alt_ah_attr and migrate */
                if (!packet->grh) {
                        if ((rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
                             IB_AH_GRH) &&
                            (packet->etype != RHF_RCV_TYPE_BYPASS))
                                return 1;
                } else {
                        const struct ib_global_route *grh;

                        if (!(rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
                              IB_AH_GRH))
                                return 1;
                        grh = rdma_ah_read_grh(&qp->alt_ah_attr);
                        guid = get_sguid(ibp, grh->sgid_index);
                        if (!gid_ok(&packet->grh->dgid, ibp->rvp.gid_prefix,
                                    guid))
                                return 1;
                        if (!gid_ok(
                                &packet->grh->sgid,
                                grh->dgid.global.subnet_prefix,
                                grh->dgid.global.interface_id))
                                return 1;
                }
                if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), pkey,
                                            sc5, slid))) {
                        hfi1_bad_pkey(ibp, pkey, sl, 0, qp->ibqp.qp_num,
                                      slid, dlid);
                        return 1;
                }
                /* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
                if (slid != rdma_ah_get_dlid(&qp->alt_ah_attr) ||
                    ppd_from_ibp(ibp)->port !=
                        rdma_ah_get_port_num(&qp->alt_ah_attr))
                        return 1;
                spin_lock_irqsave(&qp->s_lock, flags);
                hfi1_migrate_qp(qp);
                spin_unlock_irqrestore(&qp->s_lock, flags);
        } else {
                /* primary path: validate against remote_ah_attr */
                if (!packet->grh) {
                        if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
                             IB_AH_GRH) &&
                            (packet->etype != RHF_RCV_TYPE_BYPASS))
                                return 1;
                } else {
                        const struct ib_global_route *grh;

                        if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
                              IB_AH_GRH))
                                return 1;
                        grh = rdma_ah_read_grh(&qp->remote_ah_attr);
                        guid = get_sguid(ibp, grh->sgid_index);
                        if (!gid_ok(&packet->grh->dgid, ibp->rvp.gid_prefix,
                                    guid))
                                return 1;
                        if (!gid_ok(
                             &packet->grh->sgid,
                             grh->dgid.global.subnet_prefix,
                             grh->dgid.global.interface_id))
                                return 1;
                }
                if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), pkey,
                                            sc5, slid))) {
                        hfi1_bad_pkey(ibp, pkey, sl, 0, qp->ibqp.qp_num,
                                      slid, dlid);
                        return 1;
                }
                /* Validate the SLID. See Ch. 9.6.1.5 */
                if ((slid != rdma_ah_get_dlid(&qp->remote_ah_attr)) ||
                    ppd_from_ibp(ibp)->port != qp->port_num)
                        return 1;
                if (qp->s_mig_state == IB_MIG_REARM && !migrated)
                        qp->s_mig_state = IB_MIG_ARMED;
        }

        return 0;
}
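/*
 * Illustrative usage (a sketch, not code from this file): a receive
 * handler validates the header before processing the payload, with the
 * r_lock held as required above:
 *
 *	if (hfi1_ruc_check_hdr(ibp, packet))
 *		return;		// silently drop the packet
 */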

/**
 * hfi1_make_grh - construct a GRH header
 * @ibp: a pointer to the IB port
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the size of the header after the GRH, in 32-bit words
 * @nwords: the number of 32-bit words of data being sent
 *
 * Return the size of the GRH in 32-bit words.
 */
u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
                  const struct ib_global_route *grh, u32 hwords, u32 nwords)
{
        hdr->version_tclass_flow =
                cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
                            (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
                            (grh->flow_label << IB_GRH_FLOW_SHIFT));
        /* PayLen is in bytes: everything after the GRH */
        hdr->paylen = cpu_to_be16((hwords + nwords) << 2);
        /* next_hdr is defined by C8-7 in ch. 8.4.1 */
        hdr->next_hdr = IB_GRH_NEXT_HDR;
        hdr->hop_limit = grh->hop_limit;
        /* The SGID is 32-bit aligned. */
        hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
        /* fall back to the port GUID if the SGID index is out of range */
        hdr->sgid.global.interface_id =
                grh->sgid_index < HFI1_GUIDS_PER_PORT ?
                get_sguid(ibp, grh->sgid_index) :
                get_sguid(ibp, HFI1_PORT_GUID_INDEX);
        hdr->dgid = grh->dgid;

        /* GRH header size in 32-bit words. */
        return sizeof(struct ib_grh) / sizeof(u32);
}
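/*
 * Note: struct ib_grh is 40 bytes, so the return value above is always
 * 10 dwords, which callers add to their running header word count.
 */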
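/*
 * BTH2_OFFSET is the dword offset of bth[2] (the PSN word) within the
 * SDMA header; it is the field patched by the AHG descriptors built in
 * build_ahg() below.
 */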
#define BTH2_OFFSET (offsetof(struct hfi1_sdma_header, \
                              hdr.ibh.u.oth.bth[2]) / 4)

/**
 * build_ahg - create ahg in s_ahg
 * @qp: a pointer to QP
 * @npsn: the next PSN for the request/response
 *
 * This routine manages AHG (automatic header generation): it allocates
 * an AHG entry and triggers a full header copy for the first middle
 * packet.
 *
 * Subsequent middle packets reuse the copied entry, patching the PSN
 * with one or two edits.
 */
static inline void build_ahg(struct rvt_qp *qp, u32 npsn)
{
        struct hfi1_qp_priv *priv = qp->priv;

        if (unlikely(qp->s_flags & HFI1_S_AHG_CLEAR))
                clear_ahg(qp);
        if (!(qp->s_flags & HFI1_S_AHG_VALID)) {
                /* first middle that needs copy */
                if (qp->s_ahgidx < 0)
                        qp->s_ahgidx = sdma_ahg_alloc(priv->s_sde);
                if (qp->s_ahgidx >= 0) {
                        qp->s_ahgpsn = npsn;
                        priv->s_ahg->tx_flags |= SDMA_TXREQ_F_AHG_COPY;
                        /* save a copy to protect against a change in
                         * another thread
                         */
                        priv->s_ahg->ahgidx = qp->s_ahgidx;
                        qp->s_flags |= HFI1_S_AHG_VALID;
                }
        } else {
                /* subsequent middle after valid */
                if (qp->s_ahgidx >= 0) {
                        priv->s_ahg->tx_flags |= SDMA_TXREQ_F_USE_AHG;
                        priv->s_ahg->ahgidx = qp->s_ahgidx;
                        priv->s_ahg->ahgcount++;
                        /* patch the low 16 bits of the PSN in BTH2 */
                        priv->s_ahg->ahgdesc[0] =
                                sdma_build_ahg_descriptor(
                                        (__force u16)cpu_to_be16((u16)npsn),
                                        BTH2_OFFSET,
                                        16,
                                        16);
                        if ((npsn & 0xffff0000) !=
                                        (qp->s_ahgpsn & 0xffff0000)) {
                                /* upper half changed: add a second edit */
                                priv->s_ahg->ahgcount++;
                                priv->s_ahg->ahgdesc[1] =
                                        sdma_build_ahg_descriptor(
                                                (__force u16)cpu_to_be16(
                                                        (u16)(npsn >> 16)),
                                                BTH2_OFFSET,
                                                0,
                                                16);
                        }
                }
        }
}
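/*
 * Worked example: suppose the header was copied at s_ahgpsn == 0xffff
 * and the next packet uses npsn == 0x10000. The upper 16 bits differ,
 * so build_ahg() queues two descriptor edits:
 *
 *	descriptor 0: low half  -> (u16)npsn         == 0x0000
 *	descriptor 1: high half -> (u16)(npsn >> 16) == 0x0001
 *
 * For consecutive PSNs within the same 64K window, the single low-half
 * edit suffices.
 */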
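/*
 * For reference (IBTA vol 1, ch. 9.2): bth0 carries the opcode, pad
 * count and P_Key; bth1 carries the destination QPN plus FECN/BECN
 * bits; bth2 carries the PSN and the ack-request bit.
 */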
static inline void hfi1_make_ruc_bth(struct rvt_qp *qp,
                                     struct ib_other_headers *ohdr,
                                     u32 bth0, u32 bth1, u32 bth2)
{
        ohdr->bth[0] = cpu_to_be32(bth0);
        ohdr->bth[1] = cpu_to_be32(bth1);
        ohdr->bth[2] = cpu_to_be32(bth2);
}

/**
 * hfi1_make_ruc_header_16B - build a 16B header
 * @qp: the queue pair
 * @ohdr: a pointer to the destination header memory
 * @bth0: bth0 passed in from the RC/UC builder
 * @bth1: bth1 passed in from the RC/UC builder
 * @bth2: bth2 passed in from the RC/UC builder
 * @middle: non-zero indicates AHG could be used
 * @ps: the current packet state
 *
 * This routine may disarm ahg under these situations:
 * - packet needs a GRH
 * - BECN needed
 * - migration state not IB_MIG_MIGRATED
 */
static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp,
                                            struct ib_other_headers *ohdr,
                                            u32 bth0, u32 bth1, u32 bth2,
                                            int middle,
                                            struct hfi1_pkt_state *ps)
{
        struct hfi1_qp_priv *priv = qp->priv;
        struct hfi1_ibport *ibp = ps->ibp;
        struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
        u32 slid;
        u16 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
        u8 l4 = OPA_16B_L4_IB_LOCAL;
        u8 extra_bytes = hfi1_get_16b_padding(
                                (ps->s_txreq->hdr_dwords << 2),
                                ps->s_txreq->s_cur_size);
        u32 nwords = SIZE_OF_CRC + ((ps->s_txreq->s_cur_size +
                                 extra_bytes + SIZE_OF_LT) >> 2);
        bool becn = false;

        if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
            hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))) {
                struct ib_grh *grh;
                struct ib_global_route *grd =
                        rdma_ah_retrieve_grh(&qp->remote_ah_attr);
                /*
                 * Ensure OPA GIDs are transformed to IB gids
                 * before creating the GRH.
                 */
                if (grd->sgid_index == OPA_GID_INDEX)
                        grd->sgid_index = 0;
                grh = &ps->s_txreq->phdr.hdr.opah.u.l.grh;
                l4 = OPA_16B_L4_IB_GLOBAL;
                ps->s_txreq->hdr_dwords +=
                        hfi1_make_grh(ibp, grh, grd,
                                      ps->s_txreq->hdr_dwords - LRH_16B_DWORDS,
                                      nwords);
                middle = 0;
        }

        if (qp->s_mig_state == IB_MIG_MIGRATED)
                bth1 |= OPA_BTH_MIG_REQ;
        else
                middle = 0;

        if (qp->s_flags & RVT_S_ECN) {
                qp->s_flags &= ~RVT_S_ECN;
                /* we recently received a FECN, so return a BECN */
                becn = true;
                middle = 0;
        }
        if (middle)
                build_ahg(qp, bth2);
        else
                qp->s_flags &= ~HFI1_S_AHG_VALID;

        bth0 |= pkey;
        bth0 |= extra_bytes << 20;
        hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2);

        if (!ppd->lid)
                /* no LID assigned: fall back to the permissive LID */
                slid = be32_to_cpu(OPA_LID_PERMISSIVE);
        else
                slid = ppd->lid |
                        (rdma_ah_get_path_bits(&qp->remote_ah_attr) &
                        ((1 << ppd->lmc) - 1));

        /* the 16B length field is in 8-byte units, hence dwords >> 1 */
        hfi1_make_16b_hdr(&ps->s_txreq->phdr.hdr.opah,
                          slid,
                          opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr),
                                      16B),
                          (ps->s_txreq->hdr_dwords + nwords) >> 1,
                          pkey, becn, 0, l4, priv->s_sc);
}
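/*
 * 16B accounting notes: nwords above covers the payload, the pad
 * computed by hfi1_get_16b_padding(), the tail byte (SIZE_OF_LT) and
 * the CRC; the 16B length field counts 8-byte units, which is why the
 * dword total is shifted right by one when building the header.
 */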
/**
 * hfi1_make_ruc_header_9B - build a 9B header
 * @qp: the queue pair
 * @ohdr: a pointer to the destination header memory
 * @bth0: bth0 passed in from the RC/UC builder
 * @bth1: bth1 passed in from the RC/UC builder
 * @bth2: bth2 passed in from the RC/UC builder
 * @middle: non-zero indicates AHG could be used
 * @ps: the current packet state
 *
 * This routine may disarm ahg under these situations:
 * - packet needs a GRH
 * - BECN needed
 * - migration state not IB_MIG_MIGRATED
 */
static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp,
                                           struct ib_other_headers *ohdr,
                                           u32 bth0, u32 bth1, u32 bth2,
                                           int middle,
                                           struct hfi1_pkt_state *ps)
{
        struct hfi1_qp_priv *priv = qp->priv;
        struct hfi1_ibport *ibp = ps->ibp;
        u16 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
        u16 lrh0 = HFI1_LRH_BTH;
        /* pad the payload out to a 4-byte boundary */
        u8 extra_bytes = -ps->s_txreq->s_cur_size & 3;
        u32 nwords = SIZE_OF_CRC + ((ps->s_txreq->s_cur_size +
                                         extra_bytes) >> 2);

        if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
                struct ib_grh *grh = &ps->s_txreq->phdr.hdr.ibh.u.l.grh;

                lrh0 = HFI1_LRH_GRH;
                ps->s_txreq->hdr_dwords +=
                        hfi1_make_grh(ibp, grh,
                                      rdma_ah_read_grh(&qp->remote_ah_attr),
                                      ps->s_txreq->hdr_dwords - LRH_9B_DWORDS,
                                      nwords);
                middle = 0;
        }
        /* pack the SC and SL into LRH word 0 */
        lrh0 |= (priv->s_sc & 0xf) << 12 |
                (rdma_ah_get_sl(&qp->remote_ah_attr) & 0xf) << 4;

        if (qp->s_mig_state == IB_MIG_MIGRATED)
                bth0 |= IB_BTH_MIG_REQ;
        else
                middle = 0;

        if (qp->s_flags & RVT_S_ECN) {
                qp->s_flags &= ~RVT_S_ECN;
                /* we recently received a FECN, so return a BECN */
                bth1 |= (IB_BECN_MASK << IB_BECN_SHIFT);
                middle = 0;
        }
        if (middle)
                build_ahg(qp, bth2);
        else
                qp->s_flags &= ~HFI1_S_AHG_VALID;

        bth0 |= pkey;
        bth0 |= extra_bytes << 20;
        hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2);
        hfi1_make_ib_hdr(&ps->s_txreq->phdr.hdr.ibh,
                         lrh0,
                         ps->s_txreq->hdr_dwords + nwords,
                         opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr), 9B),
                         ppd_from_ibp(ibp)->lid |
                                rdma_ah_get_path_bits(&qp->remote_ah_attr));
}
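/*
 * 9B accounting notes: the payload is padded to a 4-byte boundary
 * (extra_bytes) and the legacy LRH length field counts 32-bit words,
 * so hdr_dwords + nwords is passed through unshifted.
 */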

typedef void (*hfi1_make_ruc_hdr)(struct rvt_qp *qp,
                                  struct ib_other_headers *ohdr,
                                  u32 bth0, u32 bth1, u32 bth2, int middle,
                                  struct hfi1_pkt_state *ps);

/* We support only two types - 9B and 16B for now */
static const hfi1_make_ruc_hdr hfi1_ruc_header_tbl[2] = {
        [HFI1_PKT_TYPE_9B] = &hfi1_make_ruc_header_9B,
        [HFI1_PKT_TYPE_16B] = &hfi1_make_ruc_header_16B
};
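/*
 * A QP's header type is fixed in priv->hdr_type, so dispatch is a
 * simple table lookup; hfi1_make_ruc_header() below does exactly:
 *
 *	hfi1_ruc_header_tbl[priv->hdr_type](qp, ohdr, bth0, bth1, bth2,
 *					    middle, ps);
 */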

void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
                          u32 bth0, u32 bth1, u32 bth2, int middle,
                          struct hfi1_pkt_state *ps)
{
        struct hfi1_qp_priv *priv = qp->priv;

        /*
         * reset s_ahg/AHG fields
         *
         * This ensures that the ahgentry/ahgcount
         * are at a non-AHG default to protect
         * build_verbs_tx_desc() from using
         * a stale ahgidx.
         *
         * build_ahg() will modify as appropriate
         * to use the AHG feature.
         */
        priv->s_ahg->tx_flags = 0;
        priv->s_ahg->ahgcount = 0;
        priv->s_ahg->ahgidx = 0;

        /* Make the appropriate header */
        hfi1_ruc_header_tbl[priv->hdr_type](qp, ohdr, bth0, bth1, bth2, middle,
                                            ps);
}

/* when sending, force a reschedule every one of these periods */
#define SEND_RESCHED_TIMEOUT (5 * HZ)  /* 5s in jiffies */
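/*
 * hfi1_do_send() below divides the interval by 8, so the effective
 * time slice is qp->timeout_jiffies / 8 for RC and 5 * HZ / 8
 * (~625 ms) for UC/UD.
 */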

/**
 * hfi1_schedule_send_yield - test for a yield required for QP
 * send engine
 * @qp: a pointer to QP
 * @ps: a pointer to a structure with commonly used lookup values for
 *      the send engine progress
 * @tid: true if it is the tid leg
 *
 * This routine checks whether the time slice for the QP has expired
 * for RC QPs; if so, an additional work entry is queued. At that
 * point, other QPs have an opportunity to be scheduled. It returns
 * true if a yield is required, otherwise false.
 */
bool hfi1_schedule_send_yield(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
                              bool tid)
{
        ps->pkts_sent = true;

        if (unlikely(time_after(jiffies, ps->timeout))) {
                if (!ps->in_thread ||
                    workqueue_congested(ps->cpu, ps->ppd->hfi1_wq)) {
                        spin_lock_irqsave(&qp->s_lock, ps->flags);
                        if (!tid) {
                                qp->s_flags &= ~RVT_S_BUSY;
                                hfi1_schedule_send(qp);
                        } else {
                                struct hfi1_qp_priv *priv = qp->priv;

                                if (priv->s_flags &
                                    HFI1_S_TID_BUSY_SET) {
                                        qp->s_flags &= ~RVT_S_BUSY;
                                        priv->s_flags &=
                                                ~(HFI1_S_TID_BUSY_SET |
                                                  RVT_S_BUSY);
                                } else {
                                        priv->s_flags &= ~RVT_S_BUSY;
                                }
                                hfi1_schedule_tid_send(qp);
                        }

                        spin_unlock_irqrestore(&qp->s_lock, ps->flags);
                        this_cpu_inc(*ps->ppd->dd->send_schedule);
                        trace_hfi1_rc_expired_time_slice(qp, true);
                        return true;
                }

                /* not congested: yield in place and start a new time slice */
                cond_resched();
                this_cpu_inc(*ps->ppd->dd->send_schedule);
                ps->timeout = jiffies + ps->timeout_int;
        }

        trace_hfi1_rc_expired_time_slice(qp, false);
        return false;
}
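/*
 * Design note: the yield above requeues the QP only when the engine is
 * not running in a workqueue thread, or that thread's workqueue is
 * congested; otherwise a cond_resched() within the current work item
 * is cheaper than a full reschedule.
 */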

void hfi1_do_send_from_rvt(struct rvt_qp *qp)
{
        hfi1_do_send(qp, false);
}

void _hfi1_do_send(struct work_struct *work)
{
        struct iowait_work *w = container_of(work, struct iowait_work, iowork);
        struct rvt_qp *qp = iowait_to_qp(w->iow);

        hfi1_do_send(qp, true);
}

/**
 * hfi1_do_send - perform a send on a QP
 * @qp: a pointer to the QP
 * @in_thread: true if in a workqueue thread
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP.
 * Otherwise, two threads could send packets out of order.
 */
void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
{
        struct hfi1_pkt_state ps;
        struct hfi1_qp_priv *priv = qp->priv;
        int (*make_req)(struct rvt_qp *qp, struct hfi1_pkt_state *ps);

        ps.dev = to_idev(qp->ibqp.device);
        ps.ibp = to_iport(qp->ibqp.device, qp->port_num);
        ps.ppd = ppd_from_ibp(ps.ibp);
        ps.in_thread = in_thread;
        ps.wait = iowait_get_ib_work(&priv->s_iowait);

        trace_hfi1_rc_do_send(qp, in_thread);

        switch (qp->ibqp.qp_type) {
        case IB_QPT_RC:
                /* a DLID matching our own LID (modulo the LMC low bits)
                 * is handled as a loopback
                 */
                if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) &
                                   ~((1 << ps.ppd->lmc) - 1)) ==
                                  ps.ppd->lid)) {
                        rvt_ruc_loopback(qp);
                        return;
                }
                make_req = hfi1_make_rc_req;
                ps.timeout_int = qp->timeout_jiffies;
                break;
        case IB_QPT_UC:
                if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) &
                                   ~((1 << ps.ppd->lmc) - 1)) ==
                                  ps.ppd->lid)) {
                        rvt_ruc_loopback(qp);
                        return;
                }
                make_req = hfi1_make_uc_req;
                ps.timeout_int = SEND_RESCHED_TIMEOUT;
                break;
        default:
                make_req = hfi1_make_ud_req;
                ps.timeout_int = SEND_RESCHED_TIMEOUT;
        }

        spin_lock_irqsave(&qp->s_lock, ps.flags);

        /* Return if we are already busy processing a work request. */
        if (!hfi1_send_ok(qp)) {
                if (qp->s_flags & HFI1_S_ANY_WAIT_IO)
                        iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
                spin_unlock_irqrestore(&qp->s_lock, ps.flags);
                return;
        }

        qp->s_flags |= RVT_S_BUSY;

        /* the time slice is 1/8 of the nominal interval */
        ps.timeout_int = ps.timeout_int / 8;
        ps.timeout = jiffies + ps.timeout_int;
        ps.cpu = priv->s_sde ? priv->s_sde->cpu :
                        cpumask_first(cpumask_of_node(ps.ppd->dd->node));
        ps.pkts_sent = false;

        /* ensure a pre-built packet is handled */
        ps.s_txreq = get_waiting_verbs_txreq(ps.wait);
        do {
                /* Check for a constructed packet to be sent. */
                if (ps.s_txreq) {
                        if (priv->s_flags & HFI1_S_TID_BUSY_SET)
                                qp->s_flags |= RVT_S_BUSY;
                        spin_unlock_irqrestore(&qp->s_lock, ps.flags);
                        /*
                         * If the packet cannot be sent now, return and
                         * the send engine will be woken up later.
                         */
                        if (hfi1_verbs_send(qp, &ps))
                                return;

                        /* allow other tasks to run */
                        if (hfi1_schedule_send_yield(qp, &ps, false))
                                return;

                        spin_lock_irqsave(&qp->s_lock, ps.flags);
                }
        } while (make_req(qp, &ps));
        iowait_starve_clear(ps.pkts_sent, &priv->s_iowait);
        spin_unlock_irqrestore(&qp->s_lock, ps.flags);
}