linux/drivers/infiniband/hw/qib/qib_sdma.c
/*
 * Copyright (c) 2012 Intel Corporation. All rights reserved.
 * Copyright (c) 2007 - 2012 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include <linux/moduleparam.h>

#include "qib.h"
#include "qib_common.h"

/* default pio off, sdma on */
static ushort sdma_descq_cnt = 256;
module_param_named(sdma_descq_cnt, sdma_descq_cnt, ushort, S_IRUGO);
MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");
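
/*
 * Usage note (a sketch; "ib_qib" is assumed to be the module name):
 * loading with "modprobe ib_qib sdma_descq_cnt=512" selects a 512-entry
 * descriptor queue, and a value of 0 falls back to the 256-entry
 * default in alloc_sdma() below.
 */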

/*
 * Bits defined in the send DMA descriptor.
 */
#define SDMA_DESC_LAST          (1ULL << 11)
#define SDMA_DESC_FIRST         (1ULL << 12)
#define SDMA_DESC_DMA_HEAD      (1ULL << 13)
#define SDMA_DESC_USE_LARGE_BUF (1ULL << 14)
#define SDMA_DESC_INTR          (1ULL << 15)
#define SDMA_DESC_COUNT_LSB     16
#define SDMA_DESC_GEN_LSB       30

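/*
 * Descriptor layout implied by the bits above and by make_sdma_desc()
 * (a reference sketch derived from this file, not from hardware docs):
 *
 *   qw[0]  [63:32] SDmaPhyAddr[31:0]   low half of the buffer address
 *          [31:30] SDmaGeneration[1:0]
 *          [26:16] SDmaDwordCount[10:0]
 *          [15:11] INTR, USE_LARGE_BUF, DMA_HEAD, FIRST, LAST flags
 *          [10:0]  SDmaBufOffset[12:2]
 *   qw[1]  [15:0]  SDmaPhyAddr[47:32]  high half of the buffer address
 */
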
/* declare all statics here rather than keep sorting */
static int alloc_sdma(struct qib_pportdata *);
static void sdma_complete(struct kref *);
static void sdma_finalput(struct qib_sdma_state *);
static void sdma_get(struct qib_sdma_state *);
static void sdma_put(struct qib_sdma_state *);
static void sdma_set_state(struct qib_pportdata *, enum qib_sdma_states);
static void sdma_start_sw_clean_up(struct qib_pportdata *);
static void sdma_sw_clean_up_task(unsigned long);
static void unmap_desc(struct qib_pportdata *, unsigned);

static void sdma_get(struct qib_sdma_state *ss)
{
        kref_get(&ss->kref);
}

static void sdma_complete(struct kref *kref)
{
        struct qib_sdma_state *ss =
                container_of(kref, struct qib_sdma_state, kref);

        complete(&ss->comp);
}

static void sdma_put(struct qib_sdma_state *ss)
{
        kref_put(&ss->kref, sdma_complete);
}

static void sdma_finalput(struct qib_sdma_state *ss)
{
        sdma_put(ss);
        wait_for_completion(&ss->comp);
}

/*
 * Complete all the sdma requests on the active list, in the correct
 * order, and with appropriate processing.  Called when cleaning up
 * after sdma shutdown, and when new sdma requests are submitted for
 * a link that is down.  This matches what is done for requests
 * that complete normally; it is just applied to the full list.
 *
 * Must be called with sdma_lock held.
 */
static void clear_sdma_activelist(struct qib_pportdata *ppd)
{
        struct qib_sdma_txreq *txp, *txp_next;

        list_for_each_entry_safe(txp, txp_next, &ppd->sdma_activelist, list) {
                list_del_init(&txp->list);
                if (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) {
                        unsigned idx;

                        idx = txp->start_idx;
                        while (idx != txp->next_descq_idx) {
                                unmap_desc(ppd, idx);
                                if (++idx == ppd->sdma_descq_cnt)
                                        idx = 0;
                        }
                }
                if (txp->callback)
                        (*txp->callback)(txp, QIB_SDMA_TXREQ_S_ABORTED);
        }
}

static void sdma_sw_clean_up_task(unsigned long opaque)
{
        struct qib_pportdata *ppd = (struct qib_pportdata *) opaque;
        unsigned long flags;

        spin_lock_irqsave(&ppd->sdma_lock, flags);

        /*
         * At this point, the following should always be true:
         * - We are halted, so no more descriptors are getting retired.
         * - We are not running, so no one is submitting new work.
         * - Only we can send the e40_sw_cleaned, so we can't start
         *   running again until we say so.  So, the active list and
         *   descq are ours to play with.
         */

        /* Process all retired requests. */
        qib_sdma_make_progress(ppd);

        clear_sdma_activelist(ppd);

        /*
         * Resync count of added and removed.  It is VERY important that
         * sdma_descq_removed NEVER decrement - user_sdma depends on it.
         */
        ppd->sdma_descq_removed = ppd->sdma_descq_added;

        /*
         * Reset our notion of head and tail.
         * Note that the HW registers will be reset when switching states
         * due to calling __qib_sdma_process_event() below.
         */
        ppd->sdma_descq_tail = 0;
        ppd->sdma_descq_head = 0;
        ppd->sdma_head_dma[0] = 0;
        ppd->sdma_generation = 0;

        __qib_sdma_process_event(ppd, qib_sdma_event_e40_sw_cleaned);

        spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}

/*
 * This is called when changing to state qib_sdma_state_s10_hw_start_up_wait
 * as a result of send buffer errors or send DMA descriptor errors.
 * We want to disarm the buffers in these cases.
 */
static void sdma_hw_start_up(struct qib_pportdata *ppd)
{
        struct qib_sdma_state *ss = &ppd->sdma_state;
        unsigned bufno;

        for (bufno = ss->first_sendbuf; bufno < ss->last_sendbuf; ++bufno)
                ppd->dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_BUF(bufno));

        ppd->dd->f_sdma_hw_start_up(ppd);
}

static void sdma_sw_tear_down(struct qib_pportdata *ppd)
{
        struct qib_sdma_state *ss = &ppd->sdma_state;

        /* Releasing this reference means the state machine has stopped. */
        sdma_put(ss);
}

static void sdma_start_sw_clean_up(struct qib_pportdata *ppd)
{
        tasklet_hi_schedule(&ppd->sdma_sw_clean_up_task);
}

static void sdma_set_state(struct qib_pportdata *ppd,
        enum qib_sdma_states next_state)
{
        struct qib_sdma_state *ss = &ppd->sdma_state;
        struct sdma_set_state_action *action = ss->set_state_action;
        unsigned op = 0;

        /* debugging bookkeeping */
        ss->previous_state = ss->current_state;
        ss->previous_op = ss->current_op;

        ss->current_state = next_state;

        if (action[next_state].op_enable)
                op |= QIB_SDMA_SENDCTRL_OP_ENABLE;

        if (action[next_state].op_intenable)
                op |= QIB_SDMA_SENDCTRL_OP_INTENABLE;

        if (action[next_state].op_halt)
                op |= QIB_SDMA_SENDCTRL_OP_HALT;

        if (action[next_state].op_drain)
                op |= QIB_SDMA_SENDCTRL_OP_DRAIN;

        if (action[next_state].go_s99_running_tofalse)
                ss->go_s99_running = 0;

        if (action[next_state].go_s99_running_totrue)
                ss->go_s99_running = 1;

        ss->current_op = op;

        ppd->dd->f_sdma_sendctrl(ppd, ss->current_op);
}

static void unmap_desc(struct qib_pportdata *ppd, unsigned head)
{
        __le64 *descqp = &ppd->sdma_descq[head].qw[0];
        u64 desc[2];
        dma_addr_t addr;
        size_t len;

        desc[0] = le64_to_cpu(descqp[0]);
        desc[1] = le64_to_cpu(descqp[1]);

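        /*
         * Reassemble the bus address from the two qwords, then recover
         * the byte length: bits [26:16] of qword 0 hold the dword count,
         * so shifting right by 14 (16 - 2) and masking with 0x7ff << 2
         * yields the count already converted to bytes.
         */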
        addr = (desc[1] << 32) | (desc[0] >> 32);
        len = (desc[0] >> 14) & (0x7ffULL << 2);
        dma_unmap_single(&ppd->dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
}

static int alloc_sdma(struct qib_pportdata *ppd)
{
        ppd->sdma_descq_cnt = sdma_descq_cnt;
        if (!ppd->sdma_descq_cnt)
                ppd->sdma_descq_cnt = 256;

        /* Allocate memory for SendDMA descriptor FIFO */
        ppd->sdma_descq = dma_alloc_coherent(&ppd->dd->pcidev->dev,
                ppd->sdma_descq_cnt * sizeof(u64[2]), &ppd->sdma_descq_phys,
                GFP_KERNEL);

        if (!ppd->sdma_descq) {
                qib_dev_err(ppd->dd,
                        "failed to allocate SendDMA descriptor FIFO memory\n");
                goto bail;
        }

        /* Allocate memory for DMA of head register to memory */
        ppd->sdma_head_dma = dma_alloc_coherent(&ppd->dd->pcidev->dev,
                PAGE_SIZE, &ppd->sdma_head_phys, GFP_KERNEL);
        if (!ppd->sdma_head_dma) {
                qib_dev_err(ppd->dd,
                        "failed to allocate SendDMA head memory\n");
                goto cleanup_descq;
        }
        ppd->sdma_head_dma[0] = 0;
        return 0;

cleanup_descq:
        dma_free_coherent(&ppd->dd->pcidev->dev,
                ppd->sdma_descq_cnt * sizeof(u64[2]), (void *)ppd->sdma_descq,
                ppd->sdma_descq_phys);
        ppd->sdma_descq = NULL;
        ppd->sdma_descq_phys = 0;
bail:
        ppd->sdma_descq_cnt = 0;
        return -ENOMEM;
}

static void free_sdma(struct qib_pportdata *ppd)
{
        struct qib_devdata *dd = ppd->dd;

        if (ppd->sdma_head_dma) {
                dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
                                  (void *)ppd->sdma_head_dma,
                                  ppd->sdma_head_phys);
                ppd->sdma_head_dma = NULL;
                ppd->sdma_head_phys = 0;
        }

        if (ppd->sdma_descq) {
                dma_free_coherent(&dd->pcidev->dev,
                                  ppd->sdma_descq_cnt * sizeof(u64[2]),
                                  ppd->sdma_descq, ppd->sdma_descq_phys);
                ppd->sdma_descq = NULL;
                ppd->sdma_descq_phys = 0;
        }
}

static inline void make_sdma_desc(struct qib_pportdata *ppd,
                                  u64 *sdmadesc, u64 addr, u64 dwlen,
                                  u64 dwoffset)
{

        WARN_ON(addr & 3);
        /* SDmaPhyAddr[47:32] */
        sdmadesc[1] = addr >> 32;
        /* SDmaPhyAddr[31:0] */
        sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
        /* SDmaGeneration[1:0] */
        sdmadesc[0] |= (ppd->sdma_generation & 3ULL) <<
                SDMA_DESC_GEN_LSB;
        /* SDmaDwordCount[10:0] */
        sdmadesc[0] |= (dwlen & 0x7ffULL) << SDMA_DESC_COUNT_LSB;
        /* SDmaBufOffset[12:2] */
        sdmadesc[0] |= dwoffset & 0x7ffULL;
}
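
/*
 * Worked example (a sketch; the numbers follow directly from the shifts
 * above): a 256-dword chunk at bus address 0x1234abc0 with a dword
 * offset of 8 encodes as
 *   sdmadesc[1] = 0x0;                    SDmaPhyAddr[47:32]
 *   sdmadesc[0] = 0x1234abc0ULL << 32     SDmaPhyAddr[31:0]
 *               | (gen & 3ULL) << 30      SDmaGeneration
 *               | 0x100ULL << 16          SDmaDwordCount = 256
 *               | 0x8;                    SDmaBufOffset = 8 dwords
 */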

/* sdma_lock must be held */
int qib_sdma_make_progress(struct qib_pportdata *ppd)
{
        struct list_head *lp = NULL;
        struct qib_sdma_txreq *txp = NULL;
        struct qib_devdata *dd = ppd->dd;
        int progress = 0;
        u16 hwhead;
        u16 idx = 0;

        hwhead = dd->f_sdma_gethead(ppd);

        /* The reason for some of the complexity of this code is that
         * not all descriptors have corresponding txps.  So, we have to
         * be able to skip over descs until we wander into the range of
         * the next txp on the list.
         */

        if (!list_empty(&ppd->sdma_activelist)) {
                lp = ppd->sdma_activelist.next;
                txp = list_entry(lp, struct qib_sdma_txreq, list);
                idx = txp->start_idx;
        }

        while (ppd->sdma_descq_head != hwhead) {
                /* if desc is part of this txp, unmap if needed */
                if (txp && (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) &&
                    (idx == ppd->sdma_descq_head)) {
                        unmap_desc(ppd, ppd->sdma_descq_head);
                        if (++idx == ppd->sdma_descq_cnt)
                                idx = 0;
                }

                /* increment dequeued desc count */
                ppd->sdma_descq_removed++;

                /* advance head, wrap if needed */
                if (++ppd->sdma_descq_head == ppd->sdma_descq_cnt)
                        ppd->sdma_descq_head = 0;

                /* if now past this txp's descs, do the callback */
                if (txp && txp->next_descq_idx == ppd->sdma_descq_head) {
                        /* remove from active list */
                        list_del_init(&txp->list);
                        if (txp->callback)
                                (*txp->callback)(txp, QIB_SDMA_TXREQ_S_OK);
                        /* see if there is another txp */
                        if (list_empty(&ppd->sdma_activelist))
                                txp = NULL;
                        else {
                                lp = ppd->sdma_activelist.next;
                                txp = list_entry(lp, struct qib_sdma_txreq,
                                        list);
                                idx = txp->start_idx;
                        }
                }
                progress = 1;
        }
        if (progress)
                qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));
        return progress;
}

/*
 * This is called from interrupt context.
 */
void qib_sdma_intr(struct qib_pportdata *ppd)
{
        unsigned long flags;

        spin_lock_irqsave(&ppd->sdma_lock, flags);

        __qib_sdma_intr(ppd);

        spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}

void __qib_sdma_intr(struct qib_pportdata *ppd)
{
        if (__qib_sdma_running(ppd)) {
                qib_sdma_make_progress(ppd);
                if (!list_empty(&ppd->sdma_userpending))
                        qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending);
        }
}

int qib_setup_sdma(struct qib_pportdata *ppd)
{
        struct qib_devdata *dd = ppd->dd;
        unsigned long flags;
        int ret = 0;

        ret = alloc_sdma(ppd);
        if (ret)
                goto bail;

        /* set consistent sdma state */
        ppd->dd->f_sdma_init_early(ppd);
        spin_lock_irqsave(&ppd->sdma_lock, flags);
        sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
        spin_unlock_irqrestore(&ppd->sdma_lock, flags);

        /* set up reference counting */
        kref_init(&ppd->sdma_state.kref);
        init_completion(&ppd->sdma_state.comp);

        ppd->sdma_generation = 0;
        ppd->sdma_descq_head = 0;
        ppd->sdma_descq_removed = 0;
        ppd->sdma_descq_added = 0;

        ppd->sdma_intrequest = 0;
        INIT_LIST_HEAD(&ppd->sdma_userpending);

        INIT_LIST_HEAD(&ppd->sdma_activelist);

        tasklet_init(&ppd->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
                (unsigned long)ppd);

        ret = dd->f_init_sdma_regs(ppd);
        if (ret)
                goto bail_alloc;

        qib_sdma_process_event(ppd, qib_sdma_event_e10_go_hw_start);

        return 0;

bail_alloc:
        qib_teardown_sdma(ppd);
bail:
        return ret;
}

void qib_teardown_sdma(struct qib_pportdata *ppd)
{
        qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);

        /*
         * This waits for the state machine to exit so it is not
         * necessary to kill the sdma_sw_clean_up_task to make sure
         * it is not running.
         */
        sdma_finalput(&ppd->sdma_state);

        free_sdma(ppd);
}

int qib_sdma_running(struct qib_pportdata *ppd)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&ppd->sdma_lock, flags);
        ret = __qib_sdma_running(ppd);
        spin_unlock_irqrestore(&ppd->sdma_lock, flags);

        return ret;
}

/*
 * Complete a request when sdma is not running; it is likely the only
 * request, but to simplify the code, always queue it, then process the
 * full activelist.  Processing the entire list ensures that this
 * particular request gets its callback, and in the correct order.
 *
 * Must be called with sdma_lock held.
 */
static void complete_sdma_err_req(struct qib_pportdata *ppd,
                                  struct qib_verbs_txreq *tx)
{
        struct qib_qp_priv *priv = tx->qp->priv;

        atomic_inc(&priv->s_dma_busy);
        /* no sdma descriptors, so no unmap_desc */
        tx->txreq.start_idx = 0;
        tx->txreq.next_descq_idx = 0;
        list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
        clear_sdma_activelist(ppd);
}

/*
 * This function queues one IB packet onto the send DMA queue per call.
 * The caller is responsible for checking:
 * 1) The number of send DMA descriptor entries is less than the size of
 *    the descriptor queue.
 * 2) The IB SGE addresses and lengths are 32-bit aligned
 *    (except possibly the last SGE's length)
 * 3) The SGE addresses are suitable for passing to dma_map_single().
 */
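/*
 * Descriptor usage per packet, as implemented below: the packet header
 * at tx->txreq.addr is written first with SDMA_DESC_FIRST set, then one
 * descriptor is written per mapped SGE chunk, and SDMA_DESC_LAST (plus
 * the optional DMA_HEAD/INTR flags) is OR'ed into the final descriptor.
 */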
int qib_sdma_verbs_send(struct qib_pportdata *ppd,
                        struct rvt_sge_state *ss, u32 dwords,
                        struct qib_verbs_txreq *tx)
{
        unsigned long flags;
        struct rvt_sge *sge;
        struct rvt_qp *qp;
        int ret = 0;
        u16 tail;
        __le64 *descqp;
        u64 sdmadesc[2];
        u32 dwoffset;
        dma_addr_t addr;
        struct qib_qp_priv *priv;

        spin_lock_irqsave(&ppd->sdma_lock, flags);

retry:
        if (unlikely(!__qib_sdma_running(ppd))) {
                complete_sdma_err_req(ppd, tx);
                goto unlock;
        }

        if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd)) {
                if (qib_sdma_make_progress(ppd))
                        goto retry;
                if (ppd->dd->flags & QIB_HAS_SDMA_TIMEOUT)
                        ppd->dd->f_sdma_set_desc_cnt(ppd,
                                        ppd->sdma_descq_cnt / 2);
                goto busy;
        }

        dwoffset = tx->hdr_dwords;
        make_sdma_desc(ppd, sdmadesc, (u64) tx->txreq.addr, dwoffset, 0);

        sdmadesc[0] |= SDMA_DESC_FIRST;
        if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
                sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;

        /* write to the descq */
        tail = ppd->sdma_descq_tail;
        descqp = &ppd->sdma_descq[tail].qw[0];
        *descqp++ = cpu_to_le64(sdmadesc[0]);
        *descqp++ = cpu_to_le64(sdmadesc[1]);

        /* increment the tail */
        if (++tail == ppd->sdma_descq_cnt) {
                tail = 0;
                descqp = &ppd->sdma_descq[0].qw[0];
                ++ppd->sdma_generation;
        }

        tx->txreq.start_idx = tail;

        sge = &ss->sge;
        while (dwords) {
                u32 dw;
                u32 len = rvt_get_sge_length(sge, dwords << 2);

                dw = (len + 3) >> 2;
                addr = dma_map_single(&ppd->dd->pcidev->dev, sge->vaddr,
                                      dw << 2, DMA_TO_DEVICE);
                if (dma_mapping_error(&ppd->dd->pcidev->dev, addr)) {
                        ret = -ENOMEM;
                        goto unmap;
                }
                sdmadesc[0] = 0;
                make_sdma_desc(ppd, sdmadesc, (u64) addr, dw, dwoffset);
                /* SDmaUseLargeBuf has to be set in every descriptor */
                if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
                        sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;
                /* write to the descq */
                *descqp++ = cpu_to_le64(sdmadesc[0]);
                *descqp++ = cpu_to_le64(sdmadesc[1]);

                /* increment the tail */
                if (++tail == ppd->sdma_descq_cnt) {
                        tail = 0;
                        descqp = &ppd->sdma_descq[0].qw[0];
                        ++ppd->sdma_generation;
                }
                rvt_update_sge(ss, len, false);
                dwoffset += dw;
                dwords -= dw;
        }

        if (!tail)
                descqp = &ppd->sdma_descq[ppd->sdma_descq_cnt].qw[0];
        descqp -= 2;
        descqp[0] |= cpu_to_le64(SDMA_DESC_LAST);
        if (tx->txreq.flags & QIB_SDMA_TXREQ_F_HEADTOHOST)
                descqp[0] |= cpu_to_le64(SDMA_DESC_DMA_HEAD);
        if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ)
                descqp[0] |= cpu_to_le64(SDMA_DESC_INTR);
        priv = tx->qp->priv;
        atomic_inc(&priv->s_dma_busy);
        tx->txreq.next_descq_idx = tail;
        ppd->dd->f_sdma_update_tail(ppd, tail);
        ppd->sdma_descq_added += tx->txreq.sg_count;
        list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
        goto unlock;

unmap:
        for (;;) {
                if (!tail)
                        tail = ppd->sdma_descq_cnt - 1;
                else
                        tail--;
                if (tail == ppd->sdma_descq_tail)
                        break;
                unmap_desc(ppd, tail);
        }
        qp = tx->qp;
        priv = qp->priv;
        qib_put_txreq(tx);
        spin_lock(&qp->r_lock);
        spin_lock(&qp->s_lock);
        if (qp->ibqp.qp_type == IB_QPT_RC) {
                /* XXX what about error sending RDMA read responses? */
                if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)
                        rvt_error_qp(qp, IB_WC_GENERAL_ERR);
        } else if (qp->s_wqe)
                rvt_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
        spin_unlock(&qp->s_lock);
        spin_unlock(&qp->r_lock);
        /* return zero to process the next send work request */
        goto unlock;

busy:
        qp = tx->qp;
        priv = qp->priv;
        spin_lock(&qp->s_lock);
        if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
                struct qib_ibdev *dev;

                /*
                 * If we couldn't queue the DMA request, save the info
                 * and try again later rather than destroying the
                 * buffer and undoing the side effects of the copy.
                 */
                tx->ss = ss;
                tx->dwords = dwords;
                priv->s_tx = tx;
                dev = &ppd->dd->verbs_dev;
                spin_lock(&dev->rdi.pending_lock);
                if (list_empty(&priv->iowait)) {
                        struct qib_ibport *ibp;

                        ibp = &ppd->ibport_data;
                        ibp->rvp.n_dmawait++;
                        qp->s_flags |= RVT_S_WAIT_DMA_DESC;
                        list_add_tail(&priv->iowait, &dev->dmawait);
                }
                spin_unlock(&dev->rdi.pending_lock);
                qp->s_flags &= ~RVT_S_BUSY;
                spin_unlock(&qp->s_lock);
                ret = -EBUSY;
        } else {
                spin_unlock(&qp->s_lock);
                qib_put_txreq(tx);
        }
unlock:
        spin_unlock_irqrestore(&ppd->sdma_lock, flags);
        return ret;
}

/*
 * sdma_lock should be acquired before calling this routine
 */
void dump_sdma_state(struct qib_pportdata *ppd)
{
        struct qib_sdma_desc *descq;
        struct qib_sdma_txreq *txp, *txpnext;
        __le64 *descqp;
        u64 desc[2];
        u64 addr;
        u16 gen, dwlen, dwoffset;
        u16 head, tail, cnt;

        head = ppd->sdma_descq_head;
        tail = ppd->sdma_descq_tail;
        cnt = qib_sdma_descq_freecnt(ppd);
        descq = ppd->sdma_descq;

        qib_dev_porterr(ppd->dd, ppd->port,
                "SDMA ppd->sdma_descq_head: %u\n", head);
        qib_dev_porterr(ppd->dd, ppd->port,
                "SDMA ppd->sdma_descq_tail: %u\n", tail);
        qib_dev_porterr(ppd->dd, ppd->port,
                "SDMA sdma_descq_freecnt: %u\n", cnt);

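        /*
         * Flag legend for the dump below (bit positions match the
         * SDMA_DESC_* definitions at the top of this file):
         * I = interrupt requested, L/S = large/small send buffer,
         * H = DMA head update, F = first descriptor of a packet,
         * trailing L = last descriptor of a packet.
         */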
        /* print info for each entry in the descriptor queue */
        while (head != tail) {
                char flags[6] = { 'x', 'x', 'x', 'x', 'x', 0 };

                descqp = &descq[head].qw[0];
                desc[0] = le64_to_cpu(descqp[0]);
                desc[1] = le64_to_cpu(descqp[1]);
                flags[0] = (desc[0] & 1<<15) ? 'I' : '-';
                flags[1] = (desc[0] & 1<<14) ? 'L' : 'S';
                flags[2] = (desc[0] & 1<<13) ? 'H' : '-';
                flags[3] = (desc[0] & 1<<12) ? 'F' : '-';
                flags[4] = (desc[0] & 1<<11) ? 'L' : '-';
                addr = (desc[1] << 32) | ((desc[0] >> 32) & 0xfffffffcULL);
                gen = (desc[0] >> 30) & 3ULL;
                dwlen = (desc[0] >> 14) & (0x7ffULL << 2);
                dwoffset = (desc[0] & 0x7ffULL) << 2;
                qib_dev_porterr(ppd->dd, ppd->port,
                        "SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes offset:%u bytes\n",
                         head, flags, addr, gen, dwlen, dwoffset);
                if (++head == ppd->sdma_descq_cnt)
                        head = 0;
        }

        /* print dma descriptor indices from the TX requests */
        list_for_each_entry_safe(txp, txpnext, &ppd->sdma_activelist,
                                 list)
                qib_dev_porterr(ppd->dd, ppd->port,
                        "SDMA txp->start_idx: %u txp->next_descq_idx: %u\n",
                        txp->start_idx, txp->next_descq_idx);
}

void qib_sdma_process_event(struct qib_pportdata *ppd,
        enum qib_sdma_events event)
{
        unsigned long flags;

        spin_lock_irqsave(&ppd->sdma_lock, flags);

        __qib_sdma_process_event(ppd, event);

        if (ppd->sdma_state.current_state == qib_sdma_state_s99_running)
                qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));

        spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}

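/*
 * State transition summary, condensed from the switch below (a reading
 * aid, not an authoritative spec): from s00_hw_down, e10/e30 move to
 * s10_hw_start_up_wait; e20 then selects s99_running or s20_idle based
 * on go_s99_running.  A halt (e60, or the per-chip e7220/e7322 error
 * events) drops s99_running to s30_sw_clean_up_wait or s50_hw_halt_wait;
 * s50 -> s40_hw_clean_up_wait -> s30 -> back to s10 as the hardware and
 * software cleanups (e60/e50/e40) complete.  e00 forces s00 from any
 * state; e70 requests idle.
 */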
void __qib_sdma_process_event(struct qib_pportdata *ppd,
        enum qib_sdma_events event)
{
        struct qib_sdma_state *ss = &ppd->sdma_state;

        switch (ss->current_state) {
        case qib_sdma_state_s00_hw_down:
                switch (event) {
                case qib_sdma_event_e00_go_hw_down:
                        break;
                case qib_sdma_event_e30_go_running:
                        /*
                         * If down, but running is requested (usually the
                         * result of link up), then we need to start up.
                         * This can happen when hw down is requested while
                         * bringing the link up with traffic active on the
                         * 7220, e.g.
                         */
                        ss->go_s99_running = 1;
                        /* fall through -- and start dma engine */
                case qib_sdma_event_e10_go_hw_start:
                        /* This reference means the state machine is started */
                        sdma_get(&ppd->sdma_state);
                        sdma_set_state(ppd,
                                       qib_sdma_state_s10_hw_start_up_wait);
                        break;
                case qib_sdma_event_e20_hw_started:
                        break;
                case qib_sdma_event_e40_sw_cleaned:
                        sdma_sw_tear_down(ppd);
                        break;
                case qib_sdma_event_e50_hw_cleaned:
                        break;
                case qib_sdma_event_e60_hw_halted:
                        break;
                case qib_sdma_event_e70_go_idle:
                        break;
                case qib_sdma_event_e7220_err_halted:
                        break;
                case qib_sdma_event_e7322_err_halted:
                        break;
                case qib_sdma_event_e90_timer_tick:
                        break;
                }
                break;

        case qib_sdma_state_s10_hw_start_up_wait:
                switch (event) {
                case qib_sdma_event_e00_go_hw_down:
                        sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
                        sdma_sw_tear_down(ppd);
                        break;
                case qib_sdma_event_e10_go_hw_start:
                        break;
                case qib_sdma_event_e20_hw_started:
                        sdma_set_state(ppd, ss->go_s99_running ?
                                       qib_sdma_state_s99_running :
                                       qib_sdma_state_s20_idle);
                        break;
                case qib_sdma_event_e30_go_running:
                        ss->go_s99_running = 1;
                        break;
                case qib_sdma_event_e40_sw_cleaned:
                        break;
                case qib_sdma_event_e50_hw_cleaned:
                        break;
                case qib_sdma_event_e60_hw_halted:
                        break;
                case qib_sdma_event_e70_go_idle:
                        ss->go_s99_running = 0;
                        break;
                case qib_sdma_event_e7220_err_halted:
                        break;
                case qib_sdma_event_e7322_err_halted:
                        break;
                case qib_sdma_event_e90_timer_tick:
                        break;
                }
                break;

        case qib_sdma_state_s20_idle:
                switch (event) {
                case qib_sdma_event_e00_go_hw_down:
                        sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
                        sdma_sw_tear_down(ppd);
                        break;
                case qib_sdma_event_e10_go_hw_start:
                        break;
                case qib_sdma_event_e20_hw_started:
                        break;
                case qib_sdma_event_e30_go_running:
                        sdma_set_state(ppd, qib_sdma_state_s99_running);
                        ss->go_s99_running = 1;
                        break;
                case qib_sdma_event_e40_sw_cleaned:
                        break;
                case qib_sdma_event_e50_hw_cleaned:
                        break;
                case qib_sdma_event_e60_hw_halted:
                        break;
                case qib_sdma_event_e70_go_idle:
                        break;
                case qib_sdma_event_e7220_err_halted:
                        break;
                case qib_sdma_event_e7322_err_halted:
                        break;
                case qib_sdma_event_e90_timer_tick:
                        break;
                }
                break;

        case qib_sdma_state_s30_sw_clean_up_wait:
                switch (event) {
                case qib_sdma_event_e00_go_hw_down:
                        sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
                        break;
                case qib_sdma_event_e10_go_hw_start:
                        break;
                case qib_sdma_event_e20_hw_started:
                        break;
                case qib_sdma_event_e30_go_running:
                        ss->go_s99_running = 1;
                        break;
                case qib_sdma_event_e40_sw_cleaned:
                        sdma_set_state(ppd,
                                       qib_sdma_state_s10_hw_start_up_wait);
                        sdma_hw_start_up(ppd);
                        break;
                case qib_sdma_event_e50_hw_cleaned:
                        break;
                case qib_sdma_event_e60_hw_halted:
                        break;
                case qib_sdma_event_e70_go_idle:
                        ss->go_s99_running = 0;
                        break;
                case qib_sdma_event_e7220_err_halted:
                        break;
                case qib_sdma_event_e7322_err_halted:
                        break;
                case qib_sdma_event_e90_timer_tick:
                        break;
                }
                break;

        case qib_sdma_state_s40_hw_clean_up_wait:
                switch (event) {
                case qib_sdma_event_e00_go_hw_down:
                        sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
                        sdma_start_sw_clean_up(ppd);
                        break;
                case qib_sdma_event_e10_go_hw_start:
                        break;
                case qib_sdma_event_e20_hw_started:
                        break;
                case qib_sdma_event_e30_go_running:
                        ss->go_s99_running = 1;
                        break;
                case qib_sdma_event_e40_sw_cleaned:
                        break;
                case qib_sdma_event_e50_hw_cleaned:
                        sdma_set_state(ppd,
                                       qib_sdma_state_s30_sw_clean_up_wait);
                        sdma_start_sw_clean_up(ppd);
                        break;
                case qib_sdma_event_e60_hw_halted:
                        break;
                case qib_sdma_event_e70_go_idle:
                        ss->go_s99_running = 0;
                        break;
                case qib_sdma_event_e7220_err_halted:
                        break;
                case qib_sdma_event_e7322_err_halted:
                        break;
                case qib_sdma_event_e90_timer_tick:
                        break;
                }
                break;

        case qib_sdma_state_s50_hw_halt_wait:
                switch (event) {
                case qib_sdma_event_e00_go_hw_down:
                        sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
                        sdma_start_sw_clean_up(ppd);
                        break;
                case qib_sdma_event_e10_go_hw_start:
                        break;
                case qib_sdma_event_e20_hw_started:
                        break;
                case qib_sdma_event_e30_go_running:
                        ss->go_s99_running = 1;
                        break;
                case qib_sdma_event_e40_sw_cleaned:
                        break;
                case qib_sdma_event_e50_hw_cleaned:
                        break;
                case qib_sdma_event_e60_hw_halted:
                        sdma_set_state(ppd,
                                       qib_sdma_state_s40_hw_clean_up_wait);
                        ppd->dd->f_sdma_hw_clean_up(ppd);
                        break;
                case qib_sdma_event_e70_go_idle:
                        ss->go_s99_running = 0;
                        break;
                case qib_sdma_event_e7220_err_halted:
                        break;
                case qib_sdma_event_e7322_err_halted:
                        break;
                case qib_sdma_event_e90_timer_tick:
                        break;
                }
                break;

        case qib_sdma_state_s99_running:
                switch (event) {
                case qib_sdma_event_e00_go_hw_down:
                        sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
                        sdma_start_sw_clean_up(ppd);
                        break;
                case qib_sdma_event_e10_go_hw_start:
                        break;
                case qib_sdma_event_e20_hw_started:
                        break;
                case qib_sdma_event_e30_go_running:
                        break;
                case qib_sdma_event_e40_sw_cleaned:
                        break;
                case qib_sdma_event_e50_hw_cleaned:
                        break;
                case qib_sdma_event_e60_hw_halted:
                        sdma_set_state(ppd,
                                       qib_sdma_state_s30_sw_clean_up_wait);
                        sdma_start_sw_clean_up(ppd);
                        break;
                case qib_sdma_event_e70_go_idle:
                        sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
                        ss->go_s99_running = 0;
                        break;
                case qib_sdma_event_e7220_err_halted:
                        sdma_set_state(ppd,
                                       qib_sdma_state_s30_sw_clean_up_wait);
                        sdma_start_sw_clean_up(ppd);
                        break;
                case qib_sdma_event_e7322_err_halted:
                        sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
                        break;
                case qib_sdma_event_e90_timer_tick:
                        break;
                }
                break;
        }

        ss->last_event = event;
}