linux/drivers/infiniband/hw/qib/qib_qp.c
/*
 * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <rdma/rdma_vt.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif

#include "qib.h"

/*
 * The now-deleted qib_qpn_table had a mask field that has no
 * equivalent in rvt_qpn_table.  Rather than adding a mask field to
 * rvt_qpn_table, define the same mask here as qpt_mask.
 */
u16 qpt_mask;

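/*
 * Convert a QPN bitmap page and an offset within that page back into
 * the QP number it represents.
 */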
static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
                              struct rvt_qpn_map *map, unsigned off)
{
        return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}

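/*
 * Find the next candidate offset in a QPN bitmap page.  When qpt_mask
 * is set, QPNs are strided so that the masked bits select a kernel
 * receive context, and offsets that would map to a context at or past
 * n are skipped; otherwise, simply find the next zero bit in the page.
 */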
static inline unsigned find_next_offset(struct rvt_qpn_table *qpt,
                                        struct rvt_qpn_map *map, unsigned off,
                                        unsigned n)
{
        if (qpt_mask) {
                off++;
                if (((off & qpt_mask) >> 1) >= n)
                        off = (off | qpt_mask) + 2;
        } else {
                off = find_next_zero_bit(map->page, RVT_BITS_PER_PAGE, off);
        }
        return off;
}

/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
        0,                      /* 0 */
        1,                      /* 1 */
        2,                      /* 2 */
        3,                      /* 3 */
        4,                      /* 4 */
        6,                      /* 5 */
        8,                      /* 6 */
        12,                     /* 7 */
        16,                     /* 8 */
        24,                     /* 9 */
        32,                     /* A */
        48,                     /* B */
        64,                     /* C */
        96,                     /* D */
        128,                    /* E */
        192,                    /* F */
        256,                    /* 10 */
        384,                    /* 11 */
        512,                    /* 12 */
        768,                    /* 13 */
        1024,                   /* 14 */
        1536,                   /* 15 */
        2048,                   /* 16 */
        3072,                   /* 17 */
        4096,                   /* 18 */
        6144,                   /* 19 */
        8192,                   /* 1A */
        12288,                  /* 1B */
        16384,                  /* 1C */
        24576,                  /* 1D */
        32768                   /* 1E */
};

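/*
 * Allocate a zeroed page for a QPN bitmap and install it in the table,
 * freeing the page again if another thread raced us to install one.
 */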
static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map,
                         gfp_t gfp)
{
        unsigned long page = get_zeroed_page(gfp);

        /*
         * Free the page if someone raced with us installing it.
         */

        spin_lock(&qpt->lock);
        if (map->page)
                free_page(page);
        else
                map->page = (void *)page;
        spin_unlock(&qpt->lock);
}

/*
 * Allocate the next available QPN or
 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
 */
int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
                  enum ib_qp_type type, u8 port, gfp_t gfp)
{
        u32 i, offset, max_scan, qpn;
        struct rvt_qpn_map *map;
        u32 ret;
        struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
        struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
                                              verbs_dev);

        if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
                unsigned n;

                ret = type == IB_QPT_GSI;
                n = 1 << (ret + 2 * (port - 1));
                spin_lock(&qpt->lock);
                if (qpt->flags & n)
                        ret = -EINVAL;
                else
                        qpt->flags |= n;
                spin_unlock(&qpt->lock);
                goto bail;
        }

        qpn = qpt->last + 2;
        if (qpn >= RVT_QPN_MAX)
                qpn = 2;
        if (qpt_mask && ((qpn & qpt_mask) >> 1) >= dd->n_krcv_queues)
                qpn = (qpn | qpt_mask) + 2;
        offset = qpn & RVT_BITS_PER_PAGE_MASK;
        map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
        max_scan = qpt->nmaps - !offset;
        for (i = 0;;) {
                if (unlikely(!map->page)) {
                        get_map_page(qpt, map, gfp);
                        if (unlikely(!map->page))
                                break;
                }
                do {
                        if (!test_and_set_bit(offset, map->page)) {
                                qpt->last = qpn;
                                ret = qpn;
                                goto bail;
                        }
                        offset = find_next_offset(qpt, map, offset,
                                dd->n_krcv_queues);
                        qpn = mk_qpn(qpt, map, offset);
                        /*
                         * This test differs from alloc_pidmap().
                         * If find_next_offset() does find a zero
                         * bit, we don't need to check for QPN
                         * wrapping around past our starting QPN.
                         * We just need to be sure we don't loop
                         * forever.
                         */
                } while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
                /*
                 * In order to keep the number of pages allocated to a
                 * minimum, we scan all the existing pages before increasing
                 * the size of the bitmap table.
                 */
                if (++i > max_scan) {
                        if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
                                break;
                        map = &qpt->map[qpt->nmaps++];
                        offset = 0;
                } else if (map < &qpt->map[qpt->nmaps]) {
                        ++map;
                        offset = 0;
                } else {
                        map = &qpt->map[0];
                        offset = 2;
                }
                qpn = mk_qpn(qpt, map, offset);
        }

        ret = -ENOMEM;

bail:
        return ret;
}

/**
 * qib_free_all_qps - check for QPs still in use
 * @rdi: the rvt device info structure
 *
 * Returns the number of QP0s and QP1s still in use across all ports.
 */
unsigned qib_free_all_qps(struct rvt_dev_info *rdi)
{
        struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
        struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
                                              verbs_dev);
        unsigned n, qp_inuse = 0;

        for (n = 0; n < dd->num_pports; n++) {
                struct qib_ibport *ibp = &dd->pport[n].ibport_data;

                rcu_read_lock();
                if (rcu_dereference(ibp->rvp.qp[0]))
                        qp_inuse++;
                if (rcu_dereference(ibp->rvp.qp[1]))
                        qp_inuse++;
                rcu_read_unlock();
        }
        return qp_inuse;
}

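/*
 * rdmavt callback invoked when a QP is moved back to the reset state:
 * clear the count of in-flight send DMA operations.
 */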
void qib_notify_qp_reset(struct rvt_qp *qp)
{
        struct qib_qp_priv *priv = qp->priv;

        atomic_set(&priv->s_dma_busy, 0);
}

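/*
 * rdmavt callback invoked when a QP transitions to the error state:
 * take the QP off the I/O wait list and, if the send engine is idle,
 * release the send resources it holds.
 */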
void qib_notify_error_qp(struct rvt_qp *qp)
{
        struct qib_qp_priv *priv = qp->priv;
        struct qib_ibdev *dev = to_idev(qp->ibqp.device);

        spin_lock(&dev->rdi.pending_lock);
        if (!list_empty(&priv->iowait) && !(qp->s_flags & RVT_S_BUSY)) {
                qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
                list_del_init(&priv->iowait);
        }
        spin_unlock(&dev->rdi.pending_lock);

        if (!(qp->s_flags & RVT_S_BUSY)) {
                qp->s_hdrwords = 0;
                if (qp->s_rdma_mr) {
                        rvt_put_mr(qp->s_rdma_mr);
                        qp->s_rdma_mr = NULL;
                }
                if (priv->s_tx) {
                        qib_put_txreq(priv->s_tx);
                        priv->s_tx = NULL;
                }
        }
}

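/*
 * Map an MTU in bytes to the corresponding IB_MTU_* enum value,
 * defaulting to IB_MTU_2048 for unrecognized sizes.
 */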
static int mtu_to_enum(u32 mtu)
{
        int enum_mtu;

        switch (mtu) {
        case 4096:
                enum_mtu = IB_MTU_4096;
                break;
        case 2048:
                enum_mtu = IB_MTU_2048;
                break;
        case 1024:
                enum_mtu = IB_MTU_1024;
                break;
        case 512:
                enum_mtu = IB_MTU_512;
                break;
        case 256:
                enum_mtu = IB_MTU_256;
                break;
        default:
                enum_mtu = IB_MTU_2048;
        }
        return enum_mtu;
}

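/*
 * Validate the path MTU requested by a modify QP operation and clamp
 * it to the MTU of the physical port.  Returns the resulting IB_MTU_*
 * enum value, or -EINVAL if the attribute is not a valid MTU.
 */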
int qib_get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                           struct ib_qp_attr *attr)
{
        int mtu, pmtu, pidx = qp->port_num - 1;
        struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
        struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
                                              verbs_dev);

        mtu = ib_mtu_enum_to_int(attr->path_mtu);
        if (mtu == -1)
                return -EINVAL;

        if (mtu > dd->pport[pidx].ibmtu)
                pmtu = mtu_to_enum(dd->pport[pidx].ibmtu);
        else
                pmtu = attr->path_mtu;
        return pmtu;
}

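/*
 * rdmavt callback to convert an MTU in bytes into a path MTU enum value.
 */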
int qib_mtu_to_path_mtu(u32 mtu)
{
        return mtu_to_enum(mtu);
}

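/*
 * rdmavt callback to convert a QP's path MTU enum back into a byte count.
 */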
u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
{
        return ib_mtu_enum_to_int(pmtu);
}

/**
 * qib_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 qib_compute_aeth(struct rvt_qp *qp)
{
        u32 aeth = qp->r_msn & QIB_MSN_MASK;

        if (qp->ibqp.srq) {
                /*
                 * Shared receive queues don't generate credits.
                 * Set the credit field to the invalid value.
                 */
                aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
        } else {
                u32 min, max, x;
                u32 credits;
                struct rvt_rwq *wq = qp->r_rq.wq;
                u32 head;
                u32 tail;

                /* sanity check pointers before trusting them */
                head = wq->head;
                if (head >= qp->r_rq.size)
                        head = 0;
                tail = wq->tail;
                if (tail >= qp->r_rq.size)
                        tail = 0;
                /*
                 * Compute the number of credits available (RWQEs).
                 * XXX Not holding the r_rq.lock here so there is a small
                 * chance that the pair of reads are not atomic.
                 */
                credits = head - tail;
                if ((int)credits < 0)
                        credits += qp->r_rq.size;
                /*
                 * Binary search the credit table to find the code to
                 * use.
                 */
                min = 0;
                max = 31;
                for (;;) {
                        x = (min + max) / 2;
                        if (credit_table[x] == credits)
                                break;
                        if (credit_table[x] > credits)
                                max = x;
                        else if (min == x)
                                break;
                        else
                                min = x;
                }
                aeth |= x << QIB_AETH_CREDIT_SHIFT;
        }
        return cpu_to_be32(aeth);
}

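/*
 * Allocate the qib-private part of a QP: the scratch send header, the
 * DMA wait queue, and the send work item.  Returns the private structure
 * or an ERR_PTR on allocation failure.
 */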
void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp)
{
        struct qib_qp_priv *priv;

        priv = kzalloc(sizeof(*priv), gfp);
        if (!priv)
                return ERR_PTR(-ENOMEM);
        priv->owner = qp;

        priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), gfp);
        if (!priv->s_hdr) {
                kfree(priv);
                return ERR_PTR(-ENOMEM);
        }
        init_waitqueue_head(&priv->wait_dma);
        INIT_WORK(&priv->s_work, _qib_do_send);
        INIT_LIST_HEAD(&priv->iowait);

        return priv;
}

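/*
 * Free the qib-private part of a QP allocated by qib_qp_priv_alloc().
 */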
void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
        struct qib_qp_priv *priv = qp->priv;

        kfree(priv->s_hdr);
        kfree(priv);
}

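/*
 * Stop the send side of a QP: cancel any pending send work and delete
 * the QP's send timer.
 */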
void qib_stop_send_queue(struct rvt_qp *qp)
{
        struct qib_qp_priv *priv = qp->priv;

        cancel_work_sync(&priv->s_work);
        del_timer_sync(&qp->s_timer);
}

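/*
 * Wait for any in-progress send DMA to complete and release the pending
 * transmit request, leaving the QP quiescent.
 */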
void qib_quiesce_qp(struct rvt_qp *qp)
{
        struct qib_qp_priv *priv = qp->priv;

        wait_event(priv->wait_dma, !atomic_read(&priv->s_dma_busy));
        if (priv->s_tx) {
                qib_put_txreq(priv->s_tx);
                priv->s_tx = NULL;
        }
}

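/*
 * Remove the QP from the list of QPs waiting for I/O resources.
 */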
void qib_flush_qp_waiters(struct rvt_qp *qp)
{
        struct qib_qp_priv *priv = qp->priv;
        struct qib_ibdev *dev = to_idev(qp->ibqp.device);

        spin_lock(&dev->rdi.pending_lock);
        if (!list_empty(&priv->iowait))
                list_del_init(&priv->iowait);
        spin_unlock(&dev->rdi.pending_lock);
}

/**
 * qib_get_credit - handle a credit update in an incoming AETH
 * @qp: the qp whose credits to update
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void qib_get_credit(struct rvt_qp *qp, u32 aeth)
{
        u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;

        /*
         * If the credit is invalid, we can send
         * as many packets as we like.  Otherwise, we have to
         * honor the credit field.
         */
        if (credit == QIB_AETH_CREDIT_INVAL) {
                if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
                        qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
                        if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
                                qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
                                qib_schedule_send(qp);
                        }
                }
        } else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
                /* Compute new LSN (i.e., MSN + credit) */
                credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
                if (qib_cmp24(credit, qp->s_lsn) > 0) {
                        qp->s_lsn = credit;
                        if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
                                qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
                                qib_schedule_send(qp);
                        }
                }
        }
}

/**
 * qib_check_send_wqe - validate wr/wqe
 * @qp: the qp
 * @wqe: the built wqe
 *
 * Validate the wr/wqe.  This is called
 * prior to inserting the wqe into
 * the ring, but after the wqe has been
 * set up.
 *
 * Returns 1 to force direct progress, 0 otherwise, and -EINVAL on failure.
 */
int qib_check_send_wqe(struct rvt_qp *qp,
                       struct rvt_swqe *wqe)
{
        struct rvt_ah *ah;
        int ret = 0;

        switch (qp->ibqp.qp_type) {
        case IB_QPT_RC:
        case IB_QPT_UC:
                if (wqe->length > 0x80000000U)
                        return -EINVAL;
                break;
        case IB_QPT_SMI:
        case IB_QPT_GSI:
        case IB_QPT_UD:
                ah = ibah_to_rvtah(wqe->ud_wr.ah);
                if (wqe->length > (1 << ah->log_pmtu))
                        return -EINVAL;
                /* progress hint */
                ret = 1;
                break;
        default:
                break;
        }
        return ret;
}

#ifdef CONFIG_DEBUG_FS

struct qib_qp_iter {
        struct qib_ibdev *dev;
        struct rvt_qp *qp;
        int n;
};

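/*
 * Allocate an iterator positioned on the first QP in the device's QP
 * table.  Returns NULL on allocation failure or if there are no QPs.
 */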
struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev)
{
        struct qib_qp_iter *iter;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return NULL;

        iter->dev = dev;
        if (qib_qp_iter_next(iter)) {
                kfree(iter);
                return NULL;
        }

        return iter;
}

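/*
 * Advance the iterator to the next QP in the QP hash table.  Returns 0
 * if a QP was found and 1 when the table is exhausted.  Callers are
 * expected to hold the RCU read lock across the walk.
 */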
int qib_qp_iter_next(struct qib_qp_iter *iter)
{
        struct qib_ibdev *dev = iter->dev;
        int n = iter->n;
        int ret = 1;
        struct rvt_qp *pqp = iter->qp;
        struct rvt_qp *qp;

        for (; n < dev->rdi.qp_dev->qp_table_size; n++) {
                if (pqp)
                        qp = rcu_dereference(pqp->next);
                else
                        qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]);
                pqp = qp;
                if (qp) {
                        iter->qp = qp;
                        iter->n = n;
                        return 0;
                }
        }
        return ret;
}

static const char * const qp_type_str[] = {
        "SMI", "GSI", "RC", "UC", "UD",
};

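/*
 * Emit a one-line debugfs summary of the iterator's current QP: its
 * state, flags, PSN bookkeeping, and send queue indices.
 */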
void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
{
        struct rvt_swqe *wqe;
        struct rvt_qp *qp = iter->qp;
        struct qib_qp_priv *priv = qp->priv;

        wqe = rvt_get_swqe_ptr(qp, qp->s_last);
        seq_printf(s,
                   "N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n",
                   iter->n,
                   qp->ibqp.qp_num,
                   qp_type_str[qp->ibqp.qp_type],
                   qp->state,
                   wqe->wr.opcode,
                   qp->s_hdrwords,
                   qp->s_flags,
                   atomic_read(&priv->s_dma_busy),
                   !list_empty(&priv->iowait),
                   qp->timeout,
                   wqe->ssn,
                   qp->s_lsn,
                   qp->s_last_psn,
                   qp->s_psn, qp->s_next_psn,
                   qp->s_sending_psn, qp->s_sending_hpsn,
                   qp->s_last, qp->s_acked, qp->s_cur,
                   qp->s_tail, qp->s_head, qp->s_size,
                   qp->remote_qpn,
                   qp->remote_ah_attr.dlid);
}

#endif