linux/drivers/infiniband/hw/hns/hns_roce_qp.c
/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/platform_device.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include <rdma/hns-abi.h>

#define SQP_NUM                         (2 * HNS_ROCE_MAX_PORTS)

static void flush_work_handle(struct work_struct *work)
{
        struct hns_roce_work *flush_work = container_of(work,
                                        struct hns_roce_work, work);
        struct hns_roce_qp *hr_qp = container_of(flush_work,
                                        struct hns_roce_qp, flush_work);
        struct device *dev = flush_work->hr_dev->dev;
        struct ib_qp_attr attr;
        int attr_mask;
        int ret;

        attr_mask = IB_QP_STATE;
        attr.qp_state = IB_QPS_ERR;

        if (test_and_clear_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) {
                ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL);
                if (ret)
                        dev_err(dev, "Modify QP to error state failed(%d) during CQE flush\n",
                                ret);
        }

        /*
         * Make sure we signal the QP destroy path that the flush has
         * completed, so that it can safely proceed and destroy the QP.
         */
        if (atomic_dec_and_test(&hr_qp->refcount))
                complete(&hr_qp->free);
}

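/*
 * Schedule a work item that moves the QP to the error state so that the
 * hardware flushes its outstanding CQEs. A reference on the QP is taken
 * here and dropped by flush_work_handle() once the modify has finished.
 */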
void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
        struct hns_roce_work *flush_work = &hr_qp->flush_work;

        flush_work->hr_dev = hr_dev;
        INIT_WORK(&flush_work->work, flush_work_handle);
        atomic_inc(&hr_qp->refcount);
        queue_work(hr_dev->irq_workq, &flush_work->work);
}

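/*
 * Dispatch an asynchronous event reported by the hardware for the given
 * QPN. The QP is looked up under the xarray lock and kept alive with a
 * reference for the duration of the event handling.
 */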
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
        struct device *dev = hr_dev->dev;
        struct hns_roce_qp *qp;

        xa_lock(&hr_dev->qp_table_xa);
        qp = __hns_roce_qp_lookup(hr_dev, qpn);
        if (qp)
                atomic_inc(&qp->refcount);
        xa_unlock(&hr_dev->qp_table_xa);

        if (!qp) {
                dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
                return;
        }

        if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 &&
            (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR ||
             event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR ||
             event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR)) {
                qp->state = IB_QPS_ERR;
                if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
                        init_flush_work(hr_dev, qp);
        }

        qp->event(qp, (enum hns_roce_event)event_type);

        if (atomic_dec_and_test(&qp->refcount))
                complete(&qp->free);
}

static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
                                 enum hns_roce_event type)
{
        struct ib_event event;
        struct ib_qp *ibqp = &hr_qp->ibqp;

        if (ibqp->event_handler) {
                event.device = ibqp->device;
                event.element.qp = ibqp;
                switch (type) {
                case HNS_ROCE_EVENT_TYPE_PATH_MIG:
                        event.event = IB_EVENT_PATH_MIG;
                        break;
                case HNS_ROCE_EVENT_TYPE_COMM_EST:
                        event.event = IB_EVENT_COMM_EST;
                        break;
                case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
                        event.event = IB_EVENT_SQ_DRAINED;
                        break;
                case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
                        event.event = IB_EVENT_QP_LAST_WQE_REACHED;
                        break;
                case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
                        event.event = IB_EVENT_QP_FATAL;
                        break;
                case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
                        event.event = IB_EVENT_PATH_MIG_ERR;
                        break;
                case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
                        event.event = IB_EVENT_QP_REQ_ERR;
                        break;
                case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
                        event.event = IB_EVENT_QP_ACCESS_ERR;
                        break;
                default:
                        dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n",
                                type, hr_qp->qpn);
                        return;
                }
                ibqp->event_handler(&event, ibqp->qp_context);
        }
}

static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
        unsigned long num = 0;
        int ret;

        if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
                /* In hw v1, the GSI QP number is fixed per physical port */
                if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
                        num = HNS_ROCE_MAX_PORTS +
                              hr_dev->iboe.phy_port[hr_qp->port];
                else
                        num = 1;

                hr_qp->doorbell_qpn = 1;
        } else {
                ret = hns_roce_bitmap_alloc_range(&hr_dev->qp_table.bitmap,
                                                  1, 1, &num);
                if (ret) {
                        ibdev_err(&hr_dev->ib_dev, "Failed to alloc bitmap\n");
                        return -ENOMEM;
                }

                hr_qp->doorbell_qpn = (u32)num;
        }

        hr_qp->qpn = num;

        return 0;
}

enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
{
        switch (state) {
        case IB_QPS_RESET:
                return HNS_ROCE_QP_STATE_RST;
        case IB_QPS_INIT:
                return HNS_ROCE_QP_STATE_INIT;
        case IB_QPS_RTR:
                return HNS_ROCE_QP_STATE_RTR;
        case IB_QPS_RTS:
                return HNS_ROCE_QP_STATE_RTS;
        case IB_QPS_SQD:
                return HNS_ROCE_QP_STATE_SQD;
        case IB_QPS_ERR:
                return HNS_ROCE_QP_STATE_ERR;
        default:
                return HNS_ROCE_QP_NUM_STATE;
        }
}

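/*
 * Track the QP on the device-wide QP list and on its CQs' SQ/RQ lists.
 * Both CQ locks are taken via hns_roce_lock_cqs() while the lists are
 * updated so that software completion handling sees a consistent view.
 */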
static void add_qp_to_list(struct hns_roce_dev *hr_dev,
                           struct hns_roce_qp *hr_qp,
                           struct ib_cq *send_cq, struct ib_cq *recv_cq)
{
        struct hns_roce_cq *hr_send_cq, *hr_recv_cq;
        unsigned long flags;

        hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL;
        hr_recv_cq = recv_cq ? to_hr_cq(recv_cq) : NULL;

        spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
        hns_roce_lock_cqs(hr_send_cq, hr_recv_cq);

        list_add_tail(&hr_qp->node, &hr_dev->qp_list);
        if (hr_send_cq)
                list_add_tail(&hr_qp->sq_node, &hr_send_cq->sq_list);
        if (hr_recv_cq)
                list_add_tail(&hr_qp->rq_node, &hr_recv_cq->rq_list);

        hns_roce_unlock_cqs(hr_send_cq, hr_recv_cq);
        spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
}

static int hns_roce_qp_store(struct hns_roce_dev *hr_dev,
                             struct hns_roce_qp *hr_qp,
                             struct ib_qp_init_attr *init_attr)
{
        struct xarray *xa = &hr_dev->qp_table_xa;
        int ret;

        if (!hr_qp->qpn)
                return -EINVAL;

        ret = xa_err(xa_store_irq(xa, hr_qp->qpn, hr_qp, GFP_KERNEL));
        if (ret)
                dev_err(hr_dev->dev, "Failed to xa store for QPC\n");
        else
                /* add QP to device's QP list for software completion handling */
                add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq,
                               init_attr->recv_cq);

        return ret;
}

static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
        struct device *dev = hr_dev->dev;
        int ret;

        if (!hr_qp->qpn)
                return -EINVAL;

        /* In v1 engine, GSI QP context is saved in the RoCE hw's register */
        if (hr_qp->ibqp.qp_type == IB_QPT_GSI &&
            hr_dev->hw_rev == HNS_ROCE_HW_VER1)
                return 0;

        /* Alloc memory for QPC */
        ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
        if (ret) {
                dev_err(dev, "Failed to get QPC table\n");
                goto err_out;
        }

        /* Alloc memory for IRRL */
        ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
        if (ret) {
                dev_err(dev, "Failed to get IRRL table\n");
                goto err_put_qp;
        }

        if (hr_dev->caps.trrl_entry_sz) {
                /* Alloc memory for TRRL */
                ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
                                         hr_qp->qpn);
                if (ret) {
                        dev_err(dev, "Failed to get TRRL table\n");
                        goto err_put_irrl;
                }
        }

        if (hr_dev->caps.sccc_entry_sz) {
                /* Alloc memory for SCC CTX */
                ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table,
                                         hr_qp->qpn);
                if (ret) {
                        dev_err(dev, "Failed to get SCC CTX table\n");
                        goto err_put_trrl;
                }
        }

        return 0;

err_put_trrl:
        if (hr_dev->caps.trrl_entry_sz)
                hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);

err_put_irrl:
        hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);

err_put_qp:
        hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);

err_out:
        return ret;
}

void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
        struct xarray *xa = &hr_dev->qp_table_xa;
        unsigned long flags;

        list_del(&hr_qp->node);
        list_del(&hr_qp->sq_node);
        list_del(&hr_qp->rq_node);

        xa_lock_irqsave(xa, flags);
        __xa_erase(xa, hr_qp->qpn & (hr_dev->caps.num_qps - 1));
        xa_unlock_irqrestore(xa, flags);
}

static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

        /* In v1 engine, GSI QP context is saved in the RoCE hw's register */
        if (hr_qp->ibqp.qp_type == IB_QPT_GSI &&
            hr_dev->hw_rev == HNS_ROCE_HW_VER1)
                return;

        if (hr_dev->caps.trrl_entry_sz)
                hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
        hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
}

static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

        if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
                return;

        if (hr_qp->qpn < hr_dev->caps.reserved_qps)
                return;

        hns_roce_bitmap_free_range(&qp_table->bitmap, hr_qp->qpn, 1, BITMAP_RR);
}

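/*
 * Compute the RQ depth and SGE count. The requested values are rounded up
 * to powers of two, checked against the device capabilities, and written
 * back to @cap so the caller sees the sizes actually used.
 */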
static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
                       struct hns_roce_qp *hr_qp, int has_rq)
{
        u32 cnt;

        /* No RQ (e.g. an SRQ is attached): zero the RQ-related fields */
        if (!has_rq) {
                hr_qp->rq.wqe_cnt = 0;
                hr_qp->rq.max_gs = 0;
                hr_qp->rq_inl_buf.wqe_cnt = 0;
                cap->max_recv_wr = 0;
                cap->max_recv_sge = 0;

                return 0;
        }

        /* Check the validity of QP support capacity */
        if (!cap->max_recv_wr || cap->max_recv_wr > hr_dev->caps.max_wqes ||
            cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
                ibdev_err(&hr_dev->ib_dev, "RQ config error, depth=%u, sge=%d\n",
                          cap->max_recv_wr, cap->max_recv_sge);
                return -EINVAL;
        }

        cnt = roundup_pow_of_two(max(cap->max_recv_wr, hr_dev->caps.min_wqes));
        if (cnt > hr_dev->caps.max_wqes) {
                ibdev_err(&hr_dev->ib_dev, "rq depth %u too large\n",
                          cap->max_recv_wr);
                return -EINVAL;
        }

        hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge));

        if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE)
                hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
        else
                hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
                                            hr_qp->rq.max_gs);

        hr_qp->rq.wqe_cnt = cnt;
        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
                hr_qp->rq_inl_buf.wqe_cnt = cnt;
        else
                hr_qp->rq_inl_buf.wqe_cnt = 0;

        cap->max_recv_wr = cnt;
        cap->max_recv_sge = hr_qp->rq.max_gs;

        return 0;
}

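/*
 * Work out how many extended SGE entries the SQ needs. hw v1 keeps all
 * SGEs inside the WQE; later versions place UD/GSI SGEs, and any SGEs
 * beyond HNS_ROCE_SGE_IN_WQE for other QP types, in the extended SGE area.
 */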
static int set_extend_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt,
                                struct hns_roce_qp *hr_qp,
                                struct ib_qp_cap *cap)
{
        u32 cnt;

        cnt = max(1U, cap->max_send_sge);
        if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
                hr_qp->sq.max_gs = roundup_pow_of_two(cnt);
                hr_qp->sge.sge_cnt = 0;

                return 0;
        }

        hr_qp->sq.max_gs = cnt;

        /* UD SQ WQEs carry all of their SGEs in the extended SGE space */
        if (hr_qp->ibqp.qp_type == IB_QPT_GSI ||
            hr_qp->ibqp.qp_type == IB_QPT_UD) {
                cnt = roundup_pow_of_two(sq_wqe_cnt * hr_qp->sq.max_gs);
        } else if (hr_qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE) {
                cnt = roundup_pow_of_two(sq_wqe_cnt *
                                     (hr_qp->sq.max_gs - HNS_ROCE_SGE_IN_WQE));
        } else {
                cnt = 0;
        }

        hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT;
        hr_qp->sge.sge_cnt = cnt;

        return 0;
}

static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
                                        struct ib_qp_cap *cap,
                                        struct hns_roce_ib_create_qp *ucmd)
{
        u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
        u8 max_sq_stride = ilog2(roundup_sq_stride);

        /* Sanity check SQ size before proceeding */
        if (ucmd->log_sq_stride > max_sq_stride ||
            ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
                ibdev_err(&hr_dev->ib_dev, "Failed to check SQ stride size\n");
                return -EINVAL;
        }

        if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
                ibdev_err(&hr_dev->ib_dev, "Failed to check SQ SGE size %d\n",
                          cap->max_send_sge);
                return -EINVAL;
        }

        return 0;
}

static int set_user_sq_size(struct hns_roce_dev *hr_dev,
                            struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp,
                            struct hns_roce_ib_create_qp *ucmd)
{
        struct ib_device *ibdev = &hr_dev->ib_dev;
        u32 cnt = 0;
        int ret;

        if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) ||
            cnt > hr_dev->caps.max_wqes)
                return -EINVAL;

        ret = check_sq_size_with_integrity(hr_dev, cap, ucmd);
        if (ret) {
                ibdev_err(ibdev, "failed to check user SQ size, ret = %d.\n",
                          ret);
                return ret;
        }

        ret = set_extend_sge_param(hr_dev, cnt, hr_qp, cap);
        if (ret)
                return ret;

        hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
        hr_qp->sq.wqe_cnt = cnt;

        return 0;
}

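/*
 * Describe the WQE buffer layout for the MTR: the SQ WQEs come first,
 * followed by the extended SGE area and then the RQ WQEs, each in its own
 * region with the hop number taken from the device capabilities.
 */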
static int set_wqe_buf_attr(struct hns_roce_dev *hr_dev,
                            struct hns_roce_qp *hr_qp,
                            struct hns_roce_buf_attr *buf_attr)
{
        int buf_size;
        int idx = 0;

        hr_qp->buff_size = 0;

        /* SQ WQE */
        hr_qp->sq.offset = 0;
        buf_size = to_hr_hem_entries_size(hr_qp->sq.wqe_cnt,
                                          hr_qp->sq.wqe_shift);
        if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
                buf_attr->region[idx].size = buf_size;
                buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sq_hop_num;
                idx++;
                hr_qp->buff_size += buf_size;
        }

        /* extend SGE WQE in SQ */
        hr_qp->sge.offset = hr_qp->buff_size;
        buf_size = to_hr_hem_entries_size(hr_qp->sge.sge_cnt,
                                          hr_qp->sge.sge_shift);
        if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
                buf_attr->region[idx].size = buf_size;
                buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sge_hop_num;
                idx++;
                hr_qp->buff_size += buf_size;
        }

        /* RQ WQE */
        hr_qp->rq.offset = hr_qp->buff_size;
        buf_size = to_hr_hem_entries_size(hr_qp->rq.wqe_cnt,
                                          hr_qp->rq.wqe_shift);
        if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
                buf_attr->region[idx].size = buf_size;
                buf_attr->region[idx].hopnum = hr_dev->caps.wqe_rq_hop_num;
                idx++;
                hr_qp->buff_size += buf_size;
        }

        if (hr_qp->buff_size < 1)
                return -EINVAL;

        buf_attr->page_shift = HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
        buf_attr->fixed_page = true;
        buf_attr->region_count = idx;

        return 0;
}

static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,
                              struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp)
{
        struct ib_device *ibdev = &hr_dev->ib_dev;
        u32 cnt;
        int ret;

        if (!cap->max_send_wr || cap->max_send_wr > hr_dev->caps.max_wqes ||
            cap->max_send_sge > hr_dev->caps.max_sq_sg ||
            cap->max_inline_data > hr_dev->caps.max_sq_inline) {
                ibdev_err(ibdev,
                          "failed to check SQ WR, SGE or inline num, ret = %d.\n",
                          -EINVAL);
                return -EINVAL;
        }

        cnt = roundup_pow_of_two(max(cap->max_send_wr, hr_dev->caps.min_wqes));
        if (cnt > hr_dev->caps.max_wqes) {
                ibdev_err(ibdev, "failed to check WQE num, WQE num = %d.\n",
                          cnt);
                return -EINVAL;
        }

        hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
        hr_qp->sq.wqe_cnt = cnt;

        ret = set_extend_sge_param(hr_dev, cnt, hr_qp, cap);
        if (ret)
                return ret;

        /* sync the parameters of kernel QP to user's configuration */
        cap->max_send_wr = cnt;
        cap->max_send_sge = hr_qp->sq.max_gs;

        /* We don't support inline sends for kernel QPs (yet) */
        cap->max_inline_data = 0;

        return 0;
}

static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
{
        if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr)
                return 0;

        return 1;
}

static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
{
        if (attr->qp_type == IB_QPT_XRC_INI ||
            attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
            !attr->cap.max_recv_wr)
                return 0;

        return 1;
}

static int alloc_rq_inline_buf(struct hns_roce_qp *hr_qp,
                               struct ib_qp_init_attr *init_attr)
{
        u32 max_recv_sge = init_attr->cap.max_recv_sge;
        u32 wqe_cnt = hr_qp->rq_inl_buf.wqe_cnt;
        struct hns_roce_rinl_wqe *wqe_list;
        int i;

        /* allocate recv inline buf */
        wqe_list = kcalloc(wqe_cnt, sizeof(struct hns_roce_rinl_wqe),
                           GFP_KERNEL);

        if (!wqe_list)
                goto err;

        /* Allocate a contiguous buffer for all the inline sges we need */
        wqe_list[0].sg_list = kcalloc(wqe_cnt, (max_recv_sge *
                                      sizeof(struct hns_roce_rinl_sge)),
                                      GFP_KERNEL);
        if (!wqe_list[0].sg_list)
                goto err_wqe_list;

        /* Assign buffers of sg_list to each inline wqe */
        for (i = 1; i < wqe_cnt; i++)
                wqe_list[i].sg_list = &wqe_list[0].sg_list[i * max_recv_sge];

        hr_qp->rq_inl_buf.wqe_list = wqe_list;

        return 0;

err_wqe_list:
        kfree(wqe_list);

err:
        return -ENOMEM;
}

static void free_rq_inline_buf(struct hns_roce_qp *hr_qp)
{
        if (hr_qp->rq_inl_buf.wqe_list)
                kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
        kfree(hr_qp->rq_inl_buf.wqe_list);
}

static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
                        struct ib_qp_init_attr *init_attr,
                        struct ib_udata *udata, unsigned long addr)
{
        struct ib_device *ibdev = &hr_dev->ib_dev;
        struct hns_roce_buf_attr buf_attr = {};
        int ret;

        if (!udata && hr_qp->rq_inl_buf.wqe_cnt) {
                ret = alloc_rq_inline_buf(hr_qp, init_attr);
                if (ret) {
                        ibdev_err(ibdev,
                                  "failed to alloc inline buf, ret = %d.\n",
                                  ret);
                        return ret;
                }
        } else {
                hr_qp->rq_inl_buf.wqe_list = NULL;
        }

        ret = set_wqe_buf_attr(hr_dev, hr_qp, &buf_attr);
        if (ret) {
                ibdev_err(ibdev, "failed to split WQE buf, ret = %d.\n", ret);
                goto err_inline;
        }
        ret = hns_roce_mtr_create(hr_dev, &hr_qp->mtr, &buf_attr,
                                  HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz,
                                  udata, addr);
        if (ret) {
                ibdev_err(ibdev, "failed to create WQE mtr, ret = %d.\n", ret);
                goto err_inline;
        }

        return 0;
err_inline:
        free_rq_inline_buf(hr_qp);

        return ret;
}

static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
        hns_roce_mtr_destroy(hr_dev, &hr_qp->mtr);
        free_rq_inline_buf(hr_qp);
}

static inline bool user_qp_has_sdb(struct hns_roce_dev *hr_dev,
                                   struct ib_qp_init_attr *init_attr,
                                   struct ib_udata *udata,
                                   struct hns_roce_ib_create_qp_resp *resp,
                                   struct hns_roce_ib_create_qp *ucmd)
{
        return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
                udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
                hns_roce_qp_has_sq(init_attr) &&
                udata->inlen >= offsetofend(typeof(*ucmd), sdb_addr));
}

static inline bool user_qp_has_rdb(struct hns_roce_dev *hr_dev,
                                   struct ib_qp_init_attr *init_attr,
                                   struct ib_udata *udata,
                                   struct hns_roce_ib_create_qp_resp *resp)
{
        return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
                udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
                hns_roce_qp_has_rq(init_attr));
}

static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev,
                                     struct ib_qp_init_attr *init_attr)
{
        return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
                hns_roce_qp_has_rq(init_attr));
}

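/*
 * Set up the doorbells. User QPs map record doorbells provided by
 * userspace when the device capabilities and the udata layout allow it;
 * kernel QPs use the UAR doorbell registers and, if supported, a kernel
 * record doorbell for the RQ.
 */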
static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
                       struct ib_qp_init_attr *init_attr,
                       struct ib_udata *udata,
                       struct hns_roce_ib_create_qp *ucmd,
                       struct hns_roce_ib_create_qp_resp *resp)
{
        struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
                udata, struct hns_roce_ucontext, ibucontext);
        struct ib_device *ibdev = &hr_dev->ib_dev;
        int ret;

        if (udata) {
                if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) {
                        ret = hns_roce_db_map_user(uctx, udata, ucmd->sdb_addr,
                                                   &hr_qp->sdb);
                        if (ret) {
                                ibdev_err(ibdev,
                                          "Failed to map user SQ doorbell\n");
                                goto err_out;
                        }
                        hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
                        resp->cap_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
                }

                if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) {
                        ret = hns_roce_db_map_user(uctx, udata, ucmd->db_addr,
                                                   &hr_qp->rdb);
                        if (ret) {
                                ibdev_err(ibdev,
                                          "Failed to map user RQ doorbell\n");
                                goto err_sdb;
                        }
                        hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
                        resp->cap_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
                }
        } else {
                /* QP doorbell register address */
                hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset +
                                     DB_REG_OFFSET * hr_dev->priv_uar.index;
                hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset +
                                     DB_REG_OFFSET * hr_dev->priv_uar.index;

                if (kernel_qp_has_rdb(hr_dev, init_attr)) {
                        ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
                        if (ret) {
                                ibdev_err(ibdev,
                                          "Failed to alloc kernel RQ doorbell\n");
                                goto err_out;
                        }
                        *hr_qp->rdb.db_record = 0;
                        hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
                }
        }

        return 0;
err_sdb:
        if (udata && hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
                hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
err_out:
        return ret;
}

static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
                       struct ib_udata *udata)
{
        struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
                udata, struct hns_roce_ucontext, ibucontext);

        if (udata) {
                if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
                        hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
                if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
                        hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
        } else {
                if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
                        hns_roce_free_db(hr_dev, &hr_qp->rdb);
        }
}

static int alloc_kernel_wrid(struct hns_roce_dev *hr_dev,
                             struct hns_roce_qp *hr_qp)
{
        struct ib_device *ibdev = &hr_dev->ib_dev;
        u64 *sq_wrid = NULL;
        u64 *rq_wrid = NULL;
        int ret;

        sq_wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), GFP_KERNEL);
        if (ZERO_OR_NULL_PTR(sq_wrid)) {
                ibdev_err(ibdev, "Failed to alloc SQ wrid\n");
                return -ENOMEM;
        }

        if (hr_qp->rq.wqe_cnt) {
                rq_wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), GFP_KERNEL);
                if (ZERO_OR_NULL_PTR(rq_wrid)) {
                        ibdev_err(ibdev, "Failed to alloc RQ wrid\n");
                        ret = -ENOMEM;
                        goto err_sq;
                }
        }

        hr_qp->sq.wrid = sq_wrid;
        hr_qp->rq.wrid = rq_wrid;
        return 0;
err_sq:
        kfree(sq_wrid);

        return ret;
}

static void free_kernel_wrid(struct hns_roce_qp *hr_qp)
{
        kfree(hr_qp->rq.wrid);
        kfree(hr_qp->sq.wrid);
}

static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
                        struct ib_qp_init_attr *init_attr,
                        struct ib_udata *udata,
                        struct hns_roce_ib_create_qp *ucmd)
{
        struct ib_device *ibdev = &hr_dev->ib_dev;
        int ret;

        hr_qp->ibqp.qp_type = init_attr->qp_type;

        if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
                hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
        else
                hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;

        ret = set_rq_size(hr_dev, &init_attr->cap, hr_qp,
                          hns_roce_qp_has_rq(init_attr));
        if (ret) {
                ibdev_err(ibdev, "failed to set user RQ size, ret = %d.\n",
                          ret);
                return ret;
        }

        if (udata) {
                if (ib_copy_from_udata(ucmd, udata, sizeof(*ucmd))) {
                        ibdev_err(ibdev, "Failed to copy QP ucmd\n");
                        return -EFAULT;
                }

                ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd);
                if (ret)
                        ibdev_err(ibdev, "Failed to set user SQ size\n");
        } else {
                if (init_attr->create_flags &
                    IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
                        ibdev_err(ibdev, "Failed to check multicast loopback\n");
                        return -EINVAL;
                }

                if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
                        ibdev_err(ibdev, "Failed to check ipoib ud lso\n");
                        return -EINVAL;
                }

                ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp);
                if (ret)
                        ibdev_err(ibdev, "Failed to set kernel SQ size\n");
        }

        return ret;
}

static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
                                     struct ib_pd *ib_pd,
                                     struct ib_qp_init_attr *init_attr,
                                     struct ib_udata *udata,
                                     struct hns_roce_qp *hr_qp)
{
        struct hns_roce_ib_create_qp_resp resp = {};
        struct ib_device *ibdev = &hr_dev->ib_dev;
        struct hns_roce_ib_create_qp ucmd;
        int ret;

        mutex_init(&hr_qp->mutex);
        spin_lock_init(&hr_qp->sq.lock);
        spin_lock_init(&hr_qp->rq.lock);

        hr_qp->state = IB_QPS_RESET;
        hr_qp->flush_flag = 0;

        ret = set_qp_param(hr_dev, hr_qp, init_attr, udata, &ucmd);
        if (ret) {
                ibdev_err(ibdev, "Failed to set QP param\n");
                return ret;
        }

        if (!udata) {
                ret = alloc_kernel_wrid(hr_dev, hr_qp);
                if (ret) {
                        ibdev_err(ibdev, "Failed to alloc wrid\n");
                        return ret;
                }
        }

        ret = alloc_qp_db(hr_dev, hr_qp, init_attr, udata, &ucmd, &resp);
        if (ret) {
                ibdev_err(ibdev, "Failed to alloc QP doorbell\n");
                goto err_wrid;
        }

        ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr);
        if (ret) {
                ibdev_err(ibdev, "Failed to alloc QP buffer\n");
                goto err_db;
        }

        ret = alloc_qpn(hr_dev, hr_qp);
        if (ret) {
                ibdev_err(ibdev, "Failed to alloc QPN\n");
                goto err_buf;
        }

        ret = alloc_qpc(hr_dev, hr_qp);
        if (ret) {
                ibdev_err(ibdev, "Failed to alloc QP context\n");
                goto err_qpn;
        }

        ret = hns_roce_qp_store(hr_dev, hr_qp, init_attr);
        if (ret) {
                ibdev_err(ibdev, "Failed to store QP\n");
                goto err_qpc;
        }

        if (udata) {
                ret = ib_copy_to_udata(udata, &resp,
                                       min(udata->outlen, sizeof(resp)));
                if (ret) {
                        ibdev_err(ibdev, "copy qp resp failed!\n");
                        goto err_store;
                }
        }

        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
                ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp);
                if (ret)
                        goto err_store;
        }

        hr_qp->ibqp.qp_num = hr_qp->qpn;
        hr_qp->event = hns_roce_ib_qp_event;
        atomic_set(&hr_qp->refcount, 1);
        init_completion(&hr_qp->free);

        return 0;

err_store:
        hns_roce_qp_remove(hr_dev, hr_qp);
err_qpc:
        free_qpc(hr_dev, hr_qp);
err_qpn:
        free_qpn(hr_dev, hr_qp);
err_buf:
        free_qp_buf(hr_dev, hr_qp);
err_db:
        free_qp_db(hr_dev, hr_qp, udata);
err_wrid:
        free_kernel_wrid(hr_qp);
        return ret;
}

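/*
 * Drop the initial reference taken in hns_roce_create_qp_common() and wait
 * until any event or flush work that still holds the QP has finished
 * before releasing its resources.
 */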
void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
                         struct ib_udata *udata)
{
        if (atomic_dec_and_test(&hr_qp->refcount))
                complete(&hr_qp->free);
        wait_for_completion(&hr_qp->free);

        free_qpc(hr_dev, hr_qp);
        free_qpn(hr_dev, hr_qp);
        free_qp_buf(hr_dev, hr_qp);
        free_kernel_wrid(hr_qp);
        free_qp_db(hr_dev, hr_qp, udata);

        kfree(hr_qp);
}

struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
                                 struct ib_qp_init_attr *init_attr,
                                 struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
        struct ib_device *ibdev = &hr_dev->ib_dev;
        struct hns_roce_qp *hr_qp;
        int ret;

        switch (init_attr->qp_type) {
        case IB_QPT_RC: {
                hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
                if (!hr_qp)
                        return ERR_PTR(-ENOMEM);

                ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
                                                hr_qp);
                if (ret) {
                        ibdev_err(ibdev, "Create QP 0x%06lx failed(%d)\n",
                                  hr_qp->qpn, ret);
                        kfree(hr_qp);
                        return ERR_PTR(ret);
                }

                break;
        }
        case IB_QPT_GSI: {
                /* Userspace is not allowed to create special QPs: */
                if (udata) {
                        ibdev_err(ibdev, "not support usr space GSI\n");
                        return ERR_PTR(-EINVAL);
                }

                hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
                if (!hr_qp)
                        return ERR_PTR(-ENOMEM);

                hr_qp->port = init_attr->port_num - 1;
                hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];

                ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
                                                hr_qp);
                if (ret) {
                        ibdev_err(ibdev, "Create GSI QP failed!\n");
                        kfree(hr_qp);
                        return ERR_PTR(ret);
                }

                break;
        }
        default:{
                ibdev_err(ibdev, "not support QP type %d\n",
                          init_attr->qp_type);
                return ERR_PTR(-EOPNOTSUPP);
        }
        }

        return &hr_qp->ibqp;
}

int to_hr_qp_type(int qp_type)
{
        int transport_type;

        if (qp_type == IB_QPT_RC)
                transport_type = SERV_TYPE_RC;
        else if (qp_type == IB_QPT_UC)
                transport_type = SERV_TYPE_UC;
        else if (qp_type == IB_QPT_UD)
                transport_type = SERV_TYPE_UD;
        else if (qp_type == IB_QPT_GSI)
                transport_type = SERV_TYPE_UD;
        else
                transport_type = -1;

        return transport_type;
}

static int check_mtu_validate(struct hns_roce_dev *hr_dev,
                              struct hns_roce_qp *hr_qp,
                              struct ib_qp_attr *attr, int attr_mask)
{
        enum ib_mtu active_mtu;
        int p;

        p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
        active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);

        if ((hr_dev->caps.max_mtu >= IB_MTU_2048 &&
            attr->path_mtu > hr_dev->caps.max_mtu) ||
            attr->path_mtu < IB_MTU_256 || attr->path_mtu > active_mtu) {
                ibdev_err(&hr_dev->ib_dev,
                        "attr path_mtu(%d) invalid while modifying qp\n",
                        attr->path_mtu);
                return -EINVAL;
        }

        return 0;
}

static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                                  int attr_mask)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
        int p;

        if ((attr_mask & IB_QP_PORT) &&
            (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
                ibdev_err(&hr_dev->ib_dev,
                        "attr port_num invalid, attr->port_num = %d\n",
                        attr->port_num);
                return -EINVAL;
        }

        if (attr_mask & IB_QP_PKEY_INDEX) {
                p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
                if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
                        ibdev_err(&hr_dev->ib_dev,
                                "attr pkey_index invalid, attr->pkey_index = %d\n",
                                attr->pkey_index);
                        return -EINVAL;
                }
        }

        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
            attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
                ibdev_err(&hr_dev->ib_dev,
                        "attr max_rd_atomic invalid, attr->max_rd_atomic = %d\n",
                        attr->max_rd_atomic);
                return -EINVAL;
        }

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
            attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
                ibdev_err(&hr_dev->ib_dev,
                        "attr max_dest_rd_atomic invalid, attr->max_dest_rd_atomic = %d\n",
                        attr->max_dest_rd_atomic);
                return -EINVAL;
        }

        if (attr_mask & IB_QP_PATH_MTU)
                return check_mtu_validate(hr_dev, hr_qp, attr, attr_mask);

        return 0;
}

int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                       int attr_mask, struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
        enum ib_qp_state cur_state, new_state;
        int ret = -EINVAL;

        mutex_lock(&hr_qp->mutex);

        cur_state = attr_mask & IB_QP_CUR_STATE ?
                    attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
        new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

        if (ibqp->uobject &&
            (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
                if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) {
                        hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);

                        if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
                                hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
                } else {
                        ibdev_warn(&hr_dev->ib_dev,
                                  "flush cqe is not supported in userspace!\n");
                        goto out;
                }
        }

        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
                                attr_mask)) {
                ibdev_err(&hr_dev->ib_dev, "ib_modify_qp_is_ok failed\n");
                goto out;
        }

        ret = hns_roce_check_qp_attr(ibqp, attr, attr_mask);
        if (ret)
                goto out;

        if (cur_state == new_state && cur_state == IB_QPS_RESET) {
                if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
                        ret = -EPERM;
                        ibdev_err(&hr_dev->ib_dev,
                                  "RST2RST state is not supported\n");
                } else {
                        ret = 0;
                }

                goto out;
        }

        ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
                                    new_state);

out:
        mutex_unlock(&hr_qp->mutex);

        return ret;
}

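/*
 * Lock the send and receive CQs in a consistent order (by CQN) so that two
 * QPs sharing the same pair of CQs in opposite roles cannot deadlock. NULL
 * CQs are annotated with __acquire() to keep sparse happy.
 */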
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
                       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
        if (unlikely(send_cq == NULL && recv_cq == NULL)) {
                __acquire(&send_cq->lock);
                __acquire(&recv_cq->lock);
        } else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
                spin_lock_irq(&send_cq->lock);
                __acquire(&recv_cq->lock);
        } else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
                spin_lock_irq(&recv_cq->lock);
                __acquire(&send_cq->lock);
        } else if (send_cq == recv_cq) {
                spin_lock_irq(&send_cq->lock);
                __acquire(&recv_cq->lock);
        } else if (send_cq->cqn < recv_cq->cqn) {
                spin_lock_irq(&send_cq->lock);
                spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
        } else {
                spin_lock_irq(&recv_cq->lock);
                spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
        }
}

void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
                         struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
                         __releases(&recv_cq->lock)
{
        if (unlikely(send_cq == NULL && recv_cq == NULL)) {
                __release(&recv_cq->lock);
                __release(&send_cq->lock);
        } else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
                __release(&recv_cq->lock);
                spin_unlock(&send_cq->lock);
        } else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
                __release(&send_cq->lock);
                spin_unlock(&recv_cq->lock);
        } else if (send_cq == recv_cq) {
                __release(&recv_cq->lock);
                spin_unlock_irq(&send_cq->lock);
        } else if (send_cq->cqn < recv_cq->cqn) {
                spin_unlock(&recv_cq->lock);
                spin_unlock_irq(&send_cq->lock);
        } else {
                spin_unlock(&send_cq->lock);
                spin_unlock_irq(&recv_cq->lock);
        }
}

static inline void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
{
        return hns_roce_buf_offset(hr_qp->mtr.kmem, offset);
}

void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
{
        return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}

void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, int n)
{
        return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}

void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, int n)
{
        return get_wqe(hr_qp, hr_qp->sge.offset + (n << hr_qp->sge.sge_shift));
}

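/*
 * Check whether posting @nreq more WQEs would overflow the work queue.
 * If the unlocked head/tail check fails, the check is repeated under the
 * CQ lock before the final decision is returned.
 */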
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
                          struct ib_cq *ib_cq)
{
        struct hns_roce_cq *hr_cq;
        u32 cur;

        cur = hr_wq->head - hr_wq->tail;
        if (likely(cur + nreq < hr_wq->wqe_cnt))
                return false;

        hr_cq = to_hr_cq(ib_cq);
        spin_lock(&hr_cq->lock);
        cur = hr_wq->head - hr_wq->tail;
        spin_unlock(&hr_cq->lock);

        return cur + nreq >= hr_wq->wqe_cnt;
}

int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
        int reserved_from_top = 0;
        int reserved_from_bot;
        int ret;

        mutex_init(&qp_table->scc_mutex);
        xa_init(&hr_dev->qp_table_xa);

        reserved_from_bot = hr_dev->caps.reserved_qps;

        ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
                                   hr_dev->caps.num_qps - 1, reserved_from_bot,
                                   reserved_from_top);
        if (ret) {
                dev_err(hr_dev->dev, "qp bitmap init failed!error=%d\n",
                        ret);
                return ret;
        }

        return 0;
}

void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
{
        hns_roce_bitmap_cleanup(&hr_dev->qp_table.bitmap);
}