/* linux/drivers/infiniband/hw/hns/hns_roce_hw_v1.c */
   1/*
   2 * Copyright (c) 2016 Hisilicon Limited.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 */
  32
  33#include <linux/platform_device.h>
  34#include <linux/acpi.h>
  35#include <linux/etherdevice.h>
  36#include <linux/of.h>
  37#include <rdma/ib_umem.h>
  38#include "hns_roce_common.h"
  39#include "hns_roce_device.h"
  40#include "hns_roce_cmd.h"
  41#include "hns_roce_hem.h"
  42#include "hns_roce_hw_v1.h"
  43
  44static void set_data_seg(struct hns_roce_wqe_data_seg *dseg, struct ib_sge *sg)
  45{
  46        dseg->lkey = cpu_to_le32(sg->lkey);
  47        dseg->addr = cpu_to_le64(sg->addr);
  48        dseg->len  = cpu_to_le32(sg->length);
  49}
  50
  51static void set_raddr_seg(struct hns_roce_wqe_raddr_seg *rseg, u64 remote_addr,
  52                          u32 rkey)
  53{
  54        rseg->raddr = cpu_to_le64(remote_addr);
  55        rseg->rkey  = cpu_to_le32(rkey);
  56        rseg->len   = 0;
  57}
  58
/**
 * hns_roce_v1_post_send - post a chain of send work requests to a QP.
 * @ibqp: the queue pair; only IB_QPT_GSI and IB_QPT_RC are supported.
 * @wr: head of the caller's linked list of send work requests.
 * @bad_wr: on failure, set to the request that could not be posted
 *          (NULL for an unsupported QP type).
 *
 * Builds one hardware WQE per request in the SQ ring under qp->sq.lock,
 * then rings the SQ doorbell once for everything queued.  Returns 0 on
 * success or a negative errno (-EOPNOTSUPP, -ENOMEM, -EINVAL); requests
 * already queued before a failure are still submitted via the doorbell.
 */
int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
	struct hns_roce_ud_send_wqe *ud_sq_wqe = NULL;
	struct hns_roce_wqe_ctrl_seg *ctrl = NULL;
	struct hns_roce_wqe_data_seg *dseg = NULL;
	struct hns_roce_qp *qp = to_hr_qp(ibqp);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_sq_db sq_db;
	int ps_opcode = 0, i = 0;
	unsigned long flags = 0;
	void *wqe = NULL;
	u32 doorbell[2];
	int nreq = 0;
	u32 ind = 0;
	int ret = 0;
	u8 *smac;
	int loopback;

	/* This hardware generation only handles GSI (QP1) and RC QPs. */
	if (unlikely(ibqp->qp_type != IB_QPT_GSI &&
		ibqp->qp_type != IB_QPT_RC)) {
		dev_err(dev, "un-supported QP type\n");
		*bad_wr = NULL;
		return -EOPNOTSUPP;
	}

	spin_lock_irqsave(&qp->sq.lock, flags);
	ind = qp->sq_next_wqe;
	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		/* Stop if the SQ ring has no room for another WQE. */
		if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		/* Each WQE has a fixed number of data-segment slots. */
		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
			dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
				wr->num_sge, qp->sq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
		/* Remember the caller's wr_id so completion can report it. */
		qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
								      wr->wr_id;

		/* Corresponding to the RC and RD type wqe process separately */
		if (ibqp->qp_type == IB_QPT_GSI) {
			ud_sq_wqe = wqe;
			/*
			 * Destination MAC is split across the dmac_h and
			 * u32_8 register fields, one byte per field.
			 */
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_0_M,
				       UD_SEND_WQE_U32_4_DMAC_0_S,
				       ah->av.mac[0]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_1_M,
				       UD_SEND_WQE_U32_4_DMAC_1_S,
				       ah->av.mac[1]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_2_M,
				       UD_SEND_WQE_U32_4_DMAC_2_S,
				       ah->av.mac[2]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_3_M,
				       UD_SEND_WQE_U32_4_DMAC_3_S,
				       ah->av.mac[3]);

			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_DMAC_4_M,
				       UD_SEND_WQE_U32_8_DMAC_4_S,
				       ah->av.mac[4]);
			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_DMAC_5_M,
				       UD_SEND_WQE_U32_8_DMAC_5_S,
				       ah->av.mac[5]);

			/*
			 * Mark the WQE as loopback when the destination MAC
			 * equals this port's own MAC address.
			 */
			smac = (u8 *)hr_dev->dev_addr[qp->port];
			loopback = ether_addr_equal_unaligned(ah->av.mac,
							      smac) ? 1 : 0;
			roce_set_bit(ud_sq_wqe->u32_8,
				     UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S,
				     loopback);

			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_OPERATION_TYPE_M,
				       UD_SEND_WQE_U32_8_OPERATION_TYPE_S,
				       HNS_ROCE_WQE_OPCODE_SEND);
			/* GSI WQEs always carry exactly two data segments. */
			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_M,
				       UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S,
				       2);
			roce_set_bit(ud_sq_wqe->u32_8,
				UD_SEND_WQE_U32_8_SEND_GL_ROUTING_HDR_FLAG_S,
				1);

			/* Completion-notify / solicited / immediate flags. */
			ud_sq_wqe->u32_8 |= (wr->send_flags & IB_SEND_SIGNALED ?
				cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
				(wr->send_flags & IB_SEND_SOLICITED ?
				cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
				((wr->opcode == IB_WR_SEND_WITH_IMM) ?
				cpu_to_le32(HNS_ROCE_WQE_IMM) : 0);

			roce_set_field(ud_sq_wqe->u32_16,
				       UD_SEND_WQE_U32_16_DEST_QP_M,
				       UD_SEND_WQE_U32_16_DEST_QP_S,
				       ud_wr(wr)->remote_qpn);
			roce_set_field(ud_sq_wqe->u32_16,
				       UD_SEND_WQE_U32_16_MAX_STATIC_RATE_M,
				       UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S,
				       ah->av.stat_rate);

			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_FLOW_LABEL_M,
				       UD_SEND_WQE_U32_36_FLOW_LABEL_S, 0);
			/* SL lives in the top bits of sl_tclass_flowlabel. */
			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_PRIORITY_M,
				       UD_SEND_WQE_U32_36_PRIORITY_S,
				       ah->av.sl_tclass_flowlabel >>
				       HNS_ROCE_SL_SHIFT);
			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_SGID_INDEX_M,
				       UD_SEND_WQE_U32_36_SGID_INDEX_S,
				       hns_get_gid_index(hr_dev, qp->phy_port,
							 ah->av.gid_index));

			roce_set_field(ud_sq_wqe->u32_40,
				       UD_SEND_WQE_U32_40_HOP_LIMIT_M,
				       UD_SEND_WQE_U32_40_HOP_LIMIT_S,
				       ah->av.hop_limit);
			roce_set_field(ud_sq_wqe->u32_40,
				       UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M,
				       UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S, 0);

			memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN);

			/*
			 * NOTE(review): the SGE addresses/keys below are
			 * stored without cpu_to_le32/64 conversion, unlike
			 * set_data_seg() — confirm on big-endian hosts.
			 */
			ud_sq_wqe->va0_l = (u32)wr->sg_list[0].addr;
			ud_sq_wqe->va0_h = (wr->sg_list[0].addr) >> 32;
			ud_sq_wqe->l_key0 = wr->sg_list[0].lkey;

			ud_sq_wqe->va1_l = (u32)wr->sg_list[1].addr;
			ud_sq_wqe->va1_h = (wr->sg_list[1].addr) >> 32;
			ud_sq_wqe->l_key1 = wr->sg_list[1].lkey;
			ind++;
		} else if (ibqp->qp_type == IB_QPT_RC) {
			ctrl = wqe;
			memset(ctrl, 0, sizeof(struct hns_roce_wqe_ctrl_seg));
			/* Message length = sum of all SGE lengths. */
			for (i = 0; i < wr->num_sge; i++)
				ctrl->msg_length += wr->sg_list[i].length;

			ctrl->sgl_pa_h = 0;
			ctrl->flag = 0;
			ctrl->imm_data = send_ieth(wr);

			/*Ctrl field, ctrl set type: sig, solic, imm, fence */
			/* SO wait for conforming application scenarios */
			ctrl->flag |= (wr->send_flags & IB_SEND_SIGNALED ?
				      cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
				      (wr->send_flags & IB_SEND_SOLICITED ?
				      cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
				      ((wr->opcode == IB_WR_SEND_WITH_IMM ||
				      wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) ?
				      cpu_to_le32(HNS_ROCE_WQE_IMM) : 0) |
				      (wr->send_flags & IB_SEND_FENCE ?
				      (cpu_to_le32(HNS_ROCE_WQE_FENCE)) : 0);

			wqe += sizeof(struct hns_roce_wqe_ctrl_seg);

			/* Map the verbs opcode to the hardware ps_opcode. */
			switch (wr->opcode) {
			case IB_WR_RDMA_READ:
				ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_READ;
				set_raddr_seg(wqe,  rdma_wr(wr)->remote_addr,
					       rdma_wr(wr)->rkey);
				break;
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_WRITE;
				set_raddr_seg(wqe,  rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				break;
			case IB_WR_SEND:
			case IB_WR_SEND_WITH_INV:
			case IB_WR_SEND_WITH_IMM:
				ps_opcode = HNS_ROCE_WQE_OPCODE_SEND;
				break;
			case IB_WR_LOCAL_INV:
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
			case IB_WR_LSO:
			default:
				/* Unsupported opcodes get the mask value. */
				ps_opcode = HNS_ROCE_WQE_OPCODE_MASK;
				break;
			}
			ctrl->flag |= cpu_to_le32(ps_opcode);
			wqe += sizeof(struct hns_roce_wqe_raddr_seg);

			dseg = wqe;
			if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
				/* Payload is copied directly into the WQE. */
				if (ctrl->msg_length >
					hr_dev->caps.max_sq_inline) {
					ret = -EINVAL;
					*bad_wr = wr;
					dev_err(dev, "inline len(1-%d)=%d, illegal",
						ctrl->msg_length,
						hr_dev->caps.max_sq_inline);
					goto out;
				}
				for (i = 0; i < wr->num_sge; i++) {
					memcpy(wqe, ((void *) (uintptr_t)
					       wr->sg_list[i].addr),
					       wr->sg_list[i].length);
					wqe += wr->sg_list[i].length;
				}
				ctrl->flag |= HNS_ROCE_WQE_INLINE;
			} else {
				/*sqe num is two */
				for (i = 0; i < wr->num_sge; i++)
					set_data_seg(dseg + i, wr->sg_list + i);

				ctrl->flag |= cpu_to_le32(wr->num_sge <<
					      HNS_ROCE_WQE_SGE_NUM_BIT);
			}
			ind++;
		}
	}

out:
	/* Ring the SQ doorbell once for everything queued above. */
	if (likely(nreq)) {
		qp->sq.head += nreq;
		/* Make WQE writes visible to hardware before the doorbell. */
		wmb();

		sq_db.u32_4 = 0;
		sq_db.u32_8 = 0;
		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SQ_HEAD_M,
			       SQ_DOORBELL_U32_4_SQ_HEAD_S,
			      (qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1)));
		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SL_M,
			       SQ_DOORBELL_U32_4_SL_S, qp->sl);
		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_PORT_M,
			       SQ_DOORBELL_U32_4_PORT_S, qp->phy_port);
		roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M,
			       SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn);
		roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1);

		doorbell[0] = sq_db.u32_4;
		doorbell[1] = sq_db.u32_8;

		hns_roce_write64_k(doorbell, qp->sq.db_reg_l);
		qp->sq_next_wqe = ind;
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return ret;
}
 318
/**
 * hns_roce_v1_post_recv - post a chain of receive work requests to a QP.
 * @ibqp: the queue pair.
 * @wr: head of the caller's linked list of receive work requests.
 * @bad_wr: on failure, set to the request that could not be posted.
 *
 * Fills one RQ WQE per request under hr_qp->rq.lock, then tells hardware
 * about the new RQ head: for GSI QPs by rewriting the QP1C config register,
 * otherwise by ringing the RQ doorbell.  Returns 0 on success or a negative
 * errno (-ENOMEM, -EINVAL); requests queued before a failure still count.
 */
int hns_roce_v1_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr)
{
	int ret = 0;
	int nreq = 0;
	int ind = 0;
	int i = 0;
	u32 reg_val = 0;
	unsigned long flags = 0;
	struct hns_roce_rq_wqe_ctrl *ctrl = NULL;
	struct hns_roce_wqe_data_seg *scat = NULL;
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_rq_db rq_db;
	uint32_t doorbell[2] = {0};

	spin_lock_irqsave(&hr_qp->rq.lock, flags);
	ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		/* Stop if the RQ ring has no room for another WQE. */
		if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
			hr_qp->ibqp.recv_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
			dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n",
				wr->num_sge, hr_qp->rq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		ctrl = get_recv_wqe(hr_qp, ind);

		roce_set_field(ctrl->rwqe_byte_12,
			       RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M,
			       RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S,
			       wr->num_sge);

		/* Scatter list follows immediately after the control seg. */
		scat = (struct hns_roce_wqe_data_seg *)(ctrl + 1);

		for (i = 0; i < wr->num_sge; i++)
			set_data_seg(scat + i, wr->sg_list + i);

		/* Remember the caller's wr_id for completion reporting. */
		hr_qp->rq.wrid[ind] = wr->wr_id;

		ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
	}

out:
	if (likely(nreq)) {
		hr_qp->rq.head += nreq;
		/* Make WQE writes visible to hardware before notifying it. */
		wmb();

		if (ibqp->qp_type == IB_QPT_GSI) {
			/* SW update GSI rq header */
			reg_val = roce_read(to_hr_dev(ibqp->device),
					    ROCEE_QP1C_CFG3_0_REG +
					    QP1C_CFGN_OFFSET * hr_qp->phy_port);
			roce_set_field(reg_val,
				       ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M,
				       ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S,
				       hr_qp->rq.head);
			roce_write(to_hr_dev(ibqp->device),
				   ROCEE_QP1C_CFG3_0_REG +
				   QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val);
		} else {
			/* Normal QPs notify hardware via the RQ doorbell. */
			rq_db.u32_4 = 0;
			rq_db.u32_8 = 0;

			roce_set_field(rq_db.u32_4, RQ_DOORBELL_U32_4_RQ_HEAD_M,
				       RQ_DOORBELL_U32_4_RQ_HEAD_S,
				       hr_qp->rq.head);
			roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_QPN_M,
				       RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
			roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_CMD_M,
				       RQ_DOORBELL_U32_8_CMD_S, 1);
			roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S,
				     1);

			doorbell[0] = rq_db.u32_4;
			doorbell[1] = rq_db.u32_8;

			hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
		}
	}
	spin_unlock_irqrestore(&hr_qp->rq.lock, flags);

	return ret;
}
 414
 415static void hns_roce_set_db_event_mode(struct hns_roce_dev *hr_dev,
 416                                       int sdb_mode, int odb_mode)
 417{
 418        u32 val;
 419
 420        val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
 421        roce_set_bit(val, ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S, sdb_mode);
 422        roce_set_bit(val, ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S, odb_mode);
 423        roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
 424}
 425
 426static void hns_roce_set_db_ext_mode(struct hns_roce_dev *hr_dev, u32 sdb_mode,
 427                                     u32 odb_mode)
 428{
 429        u32 val;
 430
 431        /* Configure SDB/ODB extend mode */
 432        val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
 433        roce_set_bit(val, ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S, sdb_mode);
 434        roce_set_bit(val, ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S, odb_mode);
 435        roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
 436}
 437
 438static void hns_roce_set_sdb(struct hns_roce_dev *hr_dev, u32 sdb_alept,
 439                             u32 sdb_alful)
 440{
 441        u32 val;
 442
 443        /* Configure SDB */
 444        val = roce_read(hr_dev, ROCEE_DB_SQ_WL_REG);
 445        roce_set_field(val, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M,
 446                       ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S, sdb_alful);
 447        roce_set_field(val, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M,
 448                       ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S, sdb_alept);
 449        roce_write(hr_dev, ROCEE_DB_SQ_WL_REG, val);
 450}
 451
 452static void hns_roce_set_odb(struct hns_roce_dev *hr_dev, u32 odb_alept,
 453                             u32 odb_alful)
 454{
 455        u32 val;
 456
 457        /* Configure ODB */
 458        val = roce_read(hr_dev, ROCEE_DB_OTHERS_WL_REG);
 459        roce_set_field(val, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M,
 460                       ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S, odb_alful);
 461        roce_set_field(val, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M,
 462                       ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S, odb_alept);
 463        roce_write(hr_dev, ROCEE_DB_OTHERS_WL_REG, val);
 464}
 465
 466static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
 467                                 u32 ext_sdb_alful)
 468{
 469        struct device *dev = &hr_dev->pdev->dev;
 470        struct hns_roce_v1_priv *priv;
 471        struct hns_roce_db_table *db;
 472        dma_addr_t sdb_dma_addr;
 473        u32 val;
 474
 475        priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
 476        db = &priv->db_table;
 477
 478        /* Configure extend SDB threshold */
 479        roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_EMPTY_REG, ext_sdb_alept);
 480        roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_REG, ext_sdb_alful);
 481
 482        /* Configure extend SDB base addr */
 483        sdb_dma_addr = db->ext_db->sdb_buf_list->map;
 484        roce_write(hr_dev, ROCEE_EXT_DB_SQ_REG, (u32)(sdb_dma_addr >> 12));
 485
 486        /* Configure extend SDB depth */
 487        val = roce_read(hr_dev, ROCEE_EXT_DB_SQ_H_REG);
 488        roce_set_field(val, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M,
 489                       ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S,
 490                       db->ext_db->esdb_dep);
 491        /*
 492         * 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of
 493         * using 4K page, and shift more 32 because of
 494         * caculating the high 32 bit value evaluated to hardware.
 495         */
 496        roce_set_field(val, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M,
 497                       ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S, sdb_dma_addr >> 44);
 498        roce_write(hr_dev, ROCEE_EXT_DB_SQ_H_REG, val);
 499
 500        dev_dbg(dev, "ext SDB depth: 0x%x\n", db->ext_db->esdb_dep);
 501        dev_dbg(dev, "ext SDB threshold: epmty: 0x%x, ful: 0x%x\n",
 502                ext_sdb_alept, ext_sdb_alful);
 503}
 504
 505static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept,
 506                                 u32 ext_odb_alful)
 507{
 508        struct device *dev = &hr_dev->pdev->dev;
 509        struct hns_roce_v1_priv *priv;
 510        struct hns_roce_db_table *db;
 511        dma_addr_t odb_dma_addr;
 512        u32 val;
 513
 514        priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
 515        db = &priv->db_table;
 516
 517        /* Configure extend ODB threshold */
 518        roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_EMPTY_REG, ext_odb_alept);
 519        roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_REG, ext_odb_alful);
 520
 521        /* Configure extend ODB base addr */
 522        odb_dma_addr = db->ext_db->odb_buf_list->map;
 523        roce_write(hr_dev, ROCEE_EXT_DB_OTH_REG, (u32)(odb_dma_addr >> 12));
 524
 525        /* Configure extend ODB depth */
 526        val = roce_read(hr_dev, ROCEE_EXT_DB_OTH_H_REG);
 527        roce_set_field(val, ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M,
 528                       ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S,
 529                       db->ext_db->eodb_dep);
 530        roce_set_field(val, ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M,
 531                       ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S,
 532                       db->ext_db->eodb_dep);
 533        roce_write(hr_dev, ROCEE_EXT_DB_OTH_H_REG, val);
 534
 535        dev_dbg(dev, "ext ODB depth: 0x%x\n", db->ext_db->eodb_dep);
 536        dev_dbg(dev, "ext ODB threshold: empty: 0x%x, ful: 0x%x\n",
 537                ext_odb_alept, ext_odb_alful);
 538}
 539
 540static int hns_roce_db_ext_init(struct hns_roce_dev *hr_dev, u32 sdb_ext_mod,
 541                                u32 odb_ext_mod)
 542{
 543        struct device *dev = &hr_dev->pdev->dev;
 544        struct hns_roce_v1_priv *priv;
 545        struct hns_roce_db_table *db;
 546        dma_addr_t sdb_dma_addr;
 547        dma_addr_t odb_dma_addr;
 548        int ret = 0;
 549
 550        priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
 551        db = &priv->db_table;
 552
 553        db->ext_db = kmalloc(sizeof(*db->ext_db), GFP_KERNEL);
 554        if (!db->ext_db)
 555                return -ENOMEM;
 556
 557        if (sdb_ext_mod) {
 558                db->ext_db->sdb_buf_list = kmalloc(
 559                                sizeof(*db->ext_db->sdb_buf_list), GFP_KERNEL);
 560                if (!db->ext_db->sdb_buf_list) {
 561                        ret = -ENOMEM;
 562                        goto ext_sdb_buf_fail_out;
 563                }
 564
 565                db->ext_db->sdb_buf_list->buf = dma_alloc_coherent(dev,
 566                                                     HNS_ROCE_V1_EXT_SDB_SIZE,
 567                                                     &sdb_dma_addr, GFP_KERNEL);
 568                if (!db->ext_db->sdb_buf_list->buf) {
 569                        ret = -ENOMEM;
 570                        goto alloc_sq_db_buf_fail;
 571                }
 572                db->ext_db->sdb_buf_list->map = sdb_dma_addr;
 573
 574                db->ext_db->esdb_dep = ilog2(HNS_ROCE_V1_EXT_SDB_DEPTH);
 575                hns_roce_set_sdb_ext(hr_dev, HNS_ROCE_V1_EXT_SDB_ALEPT,
 576                                     HNS_ROCE_V1_EXT_SDB_ALFUL);
 577        } else
 578                hns_roce_set_sdb(hr_dev, HNS_ROCE_V1_SDB_ALEPT,
 579                                 HNS_ROCE_V1_SDB_ALFUL);
 580
 581        if (odb_ext_mod) {
 582                db->ext_db->odb_buf_list = kmalloc(
 583                                sizeof(*db->ext_db->odb_buf_list), GFP_KERNEL);
 584                if (!db->ext_db->odb_buf_list) {
 585                        ret = -ENOMEM;
 586                        goto ext_odb_buf_fail_out;
 587                }
 588
 589                db->ext_db->odb_buf_list->buf = dma_alloc_coherent(dev,
 590                                                     HNS_ROCE_V1_EXT_ODB_SIZE,
 591                                                     &odb_dma_addr, GFP_KERNEL);
 592                if (!db->ext_db->odb_buf_list->buf) {
 593                        ret = -ENOMEM;
 594                        goto alloc_otr_db_buf_fail;
 595                }
 596                db->ext_db->odb_buf_list->map = odb_dma_addr;
 597
 598                db->ext_db->eodb_dep = ilog2(HNS_ROCE_V1_EXT_ODB_DEPTH);
 599                hns_roce_set_odb_ext(hr_dev, HNS_ROCE_V1_EXT_ODB_ALEPT,
 600                                     HNS_ROCE_V1_EXT_ODB_ALFUL);
 601        } else
 602                hns_roce_set_odb(hr_dev, HNS_ROCE_V1_ODB_ALEPT,
 603                                 HNS_ROCE_V1_ODB_ALFUL);
 604
 605        hns_roce_set_db_ext_mode(hr_dev, sdb_ext_mod, odb_ext_mod);
 606
 607        return 0;
 608
 609alloc_otr_db_buf_fail:
 610        kfree(db->ext_db->odb_buf_list);
 611
 612ext_odb_buf_fail_out:
 613        if (sdb_ext_mod) {
 614                dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
 615                                  db->ext_db->sdb_buf_list->buf,
 616                                  db->ext_db->sdb_buf_list->map);
 617        }
 618
 619alloc_sq_db_buf_fail:
 620        if (sdb_ext_mod)
 621                kfree(db->ext_db->sdb_buf_list);
 622
 623ext_sdb_buf_fail_out:
 624        kfree(db->ext_db);
 625        return ret;
 626}
 627
 628static struct hns_roce_qp *hns_roce_v1_create_lp_qp(struct hns_roce_dev *hr_dev,
 629                                                    struct ib_pd *pd)
 630{
 631        struct device *dev = &hr_dev->pdev->dev;
 632        struct ib_qp_init_attr init_attr;
 633        struct ib_qp *qp;
 634
 635        memset(&init_attr, 0, sizeof(struct ib_qp_init_attr));
 636        init_attr.qp_type               = IB_QPT_RC;
 637        init_attr.sq_sig_type           = IB_SIGNAL_ALL_WR;
 638        init_attr.cap.max_recv_wr       = HNS_ROCE_MIN_WQE_NUM;
 639        init_attr.cap.max_send_wr       = HNS_ROCE_MIN_WQE_NUM;
 640
 641        qp = hns_roce_create_qp(pd, &init_attr, NULL);
 642        if (IS_ERR(qp)) {
 643                dev_err(dev, "Create loop qp for mr free failed!");
 644                return NULL;
 645        }
 646
 647        return to_hr_qp(qp);
 648}
 649
 650static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
 651{
 652        struct hns_roce_caps *caps = &hr_dev->caps;
 653        struct device *dev = &hr_dev->pdev->dev;
 654        struct ib_cq_init_attr cq_init_attr;
 655        struct hns_roce_free_mr *free_mr;
 656        struct ib_qp_attr attr = { 0 };
 657        struct hns_roce_v1_priv *priv;
 658        struct hns_roce_qp *hr_qp;
 659        struct ib_cq *cq;
 660        struct ib_pd *pd;
 661        union ib_gid dgid;
 662        u64 subnet_prefix;
 663        int attr_mask = 0;
 664        int i, j;
 665        int ret;
 666        u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 };
 667        u8 phy_port;
 668        u8 port = 0;
 669        u8 sl;
 670
 671        priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
 672        free_mr = &priv->free_mr;
 673
 674        /* Reserved cq for loop qp */
 675        cq_init_attr.cqe                = HNS_ROCE_MIN_WQE_NUM * 2;
 676        cq_init_attr.comp_vector        = 0;
 677        cq = hns_roce_ib_create_cq(&hr_dev->ib_dev, &cq_init_attr, NULL, NULL);
 678        if (IS_ERR(cq)) {
 679                dev_err(dev, "Create cq for reseved loop qp failed!");
 680                return -ENOMEM;
 681        }
 682        free_mr->mr_free_cq = to_hr_cq(cq);
 683        free_mr->mr_free_cq->ib_cq.device               = &hr_dev->ib_dev;
 684        free_mr->mr_free_cq->ib_cq.uobject              = NULL;
 685        free_mr->mr_free_cq->ib_cq.comp_handler         = NULL;
 686        free_mr->mr_free_cq->ib_cq.event_handler        = NULL;
 687        free_mr->mr_free_cq->ib_cq.cq_context           = NULL;
 688        atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0);
 689
 690        pd = hns_roce_alloc_pd(&hr_dev->ib_dev, NULL, NULL);
 691        if (IS_ERR(pd)) {
 692                dev_err(dev, "Create pd for reseved loop qp failed!");
 693                ret = -ENOMEM;
 694                goto alloc_pd_failed;
 695        }
 696        free_mr->mr_free_pd = to_hr_pd(pd);
 697        free_mr->mr_free_pd->ibpd.device  = &hr_dev->ib_dev;
 698        free_mr->mr_free_pd->ibpd.uobject = NULL;
 699        atomic_set(&free_mr->mr_free_pd->ibpd.usecnt, 0);
 700
 701        attr.qp_access_flags    = IB_ACCESS_REMOTE_WRITE;
 702        attr.pkey_index         = 0;
 703        attr.min_rnr_timer      = 0;
 704        /* Disable read ability */
 705        attr.max_dest_rd_atomic = 0;
 706        attr.max_rd_atomic      = 0;
 707        /* Use arbitrary values as rq_psn and sq_psn */
 708        attr.rq_psn             = 0x0808;
 709        attr.sq_psn             = 0x0808;
 710        attr.retry_cnt          = 7;
 711        attr.rnr_retry          = 7;
 712        attr.timeout            = 0x12;
 713        attr.path_mtu           = IB_MTU_256;
 714        attr.ah_attr.type       = RDMA_AH_ATTR_TYPE_ROCE;
 715        rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0);
 716        rdma_ah_set_static_rate(&attr.ah_attr, 3);
 717
 718        subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
 719        for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
 720                phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
 721                                (i % HNS_ROCE_MAX_PORTS);
 722                sl = i / HNS_ROCE_MAX_PORTS;
 723
 724                for (j = 0; j < caps->num_ports; j++) {
 725                        if (hr_dev->iboe.phy_port[j] == phy_port) {
 726                                queue_en[i] = 1;
 727                                port = j;
 728                                break;
 729                        }
 730                }
 731
 732                if (!queue_en[i])
 733                        continue;
 734
 735                free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
 736                if (!free_mr->mr_free_qp[i]) {
 737                        dev_err(dev, "Create loop qp failed!\n");
 738                        goto create_lp_qp_failed;
 739                }
 740                hr_qp = free_mr->mr_free_qp[i];
 741
 742                hr_qp->port             = port;
 743                hr_qp->phy_port         = phy_port;
 744                hr_qp->ibqp.qp_type     = IB_QPT_RC;
 745                hr_qp->ibqp.device      = &hr_dev->ib_dev;
 746                hr_qp->ibqp.uobject     = NULL;
 747                atomic_set(&hr_qp->ibqp.usecnt, 0);
 748                hr_qp->ibqp.pd          = pd;
 749                hr_qp->ibqp.recv_cq     = cq;
 750                hr_qp->ibqp.send_cq     = cq;
 751
 752                rdma_ah_set_port_num(&attr.ah_attr, port + 1);
 753                rdma_ah_set_sl(&attr.ah_attr, sl);
 754                attr.port_num           = port + 1;
 755
 756                attr.dest_qp_num        = hr_qp->qpn;
 757                memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr),
 758                       hr_dev->dev_addr[port],
 759                       MAC_ADDR_OCTET_NUM);
 760
 761                memcpy(&dgid.raw, &subnet_prefix, sizeof(u64));
 762                memcpy(&dgid.raw[8], hr_dev->dev_addr[port], 3);
 763                memcpy(&dgid.raw[13], hr_dev->dev_addr[port] + 3, 3);
 764                dgid.raw[11] = 0xff;
 765                dgid.raw[12] = 0xfe;
 766                dgid.raw[8] ^= 2;
 767                rdma_ah_set_dgid_raw(&attr.ah_attr, dgid.raw);
 768
 769                ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
 770                                            IB_QPS_RESET, IB_QPS_INIT);
 771                if (ret) {
 772                        dev_err(dev, "modify qp failed(%d)!\n", ret);
 773                        goto create_lp_qp_failed;
 774                }
 775
 776                ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
 777                                            IB_QPS_INIT, IB_QPS_RTR);
 778                if (ret) {
 779                        dev_err(dev, "modify qp failed(%d)!\n", ret);
 780                        goto create_lp_qp_failed;
 781                }
 782
 783                ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
 784                                            IB_QPS_RTR, IB_QPS_RTS);
 785                if (ret) {
 786                        dev_err(dev, "modify qp failed(%d)!\n", ret);
 787                        goto create_lp_qp_failed;
 788                }
 789        }
 790
 791        return 0;
 792
 793create_lp_qp_failed:
 794        for (i -= 1; i >= 0; i--) {
 795                hr_qp = free_mr->mr_free_qp[i];
 796                if (hns_roce_v1_destroy_qp(&hr_qp->ibqp))
 797                        dev_err(dev, "Destroy qp %d for mr free failed!\n", i);
 798        }
 799
 800        if (hns_roce_dealloc_pd(pd))
 801                dev_err(dev, "Destroy pd for create_lp_qp failed!\n");
 802
 803alloc_pd_failed:
 804        if (hns_roce_ib_destroy_cq(cq))
 805                dev_err(dev, "Destroy cq for create_lp_qp failed!\n");
 806
 807        return -EINVAL;
 808}
 809
 810static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
 811{
 812        struct device *dev = &hr_dev->pdev->dev;
 813        struct hns_roce_free_mr *free_mr;
 814        struct hns_roce_v1_priv *priv;
 815        struct hns_roce_qp *hr_qp;
 816        int ret;
 817        int i;
 818
 819        priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
 820        free_mr = &priv->free_mr;
 821
 822        for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
 823                hr_qp = free_mr->mr_free_qp[i];
 824                if (!hr_qp)
 825                        continue;
 826
 827                ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp);
 828                if (ret)
 829                        dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n",
 830                                i, ret);
 831        }
 832
 833        ret = hns_roce_ib_destroy_cq(&free_mr->mr_free_cq->ib_cq);
 834        if (ret)
 835                dev_err(dev, "Destroy cq for mr_free failed(%d)!\n", ret);
 836
 837        ret = hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd);
 838        if (ret)
 839                dev_err(dev, "Destroy pd for mr_free failed(%d)!\n", ret);
 840}
 841
 842static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
 843{
 844        struct device *dev = &hr_dev->pdev->dev;
 845        struct hns_roce_v1_priv *priv;
 846        struct hns_roce_db_table *db;
 847        u32 sdb_ext_mod;
 848        u32 odb_ext_mod;
 849        u32 sdb_evt_mod;
 850        u32 odb_evt_mod;
 851        int ret = 0;
 852
 853        priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
 854        db = &priv->db_table;
 855
 856        memset(db, 0, sizeof(*db));
 857
 858        /* Default DB mode */
 859        sdb_ext_mod = HNS_ROCE_SDB_EXTEND_MODE;
 860        odb_ext_mod = HNS_ROCE_ODB_EXTEND_MODE;
 861        sdb_evt_mod = HNS_ROCE_SDB_NORMAL_MODE;
 862        odb_evt_mod = HNS_ROCE_ODB_POLL_MODE;
 863
 864        db->sdb_ext_mod = sdb_ext_mod;
 865        db->odb_ext_mod = odb_ext_mod;
 866
 867        /* Init extend DB */
 868        ret = hns_roce_db_ext_init(hr_dev, sdb_ext_mod, odb_ext_mod);
 869        if (ret) {
 870                dev_err(dev, "Failed in extend DB configuration.\n");
 871                return ret;
 872        }
 873
 874        hns_roce_set_db_event_mode(hr_dev, sdb_evt_mod, odb_evt_mod);
 875
 876        return 0;
 877}
 878
 879void hns_roce_v1_recreate_lp_qp_work_fn(struct work_struct *work)
 880{
 881        struct hns_roce_recreate_lp_qp_work *lp_qp_work;
 882        struct hns_roce_dev *hr_dev;
 883
 884        lp_qp_work = container_of(work, struct hns_roce_recreate_lp_qp_work,
 885                                  work);
 886        hr_dev = to_hr_dev(lp_qp_work->ib_dev);
 887
 888        hns_roce_v1_release_lp_qp(hr_dev);
 889
 890        if (hns_roce_v1_rsv_lp_qp(hr_dev))
 891                dev_err(&hr_dev->pdev->dev, "create reserver qp failed\n");
 892
 893        if (lp_qp_work->comp_flag)
 894                complete(lp_qp_work->comp);
 895
 896        kfree(lp_qp_work);
 897}
 898
 899static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev)
 900{
 901        struct device *dev = &hr_dev->pdev->dev;
 902        struct hns_roce_recreate_lp_qp_work *lp_qp_work;
 903        struct hns_roce_free_mr *free_mr;
 904        struct hns_roce_v1_priv *priv;
 905        struct completion comp;
 906        unsigned long end =
 907          msecs_to_jiffies(HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS) + jiffies;
 908
 909        priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
 910        free_mr = &priv->free_mr;
 911
 912        lp_qp_work = kzalloc(sizeof(struct hns_roce_recreate_lp_qp_work),
 913                             GFP_KERNEL);
 914
 915        INIT_WORK(&(lp_qp_work->work), hns_roce_v1_recreate_lp_qp_work_fn);
 916
 917        lp_qp_work->ib_dev = &(hr_dev->ib_dev);
 918        lp_qp_work->comp = &comp;
 919        lp_qp_work->comp_flag = 1;
 920
 921        init_completion(lp_qp_work->comp);
 922
 923        queue_work(free_mr->free_mr_wq, &(lp_qp_work->work));
 924
 925        while (time_before_eq(jiffies, end)) {
 926                if (try_wait_for_completion(&comp))
 927                        return 0;
 928                msleep(HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE);
 929        }
 930
 931        lp_qp_work->comp_flag = 0;
 932        if (try_wait_for_completion(&comp))
 933                return 0;
 934
 935        dev_warn(dev, "recreate lp qp failed 20s timeout and return failed!\n");
 936        return -ETIMEDOUT;
 937}
 938
 939static int hns_roce_v1_send_lp_wqe(struct hns_roce_qp *hr_qp)
 940{
 941        struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
 942        struct device *dev = &hr_dev->pdev->dev;
 943        struct ib_send_wr send_wr, *bad_wr;
 944        int ret;
 945
 946        memset(&send_wr, 0, sizeof(send_wr));
 947        send_wr.next    = NULL;
 948        send_wr.num_sge = 0;
 949        send_wr.send_flags = 0;
 950        send_wr.sg_list = NULL;
 951        send_wr.wr_id   = (unsigned long long)&send_wr;
 952        send_wr.opcode  = IB_WR_RDMA_WRITE;
 953
 954        ret = hns_roce_v1_post_send(&hr_qp->ibqp, &send_wr, &bad_wr);
 955        if (ret) {
 956                dev_err(dev, "Post write wqe for mr free failed(%d)!", ret);
 957                return ret;
 958        }
 959
 960        return 0;
 961}
 962
 963static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
 964{
 965        struct hns_roce_mr_free_work *mr_work;
 966        struct ib_wc wc[HNS_ROCE_V1_RESV_QP];
 967        struct hns_roce_free_mr *free_mr;
 968        struct hns_roce_cq *mr_free_cq;
 969        struct hns_roce_v1_priv *priv;
 970        struct hns_roce_dev *hr_dev;
 971        struct hns_roce_mr *hr_mr;
 972        struct hns_roce_qp *hr_qp;
 973        struct device *dev;
 974        unsigned long end =
 975                msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
 976        int i;
 977        int ret;
 978        int ne = 0;
 979
 980        mr_work = container_of(work, struct hns_roce_mr_free_work, work);
 981        hr_mr = (struct hns_roce_mr *)mr_work->mr;
 982        hr_dev = to_hr_dev(mr_work->ib_dev);
 983        dev = &hr_dev->pdev->dev;
 984
 985        priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
 986        free_mr = &priv->free_mr;
 987        mr_free_cq = free_mr->mr_free_cq;
 988
 989        for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
 990                hr_qp = free_mr->mr_free_qp[i];
 991                if (!hr_qp)
 992                        continue;
 993                ne++;
 994
 995                ret = hns_roce_v1_send_lp_wqe(hr_qp);
 996                if (ret) {
 997                        dev_err(dev,
 998                             "Send wqe (qp:0x%lx) for mr free failed(%d)!\n",
 999                             hr_qp->qpn, ret);
1000                        goto free_work;
1001                }
1002        }
1003
1004        do {
1005                ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
1006                if (ret < 0) {
1007                        dev_err(dev,
1008                           "(qp:0x%lx) starts, Poll cqe failed(%d) for mr 0x%x free! Remain %d cqe\n",
1009                           hr_qp->qpn, ret, hr_mr->key, ne);
1010                        goto free_work;
1011                }
1012                ne -= ret;
1013                usleep_range(HNS_ROCE_V1_FREE_MR_WAIT_VALUE * 1000,
1014                             (1 + HNS_ROCE_V1_FREE_MR_WAIT_VALUE) * 1000);
1015        } while (ne && time_before_eq(jiffies, end));
1016
1017        if (ne != 0)
1018                dev_err(dev,
1019                        "Poll cqe for mr 0x%x free timeout! Remain %d cqe\n",
1020                        hr_mr->key, ne);
1021
1022free_work:
1023        if (mr_work->comp_flag)
1024                complete(mr_work->comp);
1025        kfree(mr_work);
1026}
1027
/*
 * Free an MR on hip06.  After moving the MPT entry back to software
 * ownership, the hardware may still hold in-flight accesses, so the
 * flush is delegated to the free-mr workqueue
 * (hns_roce_v1_mr_free_work_fn) and this function polls for its
 * completion with a timeout.  The MR's buffers, bitmap slot, umem and
 * the hns_roce_mr itself are released on every path.
 * Returns 0 on success, -ENOMEM or -ETIMEDOUT on failure.
 */
int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_mr_free_work *mr_work;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	struct completion comp;
	/* Deadline for the free-mr work to signal completion. */
	unsigned long end =
		msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
	unsigned long start = jiffies;
	int npages;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
	free_mr = &priv->free_mr;

	/* Hand the MPT entry back from hardware to software ownership. */
	if (mr->enabled) {
		if (hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key)
				       & (hr_dev->caps.num_mtpts - 1)))
			dev_warn(dev, "HW2SW_MPT failed!\n");
	}

	mr_work = kzalloc(sizeof(*mr_work), GFP_KERNEL);
	if (!mr_work) {
		ret = -ENOMEM;
		goto free_mr;
	}

	INIT_WORK(&(mr_work->work), hns_roce_v1_mr_free_work_fn);

	mr_work->ib_dev = &(hr_dev->ib_dev);
	mr_work->comp = &comp;
	/* comp_flag tells the worker a waiter still wants the completion. */
	mr_work->comp_flag = 1;
	mr_work->mr = (void *)mr;
	init_completion(mr_work->comp);

	queue_work(free_mr->free_mr_wq, &(mr_work->work));

	/* Poll for the worker's completion until the deadline. */
	while (time_before_eq(jiffies, end)) {
		if (try_wait_for_completion(&comp))
			goto free_mr;
		msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
	}

	/*
	 * Timed out: clear comp_flag so the worker will not complete() a
	 * stack completion that is about to go out of scope, then give the
	 * race one last chance before declaring failure.
	 */
	mr_work->comp_flag = 0;
	if (try_wait_for_completion(&comp))
		goto free_mr;

	dev_warn(dev, "Free mr work 0x%x over 50s and failed!\n", mr->key);
	ret = -ETIMEDOUT;

free_mr:
	dev_dbg(dev, "Free mr 0x%x use 0x%x us.\n",
		mr->key, jiffies_to_usecs(jiffies) - jiffies_to_usecs(start));

	/*
	 * size == ~0ULL presumably marks a full-address-space DMA MR with
	 * no PBL buffer — TODO confirm against the MR-creation path.
	 */
	if (mr->size != ~0ULL) {
		npages = ib_umem_page_count(mr->umem);
		dma_free_coherent(dev, npages * 8, mr->pbl_buf,
				  mr->pbl_dma_addr);
	}

	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
			     key_to_hw_index(mr->key), 0);

	if (mr->umem)
		ib_umem_release(mr->umem);

	kfree(mr);

	return ret;
}
1099
1100static void hns_roce_db_free(struct hns_roce_dev *hr_dev)
1101{
1102        struct device *dev = &hr_dev->pdev->dev;
1103        struct hns_roce_v1_priv *priv;
1104        struct hns_roce_db_table *db;
1105
1106        priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
1107        db = &priv->db_table;
1108
1109        if (db->sdb_ext_mod) {
1110                dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
1111                                  db->ext_db->sdb_buf_list->buf,
1112                                  db->ext_db->sdb_buf_list->map);
1113                kfree(db->ext_db->sdb_buf_list);
1114        }
1115
1116        if (db->odb_ext_mod) {
1117                dma_free_coherent(dev, HNS_ROCE_V1_EXT_ODB_SIZE,
1118                                  db->ext_db->odb_buf_list->buf,
1119                                  db->ext_db->odb_buf_list->map);
1120                kfree(db->ext_db->odb_buf_list);
1121        }
1122
1123        kfree(db->ext_db);
1124}
1125
/*
 * Initialise the extended RAQ: allocate its DMA buffer and program the
 * base address, depth (shift), watermark, polling interval and drop
 * behaviour into the ROCEE registers.  Register order follows the hip06
 * programming sequence; do not reorder the writes.
 * Returns 0 on success, -ENOMEM if an allocation fails.
 */
static int hns_roce_raq_init(struct hns_roce_dev *hr_dev)
{
	int ret;
	int raq_shift = 0;
	dma_addr_t addr;
	u32 val;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_raq_table *raq;
	struct device *dev = &hr_dev->pdev->dev;

	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
	raq = &priv->raq_table;

	/* Descriptor that tracks the RAQ buffer and its DMA handle. */
	raq->e_raq_buf = kzalloc(sizeof(*(raq->e_raq_buf)), GFP_KERNEL);
	if (!raq->e_raq_buf)
		return -ENOMEM;

	raq->e_raq_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_RAQ_SIZE,
						 &addr, GFP_KERNEL);
	if (!raq->e_raq_buf->buf) {
		ret = -ENOMEM;
		goto err_dma_alloc_raq;
	}
	raq->e_raq_buf->map = addr;

	/* Configure raq extended address. 48bit 4K align*/
	roce_write(hr_dev, ROCEE_EXT_RAQ_REG, raq->e_raq_buf->map >> 12);

	/* Configure raq_shift: log2 of the RAQ depth in entries. */
	raq_shift = ilog2(HNS_ROCE_V1_RAQ_SIZE / HNS_ROCE_V1_RAQ_ENTRY);
	val = roce_read(hr_dev, ROCEE_EXT_RAQ_H_REG);
	roce_set_field(val, ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M,
		       ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S, raq_shift);
	/*
	 * 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of
	 * using 4K page, and shift more 32 because of
	 * caculating the high 32 bit value evaluated to hardware.
	 */
	roce_set_field(val, ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M,
		       ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S,
		       raq->e_raq_buf->map >> 44);
	roce_write(hr_dev, ROCEE_EXT_RAQ_H_REG, val);
	dev_dbg(dev, "Configure raq_shift 0x%x.\n", val);

	/* Configure raq threshold (watermark). */
	val = roce_read(hr_dev, ROCEE_RAQ_WL_REG);
	roce_set_field(val, ROCEE_RAQ_WL_ROCEE_RAQ_WL_M,
		       ROCEE_RAQ_WL_ROCEE_RAQ_WL_S,
		       HNS_ROCE_V1_EXT_RAQ_WF);
	roce_write(hr_dev, ROCEE_RAQ_WL_REG, val);
	dev_dbg(dev, "Configure raq_wl 0x%x.\n", val);

	/* Enable extend raq: mode bit, polling interval and timeout check. */
	val = roce_read(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG);
	roce_set_field(val,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S,
		       POL_TIME_INTERVAL_VAL);
	roce_set_bit(val, ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE, 1);
	roce_set_field(val,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S,
		       2);
	roce_set_bit(val,
		     ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S, 1);
	roce_write(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG, val);
	dev_dbg(dev, "Configure WrmsPolTimeInterval 0x%x.\n", val);

	/* Enable raq drop */
	val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
	roce_set_bit(val, ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S, 1);
	roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
	dev_dbg(dev, "Configure GlbCfg = 0x%x.\n", val);

	return 0;

err_dma_alloc_raq:
	kfree(raq->e_raq_buf);
	return ret;
}
1206
1207static void hns_roce_raq_free(struct hns_roce_dev *hr_dev)
1208{
1209        struct device *dev = &hr_dev->pdev->dev;
1210        struct hns_roce_v1_priv *priv;
1211        struct hns_roce_raq_table *raq;
1212
1213        priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
1214        raq = &priv->raq_table;
1215
1216        dma_free_coherent(dev, HNS_ROCE_V1_RAQ_SIZE, raq->e_raq_buf->buf,
1217                          raq->e_raq_buf->map);
1218        kfree(raq->e_raq_buf);
1219}
1220
1221static void hns_roce_port_enable(struct hns_roce_dev *hr_dev, int enable_flag)
1222{
1223        u32 val;
1224
1225        if (enable_flag) {
1226                val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
1227                 /* Open all ports */
1228                roce_set_field(val, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
1229                               ROCEE_GLB_CFG_ROCEE_PORT_ST_S,
1230                               ALL_PORT_VAL_OPEN);
1231                roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
1232        } else {
1233                val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
1234                /* Close all ports */
1235                roce_set_field(val, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
1236                               ROCEE_GLB_CFG_ROCEE_PORT_ST_S, 0x0);
1237                roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
1238        }
1239}
1240
1241static int hns_roce_bt_init(struct hns_roce_dev *hr_dev)
1242{
1243        struct device *dev = &hr_dev->pdev->dev;
1244        struct hns_roce_v1_priv *priv;
1245        int ret;
1246
1247        priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
1248
1249        priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev,
1250                HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map,
1251                GFP_KERNEL);
1252        if (!priv->bt_table.qpc_buf.buf)
1253                return -ENOMEM;
1254
1255        priv->bt_table.mtpt_buf.buf = dma_alloc_coherent(dev,
1256                HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.mtpt_buf.map,
1257                GFP_KERNEL);
1258        if (!priv->bt_table.mtpt_buf.buf) {
1259                ret = -ENOMEM;
1260                goto err_failed_alloc_mtpt_buf;
1261        }
1262
1263        priv->bt_table.cqc_buf.buf = dma_alloc_coherent(dev,
1264                HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.cqc_buf.map,
1265                GFP_KERNEL);
1266        if (!priv->bt_table.cqc_buf.buf) {
1267                ret = -ENOMEM;
1268                goto err_failed_alloc_cqc_buf;
1269        }
1270
1271        return 0;
1272
1273err_failed_alloc_cqc_buf:
1274        dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
1275                priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);
1276
1277err_failed_alloc_mtpt_buf:
1278        dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
1279                priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
1280
1281        return ret;
1282}
1283
1284static void hns_roce_bt_free(struct hns_roce_dev *hr_dev)
1285{
1286        struct device *dev = &hr_dev->pdev->dev;
1287        struct hns_roce_v1_priv *priv;
1288
1289        priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
1290
1291        dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
1292                priv->bt_table.cqc_buf.buf, priv->bt_table.cqc_buf.map);
1293
1294        dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
1295                priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);
1296
1297        dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
1298                priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
1299}
1300
1301static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev)
1302{
1303        struct device *dev = &hr_dev->pdev->dev;
1304        struct hns_roce_buf_list *tptr_buf;
1305        struct hns_roce_v1_priv *priv;
1306
1307        priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
1308        tptr_buf = &priv->tptr_table.tptr_buf;
1309
1310        /*
1311         * This buffer will be used for CQ's tptr(tail pointer), also
1312         * named ci(customer index). Every CQ will use 2 bytes to save
1313         * cqe ci in hip06. Hardware will read this area to get new ci
1314         * when the queue is almost full.
1315         */
1316        tptr_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
1317                                           &tptr_buf->map, GFP_KERNEL);
1318        if (!tptr_buf->buf)
1319                return -ENOMEM;
1320
1321        hr_dev->tptr_dma_addr = tptr_buf->map;
1322        hr_dev->tptr_size = HNS_ROCE_V1_TPTR_BUF_SIZE;
1323
1324        return 0;
1325}
1326
1327static void hns_roce_tptr_free(struct hns_roce_dev *hr_dev)
1328{
1329        struct device *dev = &hr_dev->pdev->dev;
1330        struct hns_roce_buf_list *tptr_buf;
1331        struct hns_roce_v1_priv *priv;
1332
1333        priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
1334        tptr_buf = &priv->tptr_table.tptr_buf;
1335
1336        dma_free_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
1337                          tptr_buf->buf, tptr_buf->map);
1338}
1339
1340static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev)
1341{
1342        struct device *dev = &hr_dev->pdev->dev;
1343        struct hns_roce_free_mr *free_mr;
1344        struct hns_roce_v1_priv *priv;
1345        int ret = 0;
1346
1347        priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
1348        free_mr = &priv->free_mr;
1349
1350        free_mr->free_mr_wq = create_singlethread_workqueue("hns_roce_free_mr");
1351        if (!free_mr->free_mr_wq) {
1352                dev_err(dev, "Create free mr workqueue failed!\n");
1353                return -ENOMEM;
1354        }
1355
1356        ret = hns_roce_v1_rsv_lp_qp(hr_dev);
1357        if (ret) {
1358                dev_err(dev, "Reserved loop qp failed(%d)!\n", ret);
1359                flush_workqueue(free_mr->free_mr_wq);
1360                destroy_workqueue(free_mr->free_mr_wq);
1361        }
1362
1363        return ret;
1364}
1365
1366static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev)
1367{
1368        struct hns_roce_free_mr *free_mr;
1369        struct hns_roce_v1_priv *priv;
1370
1371        priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
1372        free_mr = &priv->free_mr;
1373
1374        flush_workqueue(free_mr->free_mr_wq);
1375        destroy_workqueue(free_mr->free_mr_wq);
1376
1377        hns_roce_v1_release_lp_qp(hr_dev);
1378}
1379
/**
 * hns_roce_v1_reset - reset RoCE
 * @hr_dev: RoCE device struct pointer
 * @dereset: true -- drop reset, false -- reset
 * return 0 - success , negative --fail
 */
int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
{
	struct device_node *dsaf_node;
	struct device *dev = &hr_dev->pdev->dev;
	struct device_node *np = dev->of_node;
	struct fwnode_handle *fwnode;
	int ret;

	/* check if this is DT/ACPI case */
	if (dev_of_node(dev)) {
		/* DT: the DSAF is referenced via the "dsaf-handle" phandle. */
		dsaf_node = of_parse_phandle(np, "dsaf-handle", 0);
		if (!dsaf_node) {
			dev_err(dev, "could not find dsaf-handle\n");
			return -EINVAL;
		}
		fwnode = &dsaf_node->fwnode;
	} else if (is_acpi_device_node(dev->fwnode)) {
		/* ACPI: the same reference comes from a device property. */
		struct acpi_reference_args args;

		ret = acpi_node_get_property_reference(dev->fwnode,
						       "dsaf-handle", 0, &args);
		if (ret) {
			dev_err(dev, "could not find dsaf-handle\n");
			return ret;
		}
		fwnode = acpi_fwnode_handle(args.adev);
	} else {
		dev_err(dev, "cannot read data from DT or ACPI\n");
		return -ENXIO;
	}

	/* Assert reset on the RoCE block through the DSAF. */
	ret = hns_dsaf_roce_reset(fwnode, false);
	if (ret)
		return ret;

	/* For a de-reset, hold the block in reset briefly, then release. */
	if (dereset) {
		msleep(SLEEP_TIME_INTERVAL);
		ret = hns_dsaf_roce_reset(fwnode, true);
	}

	return ret;
}
1428
1429static int hns_roce_des_qp_init(struct hns_roce_dev *hr_dev)
1430{
1431        struct device *dev = &hr_dev->pdev->dev;
1432        struct hns_roce_v1_priv *priv;
1433        struct hns_roce_des_qp *des_qp;
1434
1435        priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
1436        des_qp = &priv->des_qp;
1437
1438        des_qp->requeue_flag = 1;
1439        des_qp->qp_wq = create_singlethread_workqueue("hns_roce_destroy_qp");
1440        if (!des_qp->qp_wq) {
1441                dev_err(dev, "Create destroy qp workqueue failed!\n");
1442                return -ENOMEM;
1443        }
1444
1445        return 0;
1446}
1447
1448static void hns_roce_des_qp_free(struct hns_roce_dev *hr_dev)
1449{
1450        struct hns_roce_v1_priv *priv;
1451        struct hns_roce_des_qp *des_qp;
1452
1453        priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
1454        des_qp = &priv->des_qp;
1455
1456        des_qp->requeue_flag = 0;
1457        flush_workqueue(des_qp->qp_wq);
1458        destroy_workqueue(des_qp->qp_wq);
1459}
1460
1461void hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
1462{
1463        int i = 0;
1464        struct hns_roce_caps *caps = &hr_dev->caps;
1465
1466        hr_dev->vendor_id = le32_to_cpu(roce_read(hr_dev, ROCEE_VENDOR_ID_REG));
1467        hr_dev->vendor_part_id = le32_to_cpu(roce_read(hr_dev,
1468                                             ROCEE_VENDOR_PART_ID_REG));
1469        hr_dev->sys_image_guid = le32_to_cpu(roce_read(hr_dev,
1470                                             ROCEE_SYS_IMAGE_GUID_L_REG)) |
1471                                ((u64)le32_to_cpu(roce_read(hr_dev,
1472                                            ROCEE_SYS_IMAGE_GUID_H_REG)) << 32);
1473        hr_dev->hw_rev          = HNS_ROCE_HW_VER1;
1474
1475        caps->num_qps           = HNS_ROCE_V1_MAX_QP_NUM;
1476        caps->max_wqes          = HNS_ROCE_V1_MAX_WQE_NUM;
1477        caps->num_cqs           = HNS_ROCE_V1_MAX_CQ_NUM;
1478        caps->max_cqes          = HNS_ROCE_V1_MAX_CQE_NUM;
1479        caps->max_sq_sg         = HNS_ROCE_V1_SG_NUM;
1480        caps->max_rq_sg         = HNS_ROCE_V1_SG_NUM;
1481        caps->max_sq_inline     = HNS_ROCE_V1_INLINE_SIZE;
1482        caps->num_uars          = HNS_ROCE_V1_UAR_NUM;
1483        caps->phy_num_uars      = HNS_ROCE_V1_PHY_UAR_NUM;
1484        caps->num_aeq_vectors   = HNS_ROCE_AEQE_VEC_NUM;
1485        caps->num_comp_vectors  = HNS_ROCE_COMP_VEC_NUM;
1486        caps->num_other_vectors = HNS_ROCE_AEQE_OF_VEC_NUM;
1487        caps->num_mtpts         = HNS_ROCE_V1_MAX_MTPT_NUM;
1488        caps->num_mtt_segs      = HNS_ROCE_V1_MAX_MTT_SEGS;
1489        caps->num_pds           = HNS_ROCE_V1_MAX_PD_NUM;
1490        caps->max_qp_init_rdma  = HNS_ROCE_V1_MAX_QP_INIT_RDMA;
1491        caps->max_qp_dest_rdma  = HNS_ROCE_V1_MAX_QP_DEST_RDMA;
1492        caps->max_sq_desc_sz    = HNS_ROCE_V1_MAX_SQ_DESC_SZ;
1493        caps->max_rq_desc_sz    = HNS_ROCE_V1_MAX_RQ_DESC_SZ;
1494        caps->qpc_entry_sz      = HNS_ROCE_V1_QPC_ENTRY_SIZE;
1495        caps->irrl_entry_sz     = HNS_ROCE_V1_IRRL_ENTRY_SIZE;
1496        caps->cqc_entry_sz      = HNS_ROCE_V1_CQC_ENTRY_SIZE;
1497        caps->mtpt_entry_sz     = HNS_ROCE_V1_MTPT_ENTRY_SIZE;
1498        caps->mtt_entry_sz      = HNS_ROCE_V1_MTT_ENTRY_SIZE;
1499        caps->cq_entry_sz       = HNS_ROCE_V1_CQE_ENTRY_SIZE;
1500        caps->page_size_cap     = HNS_ROCE_V1_PAGE_SIZE_SUPPORT;
1501        caps->reserved_lkey     = 0;
1502        caps->reserved_pds      = 0;
1503        caps->reserved_mrws     = 1;
1504        caps->reserved_uars     = 0;
1505        caps->reserved_cqs      = 0;
1506
1507        for (i = 0; i < caps->num_ports; i++)
1508                caps->pkey_table_len[i] = 1;
1509
1510        for (i = 0; i < caps->num_ports; i++) {
1511                /* Six ports shared 16 GID in v1 engine */
1512                if (i >= (HNS_ROCE_V1_GID_NUM % caps->num_ports))
1513                        caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
1514                                                 caps->num_ports;
1515                else
1516                        caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
1517                                                 caps->num_ports + 1;
1518        }
1519
1520        for (i = 0; i < caps->num_comp_vectors; i++)
1521                caps->ceqe_depth[i] = HNS_ROCE_V1_NUM_COMP_EQE;
1522
1523        caps->aeqe_depth = HNS_ROCE_V1_NUM_ASYNC_EQE;
1524        caps->local_ca_ack_delay = le32_to_cpu(roce_read(hr_dev,
1525                                                         ROCEE_ACK_DELAY_REG));
1526        caps->max_mtu = IB_MTU_2048;
1527}
1528
1529int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
1530{
1531        int ret;
1532        u32 val;
1533        struct device *dev = &hr_dev->pdev->dev;
1534
1535        /* DMAE user config */
1536        val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG1_REG);
1537        roce_set_field(val, ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M,
1538                       ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S, 0xf);
1539        roce_set_field(val, ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M,
1540                       ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S,
1541                       1 << PAGES_SHIFT_16);
1542        roce_write(hr_dev, ROCEE_DMAE_USER_CFG1_REG, val);
1543
1544        val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG2_REG);
1545        roce_set_field(val, ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M,
1546                       ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S, 0xf);
1547        roce_set_field(val, ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M,
1548                       ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S,
1549                       1 << PAGES_SHIFT_16);
1550
1551        ret = hns_roce_db_init(hr_dev);
1552        if (ret) {
1553                dev_err(dev, "doorbell init failed!\n");
1554                return ret;
1555        }
1556
1557        ret = hns_roce_raq_init(hr_dev);
1558        if (ret) {
1559                dev_err(dev, "raq init failed!\n");
1560                goto error_failed_raq_init;
1561        }
1562
1563        ret = hns_roce_bt_init(hr_dev);
1564        if (ret) {
1565                dev_err(dev, "bt init failed!\n");
1566                goto error_failed_bt_init;
1567        }
1568
1569        ret = hns_roce_tptr_init(hr_dev);
1570        if (ret) {
1571                dev_err(dev, "tptr init failed!\n");
1572                goto error_failed_tptr_init;
1573        }
1574
1575        ret = hns_roce_des_qp_init(hr_dev);
1576        if (ret) {
1577                dev_err(dev, "des qp init failed!\n");
1578                goto error_failed_des_qp_init;
1579        }
1580
1581        ret = hns_roce_free_mr_init(hr_dev);
1582        if (ret) {
1583                dev_err(dev, "free mr init failed!\n");
1584                goto error_failed_free_mr_init;
1585        }
1586
1587        hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP);
1588
1589        return 0;
1590
1591error_failed_free_mr_init:
1592        hns_roce_des_qp_free(hr_dev);
1593
1594error_failed_des_qp_init:
1595        hns_roce_tptr_free(hr_dev);
1596
1597error_failed_tptr_init:
1598        hns_roce_bt_free(hr_dev);
1599
1600error_failed_bt_init:
1601        hns_roce_raq_free(hr_dev);
1602
1603error_failed_raq_init:
1604        hns_roce_db_free(hr_dev);
1605        return ret;
1606}
1607
/*
 * hns_roce_v1_exit() - tear down everything hns_roce_v1_init() set up, in
 * exact reverse order, after taking the port down first.
 */
void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
{
	hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
	hns_roce_free_mr_free(hr_dev);
	hns_roce_des_qp_free(hr_dev);
	hns_roce_tptr_free(hr_dev);
	hns_roce_bt_free(hr_dev);
	hns_roce_raq_free(hr_dev);
	hns_roce_db_free(hr_dev);
}
1618
1619void hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
1620                         union ib_gid *gid)
1621{
1622        u32 *p = NULL;
1623        u8 gid_idx = 0;
1624
1625        gid_idx = hns_get_gid_index(hr_dev, port, gid_index);
1626
1627        p = (u32 *)&gid->raw[0];
1628        roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_L_0_REG +
1629                       (HNS_ROCE_V1_GID_NUM * gid_idx));
1630
1631        p = (u32 *)&gid->raw[4];
1632        roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_ML_0_REG +
1633                       (HNS_ROCE_V1_GID_NUM * gid_idx));
1634
1635        p = (u32 *)&gid->raw[8];
1636        roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_MH_0_REG +
1637                       (HNS_ROCE_V1_GID_NUM * gid_idx));
1638
1639        p = (u32 *)&gid->raw[0xc];
1640        roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_H_0_REG +
1641                       (HNS_ROCE_V1_GID_NUM * gid_idx));
1642}
1643
1644void hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr)
1645{
1646        u32 reg_smac_l;
1647        u16 reg_smac_h;
1648        u16 *p_h;
1649        u32 *p;
1650        u32 val;
1651
1652        /*
1653         * When mac changed, loopback may fail
1654         * because of smac not equal to dmac.
1655         * We Need to release and create reserved qp again.
1656         */
1657        if (hr_dev->hw->dereg_mr && hns_roce_v1_recreate_lp_qp(hr_dev))
1658                dev_warn(&hr_dev->pdev->dev, "recreate lp qp timeout!\n");
1659
1660        p = (u32 *)(&addr[0]);
1661        reg_smac_l = *p;
1662        roce_raw_write(reg_smac_l, hr_dev->reg_base + ROCEE_SMAC_L_0_REG +
1663                       PHY_PORT_OFFSET * phy_port);
1664
1665        val = roce_read(hr_dev,
1666                        ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
1667        p_h = (u16 *)(&addr[4]);
1668        reg_smac_h  = *p_h;
1669        roce_set_field(val, ROCEE_SMAC_H_ROCEE_SMAC_H_M,
1670                       ROCEE_SMAC_H_ROCEE_SMAC_H_S, reg_smac_h);
1671        roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
1672                   val);
1673}
1674
1675void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
1676                         enum ib_mtu mtu)
1677{
1678        u32 val;
1679
1680        val = roce_read(hr_dev,
1681                        ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
1682        roce_set_field(val, ROCEE_SMAC_H_ROCEE_PORT_MTU_M,
1683                       ROCEE_SMAC_H_ROCEE_PORT_MTU_S, mtu);
1684        roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
1685                   val);
1686}
1687
/*
 * hns_roce_v1_write_mtpt() - fill a v1 MPT (memory protection table) entry
 * for a memory region into the mailbox buffer.
 * @mb_buf:   mailbox buffer the MPT entry is written into.
 * @mr:       memory region being registered (key, access flags, iova, size).
 * @mtpt_idx: lkey index of this MPT entry.
 *
 * For MR_TYPE_DMA only the control fields are written. For user MRs the
 * first HNS_ROCE_MAX_INNER_MTPT_NUM page frame numbers are mirrored
 * directly into the MPT (PA0..PA6 split fields) and the PBL base address
 * is recorded.
 *
 * Return: 0 on success, -ENOMEM if the temporary page buffer cannot be
 * allocated.
 */
int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
			   unsigned long mtpt_idx)
{
	struct hns_roce_v1_mpt_entry *mpt_entry;
	struct scatterlist *sg;
	u64 *pages;
	int entry;
	int i;

	/* MPT filled into mailbox buf */
	mpt_entry = (struct hns_roce_v1_mpt_entry *)mb_buf;
	memset(mpt_entry, 0, sizeof(*mpt_entry));

	/* Key state/value, 4K page size, MR type and access rights. */
	roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_STATE_M,
		       MPT_BYTE_4_KEY_STATE_S, KEY_VALID);
	roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_M,
		       MPT_BYTE_4_KEY_S, mr->key);
	roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_PAGE_SIZE_M,
		       MPT_BYTE_4_PAGE_SIZE_S, MR_SIZE_4K);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_TYPE_S, 0);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_BIND_ENABLE_S,
		     (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_OWN_S, 0);
	roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_MEMORY_LOCATION_TYPE_M,
		       MPT_BYTE_4_MEMORY_LOCATION_TYPE_S, mr->type);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_ATOMIC_S, 0);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_LOCAL_WRITE_S,
		     (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_WRITE_S,
		     (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_READ_S,
		     (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_INVAL_ENABLE_S,
		     0);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_ADDRESS_TYPE_S, 0);

	roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
		       MPT_BYTE_12_PBL_ADDR_H_S, 0);
	roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_MW_BIND_COUNTER_M,
		       MPT_BYTE_12_MW_BIND_COUNTER_S, 0);

	/* Virtual address and length of the region, split into 32-bit words. */
	mpt_entry->virt_addr_l = (u32)mr->iova;
	mpt_entry->virt_addr_h = (u32)(mr->iova >> 32);
	mpt_entry->length = (u32)mr->size;

	/* PD and the lkey index split across byte_28 (low) / byte_64 (high). */
	roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_PD_M,
		       MPT_BYTE_28_PD_S, mr->pd);
	roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_L_KEY_IDX_L_M,
		       MPT_BYTE_28_L_KEY_IDX_L_S, mtpt_idx);
	roce_set_field(mpt_entry->mpt_byte_64, MPT_BYTE_64_L_KEY_IDX_H_M,
		       MPT_BYTE_64_L_KEY_IDX_H_S, mtpt_idx >> MTPT_IDX_SHIFT);

	/* DMA memory register */
	if (mr->type == MR_TYPE_DMA)
		return 0;

	pages = (u64 *) __get_free_page(GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/*
	 * Collect the page frame numbers (DMA address >> 12, i.e. 4K pages)
	 * from the umem scatterlist.
	 * NOTE(review): the bounds check runs after the store, so one extra
	 * slot past HNS_ROCE_MAX_INNER_MTPT_NUM is written and then ignored;
	 * harmless because "pages" is a full page, but worth confirming.
	 */
	i = 0;
	for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
		pages[i] = ((u64)sg_dma_address(sg)) >> 12;

		/* Directly record to MTPT table firstly 7 entry */
		if (i >= HNS_ROCE_MAX_INNER_MTPT_NUM)
			break;
		i++;
	}

	/* Register user mr */
	/*
	 * Mirror PFNs into the fixed PA0..PA6 slots; each PA is split into a
	 * low word and a high bitfield at varying offsets/widths.
	 * NOTE(review): cpu_to_le32() on values passed into roce_set_field()
	 * looks inconsistent with the rest of the driver, which converts the
	 * whole word elsewhere; confirm behaviour on big-endian hosts.
	 */
	for (i = 0; i < HNS_ROCE_MAX_INNER_MTPT_NUM; i++) {
		switch (i) {
		case 0:
			mpt_entry->pa0_l = cpu_to_le32((u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_36,
				MPT_BYTE_36_PA0_H_M,
				MPT_BYTE_36_PA0_H_S,
				cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_32)));
			break;
		case 1:
			roce_set_field(mpt_entry->mpt_byte_36,
				       MPT_BYTE_36_PA1_L_M,
				       MPT_BYTE_36_PA1_L_S,
				       cpu_to_le32((u32)(pages[i])));
			roce_set_field(mpt_entry->mpt_byte_40,
				MPT_BYTE_40_PA1_H_M,
				MPT_BYTE_40_PA1_H_S,
				cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_24)));
			break;
		case 2:
			roce_set_field(mpt_entry->mpt_byte_40,
				       MPT_BYTE_40_PA2_L_M,
				       MPT_BYTE_40_PA2_L_S,
				       cpu_to_le32((u32)(pages[i])));
			roce_set_field(mpt_entry->mpt_byte_44,
				MPT_BYTE_44_PA2_H_M,
				MPT_BYTE_44_PA2_H_S,
				cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_16)));
			break;
		case 3:
			roce_set_field(mpt_entry->mpt_byte_44,
				       MPT_BYTE_44_PA3_L_M,
				       MPT_BYTE_44_PA3_L_S,
				       cpu_to_le32((u32)(pages[i])));
			roce_set_field(mpt_entry->mpt_byte_48,
				MPT_BYTE_48_PA3_H_M,
				MPT_BYTE_48_PA3_H_S,
				cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_8)));
			break;
		case 4:
			mpt_entry->pa4_l = cpu_to_le32((u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_56,
				MPT_BYTE_56_PA4_H_M,
				MPT_BYTE_56_PA4_H_S,
				cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_32)));
			break;
		case 5:
			roce_set_field(mpt_entry->mpt_byte_56,
				       MPT_BYTE_56_PA5_L_M,
				       MPT_BYTE_56_PA5_L_S,
				       cpu_to_le32((u32)(pages[i])));
			roce_set_field(mpt_entry->mpt_byte_60,
				MPT_BYTE_60_PA5_H_M,
				MPT_BYTE_60_PA5_H_S,
				cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_24)));
			break;
		case 6:
			roce_set_field(mpt_entry->mpt_byte_60,
				       MPT_BYTE_60_PA6_L_M,
				       MPT_BYTE_60_PA6_L_S,
				       cpu_to_le32((u32)(pages[i])));
			roce_set_field(mpt_entry->mpt_byte_64,
				MPT_BYTE_64_PA6_H_M,
				MPT_BYTE_64_PA6_H_S,
				cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_16)));
			break;
		default:
			break;
		}
	}

	free_page((unsigned long) pages);

	/* PBL base address, split into low word and high bitfield. */
	mpt_entry->pbl_addr_l = (u32)(mr->pbl_dma_addr);

	roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
		       MPT_BYTE_12_PBL_ADDR_H_S,
		       ((u32)(mr->pbl_dma_addr >> 32)));

	return 0;
}
1840
1841static void *get_cqe(struct hns_roce_cq *hr_cq, int n)
1842{
1843        return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
1844                                   n * HNS_ROCE_V1_CQE_ENTRY_SIZE);
1845}
1846
1847static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
1848{
1849        struct hns_roce_cqe *hr_cqe = get_cqe(hr_cq, n & hr_cq->ib_cq.cqe);
1850
1851        /* Get cqe when Owner bit is Conversely with the MSB of cons_idx */
1852        return (roce_get_bit(hr_cqe->cqe_byte_4, CQE_BYTE_4_OWNER_S) ^
1853                !!(n & (hr_cq->ib_cq.cqe + 1))) ? hr_cqe : NULL;
1854}
1855
/* Return the software-owned CQE at the current consumer index, or NULL. */
static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq)
{
	return get_sw_cqe(hr_cq, hr_cq->cons_index);
}
1860
1861void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
1862{
1863        u32 doorbell[2];
1864
1865        doorbell[0] = cons_index & ((hr_cq->cq_depth << 1) - 1);
1866        doorbell[1] = 0;
1867        roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
1868        roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
1869                       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
1870        roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
1871                       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 0);
1872        roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
1873                       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S, hr_cq->cqn);
1874
1875        hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
1876}
1877
/*
 * __hns_roce_v1_cq_clean() - remove every CQE belonging to QP @qpn from the
 * CQ ring, compacting the remaining entries and advancing the consumer
 * index past the freed slots.
 *
 * Caller must hold hr_cq->lock (see hns_roce_v1_cq_clean()). @srq is kept
 * for interface symmetry but unused: the v1 engine does not support SRQ.
 */
static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
				   struct hns_roce_srq *srq)
{
	struct hns_roce_cqe *cqe, *dest;
	u32 prod_index;
	int nfreed = 0;
	u8 owner_bit;

	/*
	 * Find the producer index: the first index with no software-owned
	 * CQE, scanning at most one full ring from the consumer index.
	 */
	for (prod_index = hr_cq->cons_index; get_sw_cqe(hr_cq, prod_index);
	     ++prod_index) {
		if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe)
			break;
	}

	/*
	 * Now backwards through the CQ, removing CQ entries
	 * that match our QP by overwriting them with next entries.
	 */
	while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
		cqe = get_cqe(hr_cq, prod_index & hr_cq->ib_cq.cqe);
		if ((roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
				     CQE_BYTE_16_LOCAL_QPN_S) &
				     HNS_ROCE_CQE_QPN_MASK) == qpn) {
			/* In v1 engine, not support SRQ */
			++nfreed;
		} else if (nfreed) {
			/*
			 * Shift this surviving CQE forward by nfreed slots,
			 * preserving the destination slot's owner bit so the
			 * ownership protocol stays intact.
			 */
			dest = get_cqe(hr_cq, (prod_index + nfreed) &
				       hr_cq->ib_cq.cqe);
			owner_bit = roce_get_bit(dest->cqe_byte_4,
						 CQE_BYTE_4_OWNER_S);
			memcpy(dest, cqe, sizeof(*cqe));
			roce_set_bit(dest->cqe_byte_4, CQE_BYTE_4_OWNER_S,
				     owner_bit);
		}
	}

	if (nfreed) {
		hr_cq->cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();

		hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
	}
}
1925
/* Locked wrapper around __hns_roce_v1_cq_clean(). */
static void hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
				 struct hns_roce_srq *srq)
{
	spin_lock_irq(&hr_cq->lock);
	__hns_roce_v1_cq_clean(hr_cq, qpn, srq);
	spin_unlock_irq(&hr_cq->lock);
}
1933
/*
 * hns_roce_v1_write_cqc() - build the hardware CQ context in the mailbox
 * buffer.
 * @mb_buf:     mailbox buffer that receives the CQ context.
 * @mtts:       MTT addresses of the CQ buffer (mtts[0] is the first CQE BA).
 * @dma_handle: DMA address of the CQ's base-address table.
 * @nent:       number of CQ entries (power of two; log2 is programmed).
 * @vector:     completion event queue (CEQ) number serving this CQ.
 *
 * Also wires hr_cq->tptr_addr to this CQ's slot in the shared tptr buffer
 * so the tail pointer can be read/updated by software.
 */
void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
			   struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
			   dma_addr_t dma_handle, int nent, u32 vector)
{
	struct hns_roce_cq_context *cq_context = NULL;
	struct hns_roce_buf_list *tptr_buf;
	struct hns_roce_v1_priv *priv;
	dma_addr_t tptr_dma_addr;
	int offset;

	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
	tptr_buf = &priv->tptr_table.tptr_buf;

	cq_context = mb_buf;
	memset(cq_context, 0, sizeof(*cq_context));

	/* Get the tptr for this CQ. */
	offset = hr_cq->cqn * HNS_ROCE_V1_TPTR_ENTRY_SIZE;
	tptr_dma_addr = tptr_buf->map + offset;
	hr_cq->tptr_addr = (u16 *)(tptr_buf->buf + offset);

	/* Register cq_context members */
	roce_set_field(cq_context->cqc_byte_4,
		       CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_M,
		       CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S, CQ_STATE_VALID);
	roce_set_field(cq_context->cqc_byte_4, CQ_CONTEXT_CQC_BYTE_4_CQN_M,
		       CQ_CONTEXT_CQC_BYTE_4_CQN_S, hr_cq->cqn);
	cq_context->cqc_byte_4 = cpu_to_le32(cq_context->cqc_byte_4);

	/* Base-address table address, split into low word / high bitfield. */
	cq_context->cq_bt_l = (u32)dma_handle;
	cq_context->cq_bt_l = cpu_to_le32(cq_context->cq_bt_l);

	roce_set_field(cq_context->cqc_byte_12,
		       CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_M,
		       CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S,
		       ((u64)dma_handle >> 32));
	roce_set_field(cq_context->cqc_byte_12,
		       CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M,
		       CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S,
		       ilog2((unsigned int)nent));
	roce_set_field(cq_context->cqc_byte_12, CQ_CONTEXT_CQC_BYTE_12_CEQN_M,
		       CQ_CONTEXT_CQC_BYTE_12_CEQN_S, vector);
	cq_context->cqc_byte_12 = cpu_to_le32(cq_context->cqc_byte_12);

	cq_context->cur_cqe_ba0_l = (u32)(mtts[0]);
	cq_context->cur_cqe_ba0_l = cpu_to_le32(cq_context->cur_cqe_ba0_l);

	/*
	 * NOTE(review): cpu_to_le32() on a roce_set_field() argument is
	 * inconsistent with the whole-word conversion done below; confirm
	 * intended behaviour on big-endian hosts.
	 */
	roce_set_field(cq_context->cqc_byte_20,
		       CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_M,
		       CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S,
		       cpu_to_le32((mtts[0]) >> 32));
	/* Dedicated hardware, directly set 0 */
	roce_set_field(cq_context->cqc_byte_20,
		       CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_M,
		       CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S, 0);
	/**
	 * 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of
	 * using 4K page, and shift more 32 because of
	 * caculating the high 32 bit value evaluated to hardware.
	 */
	roce_set_field(cq_context->cqc_byte_20,
		       CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M,
		       CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S,
		       tptr_dma_addr >> 44);
	cq_context->cqc_byte_20 = cpu_to_le32(cq_context->cqc_byte_20);

	cq_context->cqe_tptr_addr_l = (u32)(tptr_dma_addr >> 12);

	roce_set_field(cq_context->cqc_byte_32,
		       CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M,
		       CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S, 0);
	roce_set_bit(cq_context->cqc_byte_32,
		     CQ_CONTEXT_CQC_BYTE_32_SE_FLAG_S, 0);
	roce_set_bit(cq_context->cqc_byte_32,
		     CQ_CONTEXT_CQC_BYTE_32_CE_FLAG_S, 0);
	roce_set_bit(cq_context->cqc_byte_32,
		     CQ_CONTEXT_CQC_BYTE_32_NOTIFICATION_FLAG_S, 0);
	roce_set_bit(cq_context->cqc_byte_32,
		     CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S,
		     0);
	/* The initial value of cq's ci is 0 */
	roce_set_field(cq_context->cqc_byte_32,
		       CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M,
		       CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S, 0);
	cq_context->cqc_byte_32 = cpu_to_le32(cq_context->cqc_byte_32);
}
2020
2021int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
2022{
2023        struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2024        u32 notification_flag;
2025        u32 doorbell[2];
2026
2027        notification_flag = (flags & IB_CQ_SOLICITED_MASK) ==
2028                            IB_CQ_SOLICITED ? CQ_DB_REQ_NOT : CQ_DB_REQ_NOT_SOL;
2029        /*
2030         * flags = 0; Notification Flag = 1, next
2031         * flags = 1; Notification Flag = 0, solocited
2032         */
2033        doorbell[0] = hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1);
2034        roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
2035        roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
2036                       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
2037        roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
2038                       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 1);
2039        roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
2040                       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S,
2041                       hr_cq->cqn | notification_flag);
2042
2043        hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
2044
2045        return 0;
2046}
2047
2048static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
2049                                struct hns_roce_qp **cur_qp, struct ib_wc *wc)
2050{
2051        int qpn;
2052        int is_send;
2053        u16 wqe_ctr;
2054        u32 status;
2055        u32 opcode;
2056        struct hns_roce_cqe *cqe;
2057        struct hns_roce_qp *hr_qp;
2058        struct hns_roce_wq *wq;
2059        struct hns_roce_wqe_ctrl_seg *sq_wqe;
2060        struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
2061        struct device *dev = &hr_dev->pdev->dev;
2062
2063        /* Find cqe according consumer index */
2064        cqe = next_cqe_sw(hr_cq);
2065        if (!cqe)
2066                return -EAGAIN;
2067
2068        ++hr_cq->cons_index;
2069        /* Memory barrier */
2070        rmb();
2071        /* 0->SQ, 1->RQ */
2072        is_send  = !(roce_get_bit(cqe->cqe_byte_4, CQE_BYTE_4_SQ_RQ_FLAG_S));
2073
2074        /* Local_qpn in UD cqe is always 1, so it needs to compute new qpn */
2075        if (roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
2076                           CQE_BYTE_16_LOCAL_QPN_S) <= 1) {
2077                qpn = roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_PORT_NUM_M,
2078                                     CQE_BYTE_20_PORT_NUM_S) +
2079                      roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
2080                                     CQE_BYTE_16_LOCAL_QPN_S) *
2081                                     HNS_ROCE_MAX_PORTS;
2082        } else {
2083                qpn = roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
2084                                     CQE_BYTE_16_LOCAL_QPN_S);
2085        }
2086
2087        if (!*cur_qp || (qpn & HNS_ROCE_CQE_QPN_MASK) != (*cur_qp)->qpn) {
2088                hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
2089                if (unlikely(!hr_qp)) {
2090                        dev_err(dev, "CQ %06lx with entry for unknown QPN %06x\n",
2091                                hr_cq->cqn, (qpn & HNS_ROCE_CQE_QPN_MASK));
2092                        return -EINVAL;
2093                }
2094
2095                *cur_qp = hr_qp;
2096        }
2097
2098        wc->qp = &(*cur_qp)->ibqp;
2099        wc->vendor_err = 0;
2100
2101        status = roce_get_field(cqe->cqe_byte_4,
2102                                CQE_BYTE_4_STATUS_OF_THE_OPERATION_M,
2103                                CQE_BYTE_4_STATUS_OF_THE_OPERATION_S) &
2104                                HNS_ROCE_CQE_STATUS_MASK;
2105        switch (status) {
2106        case HNS_ROCE_CQE_SUCCESS:
2107                wc->status = IB_WC_SUCCESS;
2108                break;
2109        case HNS_ROCE_CQE_SYNDROME_LOCAL_LENGTH_ERR:
2110                wc->status = IB_WC_LOC_LEN_ERR;
2111                break;
2112        case HNS_ROCE_CQE_SYNDROME_LOCAL_QP_OP_ERR:
2113                wc->status = IB_WC_LOC_QP_OP_ERR;
2114                break;
2115        case HNS_ROCE_CQE_SYNDROME_LOCAL_PROT_ERR:
2116                wc->status = IB_WC_LOC_PROT_ERR;
2117                break;
2118        case HNS_ROCE_CQE_SYNDROME_WR_FLUSH_ERR:
2119                wc->status = IB_WC_WR_FLUSH_ERR;
2120                break;
2121        case HNS_ROCE_CQE_SYNDROME_MEM_MANAGE_OPERATE_ERR:
2122                wc->status = IB_WC_MW_BIND_ERR;
2123                break;
2124        case HNS_ROCE_CQE_SYNDROME_BAD_RESP_ERR:
2125                wc->status = IB_WC_BAD_RESP_ERR;
2126                break;
2127        case HNS_ROCE_CQE_SYNDROME_LOCAL_ACCESS_ERR:
2128                wc->status = IB_WC_LOC_ACCESS_ERR;
2129                break;
2130        case HNS_ROCE_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
2131                wc->status = IB_WC_REM_INV_REQ_ERR;
2132                break;
2133        case HNS_ROCE_CQE_SYNDROME_REMOTE_ACCESS_ERR:
2134                wc->status = IB_WC_REM_ACCESS_ERR;
2135                break;
2136        case HNS_ROCE_CQE_SYNDROME_REMOTE_OP_ERR:
2137                wc->status = IB_WC_REM_OP_ERR;
2138                break;
2139        case HNS_ROCE_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
2140                wc->status = IB_WC_RETRY_EXC_ERR;
2141                break;
2142        case HNS_ROCE_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
2143                wc->status = IB_WC_RNR_RETRY_EXC_ERR;
2144                break;
2145        default:
2146                wc->status = IB_WC_GENERAL_ERR;
2147                break;
2148        }
2149
2150        /* CQE status error, directly return */
2151        if (wc->status != IB_WC_SUCCESS)
2152                return 0;
2153
2154        if (is_send) {
2155                /* SQ conrespond to CQE */
2156                sq_wqe = get_send_wqe(*cur_qp, roce_get_field(cqe->cqe_byte_4,
2157                                                CQE_BYTE_4_WQE_INDEX_M,
2158                                                CQE_BYTE_4_WQE_INDEX_S)&
2159                                                ((*cur_qp)->sq.wqe_cnt-1));
2160                switch (sq_wqe->flag & HNS_ROCE_WQE_OPCODE_MASK) {
2161                case HNS_ROCE_WQE_OPCODE_SEND:
2162                        wc->opcode = IB_WC_SEND;
2163                        break;
2164                case HNS_ROCE_WQE_OPCODE_RDMA_READ:
2165                        wc->opcode = IB_WC_RDMA_READ;
2166                        wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2167                        break;
2168                case HNS_ROCE_WQE_OPCODE_RDMA_WRITE:
2169                        wc->opcode = IB_WC_RDMA_WRITE;
2170                        break;
2171                case HNS_ROCE_WQE_OPCODE_LOCAL_INV:
2172                        wc->opcode = IB_WC_LOCAL_INV;
2173                        break;
2174                case HNS_ROCE_WQE_OPCODE_UD_SEND:
2175                        wc->opcode = IB_WC_SEND;
2176                        break;
2177                default:
2178                        wc->status = IB_WC_GENERAL_ERR;
2179                        break;
2180                }
2181                wc->wc_flags = (sq_wqe->flag & HNS_ROCE_WQE_IMM ?
2182                                IB_WC_WITH_IMM : 0);
2183
2184                wq = &(*cur_qp)->sq;
2185                if ((*cur_qp)->sq_signal_bits) {
2186                        /*
2187                         * If sg_signal_bit is 1,
2188                         * firstly tail pointer updated to wqe
2189                         * which current cqe correspond to
2190                         */
2191                        wqe_ctr = (u16)roce_get_field(cqe->cqe_byte_4,
2192                                                      CQE_BYTE_4_WQE_INDEX_M,
2193                                                      CQE_BYTE_4_WQE_INDEX_S);
2194                        wq->tail += (wqe_ctr - (u16)wq->tail) &
2195                                    (wq->wqe_cnt - 1);
2196                }
2197                wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2198                ++wq->tail;
2199        } else {
		/* RQ corresponds to CQE */
2201                wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2202                opcode = roce_get_field(cqe->cqe_byte_4,
2203                                        CQE_BYTE_4_OPERATION_TYPE_M,
2204                                        CQE_BYTE_4_OPERATION_TYPE_S) &
2205                                        HNS_ROCE_CQE_OPCODE_MASK;
2206                switch (opcode) {
2207                case HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE:
2208                        wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2209                        wc->wc_flags = IB_WC_WITH_IMM;
2210                        wc->ex.imm_data = le32_to_cpu(cqe->immediate_data);
2211                        break;
2212                case HNS_ROCE_OPCODE_SEND_DATA_RECEIVE:
2213                        if (roce_get_bit(cqe->cqe_byte_4,
2214                                         CQE_BYTE_4_IMM_INDICATOR_S)) {
2215                                wc->opcode = IB_WC_RECV;
2216                                wc->wc_flags = IB_WC_WITH_IMM;
2217                                wc->ex.imm_data = le32_to_cpu(
2218                                                  cqe->immediate_data);
2219                        } else {
2220                                wc->opcode = IB_WC_RECV;
2221                                wc->wc_flags = 0;
2222                        }
2223                        break;
2224                default:
2225                        wc->status = IB_WC_GENERAL_ERR;
2226                        break;
2227                }
2228
2229                /* Update tail pointer, record wr_id */
2230                wq = &(*cur_qp)->rq;
2231                wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2232                ++wq->tail;
2233                wc->sl = (u8)roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_SL_M,
2234                                            CQE_BYTE_20_SL_S);
2235                wc->src_qp = (u8)roce_get_field(cqe->cqe_byte_20,
2236                                                CQE_BYTE_20_REMOTE_QPN_M,
2237                                                CQE_BYTE_20_REMOTE_QPN_S);
2238                wc->wc_flags |= (roce_get_bit(cqe->cqe_byte_20,
2239                                              CQE_BYTE_20_GRH_PRESENT_S) ?
2240                                              IB_WC_GRH : 0);
2241                wc->pkey_index = (u16)roce_get_field(cqe->cqe_byte_28,
2242                                                     CQE_BYTE_28_P_KEY_IDX_M,
2243                                                     CQE_BYTE_28_P_KEY_IDX_S);
2244        }
2245
2246        return 0;
2247}
2248
2249int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
2250{
2251        struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2252        struct hns_roce_qp *cur_qp = NULL;
2253        unsigned long flags;
2254        int npolled;
2255        int ret = 0;
2256
2257        spin_lock_irqsave(&hr_cq->lock, flags);
2258
2259        for (npolled = 0; npolled < num_entries; ++npolled) {
2260                ret = hns_roce_v1_poll_one(hr_cq, &cur_qp, wc + npolled);
2261                if (ret)
2262                        break;
2263        }
2264
2265        if (npolled) {
2266                *hr_cq->tptr_addr = hr_cq->cons_index &
2267                        ((hr_cq->cq_depth << 1) - 1);
2268
2269                /* Memroy barrier */
2270                wmb();
2271                hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
2272        }
2273
2274        spin_unlock_irqrestore(&hr_cq->lock, flags);
2275
2276        if (ret == 0 || ret == -EAGAIN)
2277                return npolled;
2278        else
2279                return ret;
2280}
2281
2282int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
2283                struct hns_roce_hem_table *table, int obj)
2284{
2285        struct device *dev = &hr_dev->pdev->dev;
2286        struct hns_roce_v1_priv *priv;
2287        unsigned long end = 0, flags = 0;
2288        uint32_t bt_cmd_val[2] = {0};
2289        void __iomem *bt_cmd;
2290        u64 bt_ba = 0;
2291
2292        priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
2293
2294        switch (table->type) {
2295        case HEM_TYPE_QPC:
2296                roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
2297                        ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC);
2298                bt_ba = priv->bt_table.qpc_buf.map >> 12;
2299                break;
2300        case HEM_TYPE_MTPT:
2301                roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
2302                        ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_MTPT);
2303                bt_ba = priv->bt_table.mtpt_buf.map >> 12;
2304                break;
2305        case HEM_TYPE_CQC:
2306                roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
2307                        ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC);
2308                bt_ba = priv->bt_table.cqc_buf.map >> 12;
2309                break;
2310        case HEM_TYPE_SRQC:
2311                dev_dbg(dev, "HEM_TYPE_SRQC not support.\n");
2312                return -EINVAL;
2313        default:
2314                return 0;
2315        }
2316        roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
2317                ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
2318        roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
2319        roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
2320
2321        spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags);
2322
2323        bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
2324
2325        end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies;
2326        while (1) {
2327                if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
2328                        if (!(time_before(jiffies, end))) {
2329                                dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n");
2330                                spin_unlock_irqrestore(&hr_dev->bt_cmd_lock,
2331                                        flags);
2332                                return -EBUSY;
2333                        }
2334                } else {
2335                        break;
2336                }
2337                msleep(HW_SYNC_SLEEP_TIME_INTERVAL);
2338        }
2339
2340        bt_cmd_val[0] = (uint32_t)bt_ba;
2341        roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
2342                ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32);
2343        hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
2344
2345        spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags);
2346
2347        return 0;
2348}
2349
2350static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
2351                                 struct hns_roce_mtt *mtt,
2352                                 enum hns_roce_qp_state cur_state,
2353                                 enum hns_roce_qp_state new_state,
2354                                 struct hns_roce_qp_context *context,
2355                                 struct hns_roce_qp *hr_qp)
2356{
2357        static const u16
2358        op[HNS_ROCE_QP_NUM_STATE][HNS_ROCE_QP_NUM_STATE] = {
2359                [HNS_ROCE_QP_STATE_RST] = {
2360                [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2361                [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2362                [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
2363                },
2364                [HNS_ROCE_QP_STATE_INIT] = {
2365                [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2366                [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2367                /* Note: In v1 engine, HW doesn't support RST2INIT.
2368                 * We use RST2INIT cmd instead of INIT2INIT.
2369                 */
2370                [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
2371                [HNS_ROCE_QP_STATE_RTR] = HNS_ROCE_CMD_INIT2RTR_QP,
2372                },
2373                [HNS_ROCE_QP_STATE_RTR] = {
2374                [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2375                [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2376                [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTR2RTS_QP,
2377                },
2378                [HNS_ROCE_QP_STATE_RTS] = {
2379                [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2380                [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2381                [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTS2RTS_QP,
2382                [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_RTS2SQD_QP,
2383                },
2384                [HNS_ROCE_QP_STATE_SQD] = {
2385                [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2386                [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2387                [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_SQD2RTS_QP,
2388                [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_SQD2SQD_QP,
2389                },
2390                [HNS_ROCE_QP_STATE_ERR] = {
2391                [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2392                [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2393                }
2394        };
2395
2396        struct hns_roce_cmd_mailbox *mailbox;
2397        struct device *dev = &hr_dev->pdev->dev;
2398        int ret = 0;
2399
2400        if (cur_state >= HNS_ROCE_QP_NUM_STATE ||
2401            new_state >= HNS_ROCE_QP_NUM_STATE ||
2402            !op[cur_state][new_state]) {
2403                dev_err(dev, "[modify_qp]not support state %d to %d\n",
2404                        cur_state, new_state);
2405                return -EINVAL;
2406        }
2407
2408        if (op[cur_state][new_state] == HNS_ROCE_CMD_2RST_QP)
2409                return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
2410                                         HNS_ROCE_CMD_2RST_QP,
2411                                         HNS_ROCE_CMD_TIMEOUT_MSECS);
2412
2413        if (op[cur_state][new_state] == HNS_ROCE_CMD_2ERR_QP)
2414                return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
2415                                         HNS_ROCE_CMD_2ERR_QP,
2416                                         HNS_ROCE_CMD_TIMEOUT_MSECS);
2417
2418        mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
2419        if (IS_ERR(mailbox))
2420                return PTR_ERR(mailbox);
2421
2422        memcpy(mailbox->buf, context, sizeof(*context));
2423
2424        ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
2425                                op[cur_state][new_state],
2426                                HNS_ROCE_CMD_TIMEOUT_MSECS);
2427
2428        hns_roce_free_cmd_mailbox(hr_dev, mailbox);
2429        return ret;
2430}
2431
/*
 * Modify the state of the special QP (QP1/GSI) on the v1 engine.
 *
 * Unlike regular QPs, the QP1 context is not posted through the mailbox:
 * it is written directly into the per-port QP1C register window
 * (ROCEE_QP1C_CFG0_0_REG + phy_port * sizeof(context)) with ten writel()s,
 * and the state field is then patched read-modify-write in the same window.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL if the QP
 * buffer's MTT entries cannot be found.
 */
static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
			     int attr_mask, enum ib_qp_state cur_state,
			     enum ib_qp_state new_state)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_sqp_context *context;
	struct device *dev = &hr_dev->pdev->dev;
	dma_addr_t dma_handle = 0;
	int rq_pa_start;
	u32 reg_val;
	u64 *mtts;
	u32 *addr;

	/* Context image is staged in host memory, then copied to registers */
	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	/* Search QP buf's MTTs; dma_handle gets the MTT segment's bus addr */
	mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
				   hr_qp->mtt.first_seg, &dma_handle);
	if (!mtts) {
		dev_err(dev, "qp buf pa find failed\n");
		goto out;
	}

	/* Only the RESET->INIT transition programs the full QP1 context */
	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		/* WQE counts are programmed as log2 shifts */
		roce_set_field(context->qp1c_bytes_4,
			       QP1C_BYTES_4_SQ_WQE_SHIFT_M,
			       QP1C_BYTES_4_SQ_WQE_SHIFT_S,
			       ilog2((unsigned int)hr_qp->sq.wqe_cnt));
		roce_set_field(context->qp1c_bytes_4,
			       QP1C_BYTES_4_RQ_WQE_SHIFT_M,
			       QP1C_BYTES_4_RQ_WQE_SHIFT_S,
			       ilog2((unsigned int)hr_qp->rq.wqe_cnt));
		roce_set_field(context->qp1c_bytes_4, QP1C_BYTES_4_PD_M,
			       QP1C_BYTES_4_PD_S, to_hr_pd(ibqp->pd)->pdn);

		/* Base-address table of the SQ/RQ buffer, split low/high */
		context->sq_rq_bt_l = (u32)(dma_handle);
		roce_set_field(context->qp1c_bytes_12,
			       QP1C_BYTES_12_SQ_RQ_BT_H_M,
			       QP1C_BYTES_12_SQ_RQ_BT_H_S,
			       ((u32)(dma_handle >> 32)));

		roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_HEAD_M,
			       QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head);
		roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_PORT_NUM_M,
			       QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port);
		roce_set_bit(context->qp1c_bytes_16,
			     QP1C_BYTES_16_SIGNALING_TYPE_S,
			     hr_qp->sq_signal_bits);
		roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S,
			     1);
		roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S,
			     1);
		roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_QP1_ERR_S,
			     0);

		roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_SQ_HEAD_M,
			       QP1C_BYTES_20_SQ_HEAD_S, hr_qp->sq.head);
		roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_PKEY_IDX_M,
			       QP1C_BYTES_20_PKEY_IDX_S, attr->pkey_index);

		/* First page of the RQ region within the QP buffer's MTT */
		rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
		context->cur_rq_wqe_ba_l = (u32)(mtts[rq_pa_start]);

		roce_set_field(context->qp1c_bytes_28,
			       QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M,
			       QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S,
			       (mtts[rq_pa_start]) >> 32);
		roce_set_field(context->qp1c_bytes_28,
			       QP1C_BYTES_28_RQ_CUR_IDX_M,
			       QP1C_BYTES_28_RQ_CUR_IDX_S, 0);

		roce_set_field(context->qp1c_bytes_32,
			       QP1C_BYTES_32_RX_CQ_NUM_M,
			       QP1C_BYTES_32_RX_CQ_NUM_S,
			       to_hr_cq(ibqp->recv_cq)->cqn);
		roce_set_field(context->qp1c_bytes_32,
			       QP1C_BYTES_32_TX_CQ_NUM_M,
			       QP1C_BYTES_32_TX_CQ_NUM_S,
			       to_hr_cq(ibqp->send_cq)->cqn);

		/* SQ starts at the first MTT page of the QP buffer */
		context->cur_sq_wqe_ba_l  = (u32)mtts[0];

		roce_set_field(context->qp1c_bytes_40,
			       QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M,
			       QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S,
			       (mtts[0]) >> 32);
		roce_set_field(context->qp1c_bytes_40,
			       QP1C_BYTES_40_SQ_CUR_IDX_M,
			       QP1C_BYTES_40_SQ_CUR_IDX_S, 0);

		/* Copy context to QP1C register window; word order/offsets
		 * here are fixed by the hardware layout — do not reorder.
		 */
		addr = (u32 *)(hr_dev->reg_base + ROCEE_QP1C_CFG0_0_REG +
			hr_qp->phy_port * sizeof(*context));

		writel(context->qp1c_bytes_4, addr);
		writel(context->sq_rq_bt_l, addr + 1);
		writel(context->qp1c_bytes_12, addr + 2);
		writel(context->qp1c_bytes_16, addr + 3);
		writel(context->qp1c_bytes_20, addr + 4);
		writel(context->cur_rq_wqe_ba_l, addr + 5);
		writel(context->qp1c_bytes_28, addr + 6);
		writel(context->qp1c_bytes_32, addr + 7);
		writel(context->cur_sq_wqe_ba_l, addr + 8);
		writel(context->qp1c_bytes_40, addr + 9);
	}

	/* Modify QP1C status: patch only the QP state field in CFG0 */
	reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG0_0_REG +
			    hr_qp->phy_port * sizeof(*context));
	roce_set_field(reg_val, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M,
		       ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S, new_state);
	roce_write(hr_dev, ROCEE_QP1C_CFG0_0_REG +
		    hr_qp->phy_port * sizeof(*context), reg_val);

	hr_qp->state = new_state;
	if (new_state == IB_QPS_RESET) {
		/* Flush stale completions for this QPN and rewind the rings */
		hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
				     ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
		if (ibqp->send_cq != ibqp->recv_cq)
			hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
					     hr_qp->qpn, NULL);

		hr_qp->rq.head = 0;
		hr_qp->rq.tail = 0;
		hr_qp->sq.head = 0;
		hr_qp->sq.tail = 0;
		hr_qp->sq_next_wqe = 0;
	}

	kfree(context);
	return 0;

out:
	kfree(context);
	return -EINVAL;
}
2571
2572static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
2573                            int attr_mask, enum ib_qp_state cur_state,
2574                            enum ib_qp_state new_state)
2575{
2576        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
2577        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
2578        struct device *dev = &hr_dev->pdev->dev;
2579        struct hns_roce_qp_context *context;
2580        const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
2581        dma_addr_t dma_handle_2 = 0;
2582        dma_addr_t dma_handle = 0;
2583        uint32_t doorbell[2] = {0};
2584        int rq_pa_start = 0;
2585        u64 *mtts_2 = NULL;
2586        int ret = -EINVAL;
2587        u64 *mtts = NULL;
2588        int port;
2589        u8 port_num;
2590        u8 *dmac;
2591        u8 *smac;
2592
2593        context = kzalloc(sizeof(*context), GFP_KERNEL);
2594        if (!context)
2595                return -ENOMEM;
2596
2597        /* Search qp buf's mtts */
2598        mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
2599                                   hr_qp->mtt.first_seg, &dma_handle);
2600        if (mtts == NULL) {
2601                dev_err(dev, "qp buf pa find failed\n");
2602                goto out;
2603        }
2604
2605        /* Search IRRL's mtts */
2606        mtts_2 = hns_roce_table_find(&hr_dev->qp_table.irrl_table, hr_qp->qpn,
2607                                     &dma_handle_2);
2608        if (mtts_2 == NULL) {
2609                dev_err(dev, "qp irrl_table find failed\n");
2610                goto out;
2611        }
2612
2613        /*
2614         * Reset to init
2615         *      Mandatory param:
2616         *      IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS
2617         *      Optional param: NA
2618         */
2619        if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
2620                roce_set_field(context->qpc_bytes_4,
2621                               QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
2622                               QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S,
2623                               to_hr_qp_type(hr_qp->ibqp.qp_type));
2624
2625                roce_set_bit(context->qpc_bytes_4,
2626                             QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0);
2627                roce_set_bit(context->qpc_bytes_4,
2628                             QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
2629                             !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
2630                roce_set_bit(context->qpc_bytes_4,
2631                             QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
2632                             !!(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
2633                             );
2634                roce_set_bit(context->qpc_bytes_4,
2635                             QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S,
2636                             !!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)
2637                             );
2638                roce_set_bit(context->qpc_bytes_4,
2639                             QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1);
2640                roce_set_field(context->qpc_bytes_4,
2641                               QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M,
2642                               QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S,
2643                               ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2644                roce_set_field(context->qpc_bytes_4,
2645                               QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M,
2646                               QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S,
2647                               ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2648                roce_set_field(context->qpc_bytes_4,
2649                               QP_CONTEXT_QPC_BYTES_4_PD_M,
2650                               QP_CONTEXT_QPC_BYTES_4_PD_S,
2651                               to_hr_pd(ibqp->pd)->pdn);
2652                hr_qp->access_flags = attr->qp_access_flags;
2653                roce_set_field(context->qpc_bytes_8,
2654                               QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M,
2655                               QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S,
2656                               to_hr_cq(ibqp->send_cq)->cqn);
2657                roce_set_field(context->qpc_bytes_8,
2658                               QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M,
2659                               QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S,
2660                               to_hr_cq(ibqp->recv_cq)->cqn);
2661
2662                if (ibqp->srq)
2663                        roce_set_field(context->qpc_bytes_12,
2664                                       QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M,
2665                                       QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S,
2666                                       to_hr_srq(ibqp->srq)->srqn);
2667
2668                roce_set_field(context->qpc_bytes_12,
2669                               QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
2670                               QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
2671                               attr->pkey_index);
2672                hr_qp->pkey_index = attr->pkey_index;
2673                roce_set_field(context->qpc_bytes_16,
2674                               QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
2675                               QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
2676
2677        } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
2678                roce_set_field(context->qpc_bytes_4,
2679                               QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
2680                               QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S,
2681                               to_hr_qp_type(hr_qp->ibqp.qp_type));
2682                roce_set_bit(context->qpc_bytes_4,
2683                             QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0);
2684                if (attr_mask & IB_QP_ACCESS_FLAGS) {
2685                        roce_set_bit(context->qpc_bytes_4,
2686                                     QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
2687                                     !!(attr->qp_access_flags &
2688                                     IB_ACCESS_REMOTE_READ));
2689                        roce_set_bit(context->qpc_bytes_4,
2690                                     QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
2691                                     !!(attr->qp_access_flags &
2692                                     IB_ACCESS_REMOTE_WRITE));
2693                } else {
2694                        roce_set_bit(context->qpc_bytes_4,
2695                                     QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
2696                                     !!(hr_qp->access_flags &
2697                                     IB_ACCESS_REMOTE_READ));
2698                        roce_set_bit(context->qpc_bytes_4,
2699                                     QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
2700                                     !!(hr_qp->access_flags &
2701                                     IB_ACCESS_REMOTE_WRITE));
2702                }
2703
2704                roce_set_bit(context->qpc_bytes_4,
2705                             QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1);
2706                roce_set_field(context->qpc_bytes_4,
2707                               QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M,
2708                               QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S,
2709                               ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2710                roce_set_field(context->qpc_bytes_4,
2711                               QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M,
2712                               QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S,
2713                               ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2714                roce_set_field(context->qpc_bytes_4,
2715                               QP_CONTEXT_QPC_BYTES_4_PD_M,
2716                               QP_CONTEXT_QPC_BYTES_4_PD_S,
2717                               to_hr_pd(ibqp->pd)->pdn);
2718
2719                roce_set_field(context->qpc_bytes_8,
2720                               QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M,
2721                               QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S,
2722                               to_hr_cq(ibqp->send_cq)->cqn);
2723                roce_set_field(context->qpc_bytes_8,
2724                               QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M,
2725                               QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S,
2726                               to_hr_cq(ibqp->recv_cq)->cqn);
2727
2728                if (ibqp->srq)
2729                        roce_set_field(context->qpc_bytes_12,
2730                                       QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M,
2731                                       QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S,
2732                                       to_hr_srq(ibqp->srq)->srqn);
2733                if (attr_mask & IB_QP_PKEY_INDEX)
2734                        roce_set_field(context->qpc_bytes_12,
2735                                       QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
2736                                       QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
2737                                       attr->pkey_index);
2738                else
2739                        roce_set_field(context->qpc_bytes_12,
2740                                       QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
2741                                       QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
2742                                       hr_qp->pkey_index);
2743
2744                roce_set_field(context->qpc_bytes_16,
2745                               QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
2746                               QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
2747        } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
2748                if ((attr_mask & IB_QP_ALT_PATH) ||
2749                    (attr_mask & IB_QP_ACCESS_FLAGS) ||
2750                    (attr_mask & IB_QP_PKEY_INDEX) ||
2751                    (attr_mask & IB_QP_QKEY)) {
2752                        dev_err(dev, "INIT2RTR attr_mask error\n");
2753                        goto out;
2754                }
2755
2756                dmac = (u8 *)attr->ah_attr.roce.dmac;
2757
2758                context->sq_rq_bt_l = (u32)(dma_handle);
2759                roce_set_field(context->qpc_bytes_24,
2760                               QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M,
2761                               QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S,
2762                               ((u32)(dma_handle >> 32)));
2763                roce_set_bit(context->qpc_bytes_24,
2764                             QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S,
2765                             1);
2766                roce_set_field(context->qpc_bytes_24,
2767                               QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
2768                               QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S,
2769                               attr->min_rnr_timer);
2770                context->irrl_ba_l = (u32)(dma_handle_2);
2771                roce_set_field(context->qpc_bytes_32,
2772                               QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M,
2773                               QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S,
2774                               ((u32)(dma_handle_2 >> 32)) &
2775                                QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M);
2776                roce_set_field(context->qpc_bytes_32,
2777                               QP_CONTEXT_QPC_BYTES_32_MIG_STATE_M,
2778                               QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S, 0);
2779                roce_set_bit(context->qpc_bytes_32,
2780                             QP_CONTEXT_QPC_BYTE_32_LOCAL_ENABLE_E2E_CREDITS_S,
2781                             1);
2782                roce_set_bit(context->qpc_bytes_32,
2783                             QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S,
2784                             hr_qp->sq_signal_bits);
2785
2786                port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) :
2787                        hr_qp->port;
2788                smac = (u8 *)hr_dev->dev_addr[port];
2789                /* when dmac equals smac or loop_idc is 1, it should loopback */
2790                if (ether_addr_equal_unaligned(dmac, smac) ||
2791                    hr_dev->loop_idc == 0x1)
2792                        roce_set_bit(context->qpc_bytes_32,
2793                              QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S, 1);
2794
2795                roce_set_bit(context->qpc_bytes_32,
2796                             QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S,
2797                             rdma_ah_get_ah_flags(&attr->ah_attr));
2798                roce_set_field(context->qpc_bytes_32,
2799                               QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
2800                               QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S,
2801                               ilog2((unsigned int)attr->max_dest_rd_atomic));
2802
2803                roce_set_field(context->qpc_bytes_36,
2804                               QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
2805                               QP_CONTEXT_QPC_BYTES_36_DEST_QP_S,
2806                               attr->dest_qp_num);
2807
2808                /* Configure GID index */
2809                port_num = rdma_ah_get_port_num(&attr->ah_attr);
2810                roce_set_field(context->qpc_bytes_36,
2811                               QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
2812                               QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S,
2813                                hns_get_gid_index(hr_dev,
2814                                                  port_num - 1,
2815                                                  grh->sgid_index));
2816
2817                memcpy(&(context->dmac_l), dmac, 4);
2818
2819                roce_set_field(context->qpc_bytes_44,
2820                               QP_CONTEXT_QPC_BYTES_44_DMAC_H_M,
2821                               QP_CONTEXT_QPC_BYTES_44_DMAC_H_S,
2822                               *((u16 *)(&dmac[4])));
2823                roce_set_field(context->qpc_bytes_44,
2824                               QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_M,
2825                               QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S,
2826                               rdma_ah_get_static_rate(&attr->ah_attr));
2827                roce_set_field(context->qpc_bytes_44,
2828                               QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
2829                               QP_CONTEXT_QPC_BYTES_44_HOPLMT_S,
2830                               grh->hop_limit);
2831
2832                roce_set_field(context->qpc_bytes_48,
2833                               QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
2834                               QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S,
2835                               grh->flow_label);
2836                roce_set_field(context->qpc_bytes_48,
2837                               QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
2838                               QP_CONTEXT_QPC_BYTES_48_TCLASS_S,
2839                               grh->traffic_class);
2840                roce_set_field(context->qpc_bytes_48,
2841                               QP_CONTEXT_QPC_BYTES_48_MTU_M,
2842                               QP_CONTEXT_QPC_BYTES_48_MTU_S, attr->path_mtu);
2843
2844                memcpy(context->dgid, grh->dgid.raw,
2845                       sizeof(grh->dgid.raw));
2846
2847                dev_dbg(dev, "dmac:%x :%lx\n", context->dmac_l,
2848                        roce_get_field(context->qpc_bytes_44,
2849                                       QP_CONTEXT_QPC_BYTES_44_DMAC_H_M,
2850                                       QP_CONTEXT_QPC_BYTES_44_DMAC_H_S));
2851
2852                roce_set_field(context->qpc_bytes_68,
2853                               QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M,
2854                               QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S,
2855                               hr_qp->rq.head);
2856                roce_set_field(context->qpc_bytes_68,
2857                               QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M,
2858                               QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0);
2859
2860                rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
2861                context->cur_rq_wqe_ba_l = (u32)(mtts[rq_pa_start]);
2862
2863                roce_set_field(context->qpc_bytes_76,
2864                        QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M,
2865                        QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S,
2866                        mtts[rq_pa_start] >> 32);
2867                roce_set_field(context->qpc_bytes_76,
2868                               QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M,
2869                               QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S, 0);
2870
2871                context->rx_rnr_time = 0;
2872
2873                roce_set_field(context->qpc_bytes_84,
2874                               QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_M,
2875                               QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S,
2876                               attr->rq_psn - 1);
2877                roce_set_field(context->qpc_bytes_84,
2878                               QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_M,
2879                               QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S, 0);
2880
2881                roce_set_field(context->qpc_bytes_88,
2882                               QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M,
2883                               QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S,
2884                               attr->rq_psn);
2885                roce_set_bit(context->qpc_bytes_88,
2886                             QP_CONTEXT_QPC_BYTES_88_RX_REQ_PSN_ERR_FLAG_S, 0);
2887                roce_set_bit(context->qpc_bytes_88,
2888                             QP_CONTEXT_QPC_BYTES_88_RX_LAST_OPCODE_FLG_S, 0);
2889                roce_set_field(context->qpc_bytes_88,
2890                        QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_M,
2891                        QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S,
2892                        0);
2893                roce_set_field(context->qpc_bytes_88,
2894                               QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_M,
2895                               QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S,
2896                               0);
2897
2898                context->dma_length = 0;
2899                context->r_key = 0;
2900                context->va_l = 0;
2901                context->va_h = 0;
2902
2903                roce_set_field(context->qpc_bytes_108,
2904                               QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_M,
2905                               QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S, 0);
2906                roce_set_bit(context->qpc_bytes_108,
2907                             QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_FLG_S, 0);
2908                roce_set_bit(context->qpc_bytes_108,
2909                             QP_CONTEXT_QPC_BYTES_108_TRRL_TDB_PSN_FLG_S, 0);
2910
2911                roce_set_field(context->qpc_bytes_112,
2912                               QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_M,
2913                               QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S, 0);
2914                roce_set_field(context->qpc_bytes_112,
2915                               QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_M,
2916                               QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S, 0);
2917
2918                /* For chip resp ack */
2919                roce_set_field(context->qpc_bytes_156,
2920                               QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
2921                               QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
2922                               hr_qp->phy_port);
2923                roce_set_field(context->qpc_bytes_156,
2924                               QP_CONTEXT_QPC_BYTES_156_SL_M,
2925                               QP_CONTEXT_QPC_BYTES_156_SL_S,
2926                               rdma_ah_get_sl(&attr->ah_attr));
2927                hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
2928        } else if (cur_state == IB_QPS_RTR &&
2929                new_state == IB_QPS_RTS) {
2930                /* If exist optional param, return error */
2931                if ((attr_mask & IB_QP_ALT_PATH) ||
2932                    (attr_mask & IB_QP_ACCESS_FLAGS) ||
2933                    (attr_mask & IB_QP_QKEY) ||
2934                    (attr_mask & IB_QP_PATH_MIG_STATE) ||
2935                    (attr_mask & IB_QP_CUR_STATE) ||
2936                    (attr_mask & IB_QP_MIN_RNR_TIMER)) {
2937                        dev_err(dev, "RTR2RTS attr_mask error\n");
2938                        goto out;
2939                }
2940
2941                context->rx_cur_sq_wqe_ba_l = (u32)(mtts[0]);
2942
2943                roce_set_field(context->qpc_bytes_120,
2944                               QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M,
2945                               QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S,
2946                               (mtts[0]) >> 32);
2947
2948                roce_set_field(context->qpc_bytes_124,
2949                               QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M,
2950                               QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S, 0);
2951                roce_set_field(context->qpc_bytes_124,
2952                               QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_M,
2953                               QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S, 0);
2954
2955                roce_set_field(context->qpc_bytes_128,
2956                               QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_M,
2957                               QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S,
2958                               attr->sq_psn);
2959                roce_set_bit(context->qpc_bytes_128,
2960                             QP_CONTEXT_QPC_BYTES_128_RX_ACK_PSN_ERR_FLG_S, 0);
2961                roce_set_field(context->qpc_bytes_128,
2962                             QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_M,
2963                             QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S,
2964                             0);
2965                roce_set_bit(context->qpc_bytes_128,
2966                             QP_CONTEXT_QPC_BYTES_128_IRRL_PSN_VLD_FLG_S, 0);
2967
2968                roce_set_field(context->qpc_bytes_132,
2969                               QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_M,
2970                               QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S, 0);
2971                roce_set_field(context->qpc_bytes_132,
2972                               QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_M,
2973                               QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S, 0);
2974
2975                roce_set_field(context->qpc_bytes_136,
2976                               QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_M,
2977                               QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S,
2978                               attr->sq_psn);
2979                roce_set_field(context->qpc_bytes_136,
2980                               QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_M,
2981                               QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S,
2982                               attr->sq_psn);
2983
2984                roce_set_field(context->qpc_bytes_140,
2985                               QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_M,
2986                               QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S,
2987                               (attr->sq_psn >> SQ_PSN_SHIFT));
2988                roce_set_field(context->qpc_bytes_140,
2989                               QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_M,
2990                               QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S, 0);
2991                roce_set_bit(context->qpc_bytes_140,
2992                             QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S, 0);
2993
2994                roce_set_field(context->qpc_bytes_148,
2995                               QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M,
2996                               QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S, 0);
2997                roce_set_field(context->qpc_bytes_148,
2998                               QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
2999                               QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S,
3000                               attr->retry_cnt);
3001                roce_set_field(context->qpc_bytes_148,
3002                               QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M,
3003                               QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S,
3004                               attr->rnr_retry);
3005                roce_set_field(context->qpc_bytes_148,
3006                               QP_CONTEXT_QPC_BYTES_148_LSN_M,
3007                               QP_CONTEXT_QPC_BYTES_148_LSN_S, 0x100);
3008
3009                context->rnr_retry = 0;
3010
3011                roce_set_field(context->qpc_bytes_156,
3012                               QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M,
3013                               QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S,
3014                               attr->retry_cnt);
3015                if (attr->timeout < 0x12) {
3016                        dev_info(dev, "ack timeout value(0x%x) must bigger than 0x12.\n",
3017                                 attr->timeout);
3018                        roce_set_field(context->qpc_bytes_156,
3019                                       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
3020                                       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
3021                                       0x12);
3022                } else {
3023                        roce_set_field(context->qpc_bytes_156,
3024                                       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
3025                                       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
3026                                       attr->timeout);
3027                }
3028                roce_set_field(context->qpc_bytes_156,
3029                               QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M,
3030                               QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S,
3031                               attr->rnr_retry);
3032                roce_set_field(context->qpc_bytes_156,
3033                               QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
3034                               QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
3035                               hr_qp->phy_port);
3036                roce_set_field(context->qpc_bytes_156,
3037                               QP_CONTEXT_QPC_BYTES_156_SL_M,
3038                               QP_CONTEXT_QPC_BYTES_156_SL_S,
3039                               rdma_ah_get_sl(&attr->ah_attr));
3040                hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
3041                roce_set_field(context->qpc_bytes_156,
3042                               QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
3043                               QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S,
3044                               ilog2((unsigned int)attr->max_rd_atomic));
3045                roce_set_field(context->qpc_bytes_156,
3046                               QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_M,
3047                               QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S, 0);
3048                context->pkt_use_len = 0;
3049
3050                roce_set_field(context->qpc_bytes_164,
3051                               QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M,
3052                               QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S, attr->sq_psn);
3053                roce_set_field(context->qpc_bytes_164,
3054                               QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_M,
3055                               QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S, 0);
3056
3057                roce_set_field(context->qpc_bytes_168,
3058                               QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_M,
3059                               QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S,
3060                               attr->sq_psn);
3061                roce_set_field(context->qpc_bytes_168,
3062                               QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_M,
3063                               QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S, 0);
3064                roce_set_field(context->qpc_bytes_168,
3065                               QP_CONTEXT_QPC_BYTES_168_DB_TYPE_M,
3066                               QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S, 0);
3067                roce_set_bit(context->qpc_bytes_168,
3068                             QP_CONTEXT_QPC_BYTES_168_MSG_LP_IND_S, 0);
3069                roce_set_bit(context->qpc_bytes_168,
3070                             QP_CONTEXT_QPC_BYTES_168_CSDB_LP_IND_S, 0);
3071                roce_set_bit(context->qpc_bytes_168,
3072                             QP_CONTEXT_QPC_BYTES_168_QP_ERR_FLG_S, 0);
3073                context->sge_use_len = 0;
3074
3075                roce_set_field(context->qpc_bytes_176,
3076                               QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_M,
3077                               QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S, 0);
3078                roce_set_field(context->qpc_bytes_176,
3079                               QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_M,
3080                               QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S,
3081                               0);
3082                roce_set_field(context->qpc_bytes_180,
3083                               QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_M,
3084                               QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S, 0);
3085                roce_set_field(context->qpc_bytes_180,
3086                               QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M,
3087                               QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S, 0);
3088
3089                context->tx_cur_sq_wqe_ba_l = (u32)(mtts[0]);
3090
3091                roce_set_field(context->qpc_bytes_188,
3092                               QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M,
3093                               QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S,
3094                               (mtts[0]) >> 32);
3095                roce_set_bit(context->qpc_bytes_188,
3096                             QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S, 0);
3097                roce_set_field(context->qpc_bytes_188,
3098                               QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M,
3099                               QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S,
3100                               0);
3101        } else if (!((cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
3102                   (cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) ||
3103                   (cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) ||
3104                   (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
3105                   (cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) ||
3106                   (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
3107                   (cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) ||
3108                   (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR))) {
3109                dev_err(dev, "not support this status migration\n");
3110                goto out;
3111        }
3112
	/* Every state migration must update the QPC state field */
3114        roce_set_field(context->qpc_bytes_144,
3115                       QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
3116                       QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, new_state);
3117
3118        /* SW pass context to HW */
3119        ret = hns_roce_v1_qp_modify(hr_dev, &hr_qp->mtt,
3120                                    to_hns_roce_state(cur_state),
3121                                    to_hns_roce_state(new_state), context,
3122                                    hr_qp);
3123        if (ret) {
3124                dev_err(dev, "hns_roce_qp_modify failed\n");
3125                goto out;
3126        }
3127
	/*
	 * The driver uses rst2init instead of init2init, so the hardware
	 * needs to flush the RQ head pointer again via the doorbell.
	 */
3132        if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
3133                /* Memory barrier */
3134                wmb();
3135
3136                roce_set_field(doorbell[0], RQ_DOORBELL_U32_4_RQ_HEAD_M,
3137                               RQ_DOORBELL_U32_4_RQ_HEAD_S, hr_qp->rq.head);
3138                roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_QPN_M,
3139                               RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
3140                roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_CMD_M,
3141                               RQ_DOORBELL_U32_8_CMD_S, 1);
3142                roce_set_bit(doorbell[1], RQ_DOORBELL_U32_8_HW_SYNC_S, 1);
3143
3144                if (ibqp->uobject) {
3145                        hr_qp->rq.db_reg_l = hr_dev->reg_base +
3146                                     ROCEE_DB_OTHERS_L_0_REG +
3147                                     DB_REG_OFFSET * hr_dev->priv_uar.index;
3148                }
3149
3150                hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
3151        }
3152
3153        hr_qp->state = new_state;
3154
3155        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
3156                hr_qp->resp_depth = attr->max_dest_rd_atomic;
3157        if (attr_mask & IB_QP_PORT) {
3158                hr_qp->port = attr->port_num - 1;
3159                hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
3160        }
3161
3162        if (new_state == IB_QPS_RESET && !ibqp->uobject) {
3163                hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
3164                                     ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
3165                if (ibqp->send_cq != ibqp->recv_cq)
3166                        hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
3167                                             hr_qp->qpn, NULL);
3168
3169                hr_qp->rq.head = 0;
3170                hr_qp->rq.tail = 0;
3171                hr_qp->sq.head = 0;
3172                hr_qp->sq.tail = 0;
3173                hr_qp->sq_next_wqe = 0;
3174        }
3175out:
3176        kfree(context);
3177        return ret;
3178}
3179
3180int hns_roce_v1_modify_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
3181                          int attr_mask, enum ib_qp_state cur_state,
3182                          enum ib_qp_state new_state)
3183{
3184
3185        if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
3186                return hns_roce_v1_m_sqp(ibqp, attr, attr_mask, cur_state,
3187                                         new_state);
3188        else
3189                return hns_roce_v1_m_qp(ibqp, attr, attr_mask, cur_state,
3190                                        new_state);
3191}
3192
3193static enum ib_qp_state to_ib_qp_state(enum hns_roce_qp_state state)
3194{
3195        switch (state) {
3196        case HNS_ROCE_QP_STATE_RST:
3197                return IB_QPS_RESET;
3198        case HNS_ROCE_QP_STATE_INIT:
3199                return IB_QPS_INIT;
3200        case HNS_ROCE_QP_STATE_RTR:
3201                return IB_QPS_RTR;
3202        case HNS_ROCE_QP_STATE_RTS:
3203                return IB_QPS_RTS;
3204        case HNS_ROCE_QP_STATE_SQD:
3205                return IB_QPS_SQD;
3206        case HNS_ROCE_QP_STATE_ERR:
3207                return IB_QPS_ERR;
3208        default:
3209                return IB_QPS_ERR;
3210        }
3211}
3212
3213static int hns_roce_v1_query_qpc(struct hns_roce_dev *hr_dev,
3214                                 struct hns_roce_qp *hr_qp,
3215                                 struct hns_roce_qp_context *hr_context)
3216{
3217        struct hns_roce_cmd_mailbox *mailbox;
3218        int ret;
3219
3220        mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3221        if (IS_ERR(mailbox))
3222                return PTR_ERR(mailbox);
3223
3224        ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
3225                                HNS_ROCE_CMD_QUERY_QP,
3226                                HNS_ROCE_CMD_TIMEOUT_MSECS);
3227        if (!ret)
3228                memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
3229        else
3230                dev_err(&hr_dev->pdev->dev, "QUERY QP cmd process error\n");
3231
3232        hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3233
3234        return ret;
3235}
3236
3237static int hns_roce_v1_q_sqp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3238                             int qp_attr_mask,
3239                             struct ib_qp_init_attr *qp_init_attr)
3240{
3241        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3242        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3243        struct hns_roce_sqp_context context;
3244        u32 addr;
3245
3246        mutex_lock(&hr_qp->mutex);
3247
3248        if (hr_qp->state == IB_QPS_RESET) {
3249                qp_attr->qp_state = IB_QPS_RESET;
3250                goto done;
3251        }
3252
3253        addr = ROCEE_QP1C_CFG0_0_REG +
3254                hr_qp->port * sizeof(struct hns_roce_sqp_context);
3255        context.qp1c_bytes_4 = roce_read(hr_dev, addr);
3256        context.sq_rq_bt_l = roce_read(hr_dev, addr + 1);
3257        context.qp1c_bytes_12 = roce_read(hr_dev, addr + 2);
3258        context.qp1c_bytes_16 = roce_read(hr_dev, addr + 3);
3259        context.qp1c_bytes_20 = roce_read(hr_dev, addr + 4);
3260        context.cur_rq_wqe_ba_l = roce_read(hr_dev, addr + 5);
3261        context.qp1c_bytes_28 = roce_read(hr_dev, addr + 6);
3262        context.qp1c_bytes_32 = roce_read(hr_dev, addr + 7);
3263        context.cur_sq_wqe_ba_l = roce_read(hr_dev, addr + 8);
3264        context.qp1c_bytes_40 = roce_read(hr_dev, addr + 9);
3265
3266        hr_qp->state = roce_get_field(context.qp1c_bytes_4,
3267                                      QP1C_BYTES_4_QP_STATE_M,
3268                                      QP1C_BYTES_4_QP_STATE_S);
3269        qp_attr->qp_state       = hr_qp->state;
3270        qp_attr->path_mtu       = IB_MTU_256;
3271        qp_attr->path_mig_state = IB_MIG_ARMED;
3272        qp_attr->qkey           = QKEY_VAL;
3273        qp_attr->rq_psn         = 0;
3274        qp_attr->sq_psn         = 0;
3275        qp_attr->dest_qp_num    = 1;
3276        qp_attr->qp_access_flags = 6;
3277
3278        qp_attr->pkey_index = roce_get_field(context.qp1c_bytes_20,
3279                                             QP1C_BYTES_20_PKEY_IDX_M,
3280                                             QP1C_BYTES_20_PKEY_IDX_S);
3281        qp_attr->port_num = hr_qp->port + 1;
3282        qp_attr->sq_draining = 0;
3283        qp_attr->max_rd_atomic = 0;
3284        qp_attr->max_dest_rd_atomic = 0;
3285        qp_attr->min_rnr_timer = 0;
3286        qp_attr->timeout = 0;
3287        qp_attr->retry_cnt = 0;
3288        qp_attr->rnr_retry = 0;
3289        qp_attr->alt_timeout = 0;
3290
3291done:
3292        qp_attr->cur_qp_state = qp_attr->qp_state;
3293        qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
3294        qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
3295        qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
3296        qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
3297        qp_attr->cap.max_inline_data = 0;
3298        qp_init_attr->cap = qp_attr->cap;
3299        qp_init_attr->create_flags = 0;
3300
3301        mutex_unlock(&hr_qp->mutex);
3302
3303        return 0;
3304}
3305
3306static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3307                            int qp_attr_mask,
3308                            struct ib_qp_init_attr *qp_init_attr)
3309{
3310        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3311        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3312        struct device *dev = &hr_dev->pdev->dev;
3313        struct hns_roce_qp_context *context;
3314        int tmp_qp_state = 0;
3315        int ret = 0;
3316        int state;
3317
3318        context = kzalloc(sizeof(*context), GFP_KERNEL);
3319        if (!context)
3320                return -ENOMEM;
3321
3322        memset(qp_attr, 0, sizeof(*qp_attr));
3323        memset(qp_init_attr, 0, sizeof(*qp_init_attr));
3324
3325        mutex_lock(&hr_qp->mutex);
3326
3327        if (hr_qp->state == IB_QPS_RESET) {
3328                qp_attr->qp_state = IB_QPS_RESET;
3329                goto done;
3330        }
3331
3332        ret = hns_roce_v1_query_qpc(hr_dev, hr_qp, context);
3333        if (ret) {
3334                dev_err(dev, "query qpc error\n");
3335                ret = -EINVAL;
3336                goto out;
3337        }
3338
3339        state = roce_get_field(context->qpc_bytes_144,
3340                               QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
3341                               QP_CONTEXT_QPC_BYTES_144_QP_STATE_S);
3342        tmp_qp_state = (int)to_ib_qp_state((enum hns_roce_qp_state)state);
3343        if (tmp_qp_state == -1) {
3344                dev_err(dev, "to_ib_qp_state error\n");
3345                ret = -EINVAL;
3346                goto out;
3347        }
3348        hr_qp->state = (u8)tmp_qp_state;
3349        qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
3350        qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->qpc_bytes_48,
3351                                               QP_CONTEXT_QPC_BYTES_48_MTU_M,
3352                                               QP_CONTEXT_QPC_BYTES_48_MTU_S);
3353        qp_attr->path_mig_state = IB_MIG_ARMED;
3354        if (hr_qp->ibqp.qp_type == IB_QPT_UD)
3355                qp_attr->qkey = QKEY_VAL;
3356
3357        qp_attr->rq_psn = roce_get_field(context->qpc_bytes_88,
3358                                         QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M,
3359                                         QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S);
3360        qp_attr->sq_psn = (u32)roce_get_field(context->qpc_bytes_164,
3361                                             QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M,
3362                                             QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S);
3363        qp_attr->dest_qp_num = (u8)roce_get_field(context->qpc_bytes_36,
3364                                        QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
3365                                        QP_CONTEXT_QPC_BYTES_36_DEST_QP_S);
3366        qp_attr->qp_access_flags = ((roce_get_bit(context->qpc_bytes_4,
3367                        QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S)) << 2) |
3368                                   ((roce_get_bit(context->qpc_bytes_4,
3369                        QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S)) << 1) |
3370                                   ((roce_get_bit(context->qpc_bytes_4,
3371                        QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S)) << 3);
3372
3373        if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
3374            hr_qp->ibqp.qp_type == IB_QPT_UC) {
3375                struct ib_global_route *grh =
3376                        rdma_ah_retrieve_grh(&qp_attr->ah_attr);
3377
3378                rdma_ah_set_sl(&qp_attr->ah_attr,
3379                               roce_get_field(context->qpc_bytes_156,
3380                                              QP_CONTEXT_QPC_BYTES_156_SL_M,
3381                                              QP_CONTEXT_QPC_BYTES_156_SL_S));
3382                rdma_ah_set_ah_flags(&qp_attr->ah_attr, IB_AH_GRH);
3383                grh->flow_label =
3384                        roce_get_field(context->qpc_bytes_48,
3385                                       QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
3386                                       QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S);
3387                grh->sgid_index =
3388                        roce_get_field(context->qpc_bytes_36,
3389                                       QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
3390                                       QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S);
3391                grh->hop_limit =
3392                        roce_get_field(context->qpc_bytes_44,
3393                                       QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
3394                                       QP_CONTEXT_QPC_BYTES_44_HOPLMT_S);
3395                grh->traffic_class =
3396                        roce_get_field(context->qpc_bytes_48,
3397                                       QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
3398                                       QP_CONTEXT_QPC_BYTES_48_TCLASS_S);
3399
3400                memcpy(grh->dgid.raw, context->dgid,
3401                       sizeof(grh->dgid.raw));
3402        }
3403
3404        qp_attr->pkey_index = roce_get_field(context->qpc_bytes_12,
3405                              QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
3406                              QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S);
3407        qp_attr->port_num = hr_qp->port + 1;
3408        qp_attr->sq_draining = 0;
3409        qp_attr->max_rd_atomic = roce_get_field(context->qpc_bytes_156,
3410                                 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
3411                                 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S);
3412        qp_attr->max_dest_rd_atomic = roce_get_field(context->qpc_bytes_32,
3413                                 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
3414                                 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S);
3415        qp_attr->min_rnr_timer = (u8)(roce_get_field(context->qpc_bytes_24,
3416                        QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
3417                        QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S));
3418        qp_attr->timeout = (u8)(roce_get_field(context->qpc_bytes_156,
3419                            QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
3420                            QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S));
3421        qp_attr->retry_cnt = roce_get_field(context->qpc_bytes_148,
3422                             QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
3423                             QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S);
3424        qp_attr->rnr_retry = context->rnr_retry;
3425
3426done:
3427        qp_attr->cur_qp_state = qp_attr->qp_state;
3428        qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
3429        qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
3430
3431        if (!ibqp->uobject) {
3432                qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
3433                qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
3434        } else {
3435                qp_attr->cap.max_send_wr = 0;
3436                qp_attr->cap.max_send_sge = 0;
3437        }
3438
3439        qp_init_attr->cap = qp_attr->cap;
3440
3441out:
3442        mutex_unlock(&hr_qp->mutex);
3443        kfree(context);
3444        return ret;
3445}
3446
3447int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3448                         int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
3449{
3450        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3451
3452        return hr_qp->doorbell_qpn <= 1 ?
3453                hns_roce_v1_q_sqp(ibqp, qp_attr, qp_attr_mask, qp_init_attr) :
3454                hns_roce_v1_q_qp(ibqp, qp_attr, qp_attr_mask, qp_init_attr);
3455}
3456
/*
 * Poll hardware send-doorbell (SDB) progress for a QP being destroyed.
 *
 * Stage 1 waits until the SDB send pointer has advanced past the issue
 * pointer recorded when the QP was moved to ERR; stage 2 then waits for
 * the SDB invalidate counter to advance past the snapshot stored in
 * *sdb_inv_cnt.  On full completion *wait_stage is set to
 * HNS_ROCE_V1_DB_WAIT_OK.  Expiry of the polling window is NOT an error:
 * the function returns 0 with *wait_stage left short of WAIT_OK so the
 * caller can retry later (e.g. from the destroy-QP workqueue).
 *
 * Returns 0 on success/timeout, -EINVAL if *wait_stage is out of range.
 */
static int check_qp_db_process_status(struct hns_roce_dev *hr_dev,
				      struct hns_roce_qp *hr_qp,
				      u32 sdb_issue_ptr,
				      u32 *sdb_inv_cnt,
				      u32 *wait_stage)
{
	struct device *dev = &hr_dev->pdev->dev;
	u32 sdb_retry_cnt, old_retry;
	u32 sdb_send_ptr, old_send;
	u32 success_flags = 0;
	u32 cur_cnt, old_cnt;
	unsigned long end;
	u32 send_ptr;
	u32 inv_cnt;
	u32 tsp_st;

	/* Callers must be in STAGE1 or STAGE2; anything else is a bug. */
	if (*wait_stage > HNS_ROCE_V1_DB_STAGE2 ||
	    *wait_stage < HNS_ROCE_V1_DB_STAGE1) {
		dev_err(dev, "QP(0x%lx) db status wait stage(%d) error!\n",
			hr_qp->qpn, *wait_stage);
		return -EINVAL;
	}

	/* Calculate the total timeout for the entire verification process */
	end = msecs_to_jiffies(HNS_ROCE_V1_CHECK_DB_TIMEOUT_MSECS) + jiffies;

	if (*wait_stage == HNS_ROCE_V1_DB_STAGE1) {
		/* Query db process status, until hw process completely */
		sdb_send_ptr = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
		while (roce_hw_index_cmp_lt(sdb_send_ptr, sdb_issue_ptr,
					    ROCEE_SDB_PTR_CMP_BITS)) {
			if (!time_before(jiffies, end)) {
				/* Timed out: report stage1 still pending. */
				dev_dbg(dev, "QP(0x%lx) db process stage1 timeout. issue 0x%x send 0x%x.\n",
					hr_qp->qpn, sdb_issue_ptr,
					sdb_send_ptr);
				return 0;
			}

			msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);
			sdb_send_ptr = roce_read(hr_dev,
						 ROCEE_SDB_SEND_PTR_REG);
		}

		/*
		 * Send pointer exactly equals the issue pointer: hardware
		 * may still be mid-flight, so additionally wait until the
		 * TSP back-pressure state or the send+retry counters show
		 * forward progress beyond SDB_ST_CMP_VAL.
		 */
		if (roce_get_field(sdb_issue_ptr,
				   ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_M,
				   ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S) ==
		    roce_get_field(sdb_send_ptr,
				   ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
				   ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S)) {
			old_send = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
			old_retry = roce_read(hr_dev, ROCEE_SDB_RETRY_CNT_REG);

			do {
				tsp_st = roce_read(hr_dev, ROCEE_TSP_BP_ST_REG);
				/* Queue-head FIFO empty: hw is fully done. */
				if (roce_get_bit(tsp_st,
					ROCEE_TSP_BP_ST_QH_FIFO_ENTRY_S) == 1) {
					*wait_stage = HNS_ROCE_V1_DB_WAIT_OK;
					return 0;
				}

				if (!time_before(jiffies, end)) {
					dev_dbg(dev, "QP(0x%lx) db process stage1 timeout when send ptr equals issue ptr.\n"
						     "issue 0x%x send 0x%x.\n",
						hr_qp->qpn, sdb_issue_ptr,
						sdb_send_ptr);
					return 0;
				}

				msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);

				sdb_send_ptr = roce_read(hr_dev,
							ROCEE_SDB_SEND_PTR_REG);
				sdb_retry_cnt = roce_read(hr_dev,
						       ROCEE_SDB_RETRY_CNT_REG);
				/* Progress metric = send pointer + retries. */
				cur_cnt = roce_get_field(sdb_send_ptr,
					ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
					ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
					roce_get_field(sdb_retry_cnt,
					ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
					ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
				if (!roce_get_bit(tsp_st,
					ROCEE_CNT_CLR_CE_CNT_CLR_CE_S)) {
					/* Counters not cleared: compare against
					 * the full send+retry baseline.
					 */
					old_cnt = roce_get_field(old_send,
					ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
					ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
					roce_get_field(old_retry,
					ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
					ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
					if (cur_cnt - old_cnt > SDB_ST_CMP_VAL)
						success_flags = 1;
				} else {
					/* Counters were cleared: baseline is
					 * the send pointer alone; fold the new
					 * retry count into the baseline so the
					 * next iteration compares correctly.
					 */
					old_cnt = roce_get_field(old_send,
					ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
					ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S);
					if (cur_cnt - old_cnt >
					    SDB_ST_CMP_VAL) {
						success_flags = 1;
					} else {
						send_ptr =
							roce_get_field(old_send,
					    ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
					    ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
					    roce_get_field(sdb_retry_cnt,
					    ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
					    ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
					    roce_set_field(old_send,
					    ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
					    ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S,
						send_ptr);
					}
				}
			} while (!success_flags);
		}

		*wait_stage = HNS_ROCE_V1_DB_STAGE2;

		/* Get list pointer */
		*sdb_inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
		dev_dbg(dev, "QP(0x%lx) db process stage2. inv cnt = 0x%x.\n",
			hr_qp->qpn, *sdb_inv_cnt);
	}

	if (*wait_stage == HNS_ROCE_V1_DB_STAGE2) {
		/* Query db's list status, until hw reversal */
		inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
		while (roce_hw_index_cmp_lt(inv_cnt,
					    *sdb_inv_cnt + SDB_INV_CNT_OFFSET,
					    ROCEE_SDB_CNT_CMP_BITS)) {
			if (!time_before(jiffies, end)) {
				/* Timed out: report stage2 still pending. */
				dev_dbg(dev, "QP(0x%lx) db process stage2 timeout. inv cnt 0x%x.\n",
					hr_qp->qpn, inv_cnt);
				return 0;
			}

			msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);
			inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
		}

		*wait_stage = HNS_ROCE_V1_DB_WAIT_OK;
	}

	return 0;
}
3600
3601static int check_qp_reset_state(struct hns_roce_dev *hr_dev,
3602                                struct hns_roce_qp *hr_qp,
3603                                struct hns_roce_qp_work *qp_work_entry,
3604                                int *is_timeout)
3605{
3606        struct device *dev = &hr_dev->pdev->dev;
3607        u32 sdb_issue_ptr;
3608        int ret;
3609
3610        if (hr_qp->state != IB_QPS_RESET) {
3611                /* Set qp to ERR, waiting for hw complete processing all dbs */
3612                ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
3613                                            IB_QPS_ERR);
3614                if (ret) {
3615                        dev_err(dev, "Modify QP(0x%lx) to ERR failed!\n",
3616                                hr_qp->qpn);
3617                        return ret;
3618                }
3619
3620                /* Record issued doorbell */
3621                sdb_issue_ptr = roce_read(hr_dev, ROCEE_SDB_ISSUE_PTR_REG);
3622                qp_work_entry->sdb_issue_ptr = sdb_issue_ptr;
3623                qp_work_entry->db_wait_stage = HNS_ROCE_V1_DB_STAGE1;
3624
3625                /* Query db process status, until hw process completely */
3626                ret = check_qp_db_process_status(hr_dev, hr_qp, sdb_issue_ptr,
3627                                                 &qp_work_entry->sdb_inv_cnt,
3628                                                 &qp_work_entry->db_wait_stage);
3629                if (ret) {
3630                        dev_err(dev, "Check QP(0x%lx) db process status failed!\n",
3631                                hr_qp->qpn);
3632                        return ret;
3633                }
3634
3635                if (qp_work_entry->db_wait_stage != HNS_ROCE_V1_DB_WAIT_OK) {
3636                        qp_work_entry->sche_cnt = 0;
3637                        *is_timeout = 1;
3638                        return 0;
3639                }
3640
3641                /* Modify qp to reset before destroying qp */
3642                ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
3643                                            IB_QPS_RESET);
3644                if (ret) {
3645                        dev_err(dev, "Modify QP(0x%lx) to RST failed!\n",
3646                                hr_qp->qpn);
3647                        return ret;
3648                }
3649        }
3650
3651        return 0;
3652}
3653
3654static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
3655{
3656        struct hns_roce_qp_work *qp_work_entry;
3657        struct hns_roce_v1_priv *priv;
3658        struct hns_roce_dev *hr_dev;
3659        struct hns_roce_qp *hr_qp;
3660        struct device *dev;
3661        unsigned long qpn;
3662        int ret;
3663
3664        qp_work_entry = container_of(work, struct hns_roce_qp_work, work);
3665        hr_dev = to_hr_dev(qp_work_entry->ib_dev);
3666        dev = &hr_dev->pdev->dev;
3667        priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
3668        hr_qp = qp_work_entry->qp;
3669        qpn = hr_qp->qpn;
3670
3671        dev_dbg(dev, "Schedule destroy QP(0x%lx) work.\n", qpn);
3672
3673        qp_work_entry->sche_cnt++;
3674
3675        /* Query db process status, until hw process completely */
3676        ret = check_qp_db_process_status(hr_dev, hr_qp,
3677                                         qp_work_entry->sdb_issue_ptr,
3678                                         &qp_work_entry->sdb_inv_cnt,
3679                                         &qp_work_entry->db_wait_stage);
3680        if (ret) {
3681                dev_err(dev, "Check QP(0x%lx) db process status failed!\n",
3682                        qpn);
3683                return;
3684        }
3685
3686        if (qp_work_entry->db_wait_stage != HNS_ROCE_V1_DB_WAIT_OK &&
3687            priv->des_qp.requeue_flag) {
3688                queue_work(priv->des_qp.qp_wq, work);
3689                return;
3690        }
3691
3692        /* Modify qp to reset before destroying qp */
3693        ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
3694                                    IB_QPS_RESET);
3695        if (ret) {
3696                dev_err(dev, "Modify QP(0x%lx) to RST failed!\n", qpn);
3697                return;
3698        }
3699
3700        hns_roce_qp_remove(hr_dev, hr_qp);
3701        hns_roce_qp_free(hr_dev, hr_qp);
3702
3703        if (hr_qp->ibqp.qp_type == IB_QPT_RC) {
3704                /* RC QP, release QPN */
3705                hns_roce_release_range_qp(hr_dev, qpn, 1);
3706                kfree(hr_qp);
3707        } else
3708                kfree(hr_to_hr_sqp(hr_qp));
3709
3710        kfree(qp_work_entry);
3711
3712        dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", qpn);
3713}
3714
/*
 * Destroy a QP.  If the hardware has not yet drained this QP's send
 * doorbells, the final teardown (QP remove/free, QPN release, struct
 * free) is deferred to the destroy-QP workqueue; otherwise everything
 * is released inline.
 *
 * Returns 0 on success (destruction may still complete asynchronously)
 * or a negative errno.
 */
int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_qp_work qp_work_entry;
	struct hns_roce_qp_work *qp_work;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_cq *send_cq, *recv_cq;
	int is_user = !!ibqp->pd->uobject;
	int is_timeout = 0;
	int ret;

	/* Drive the QP to RESET, or learn that the doorbell drain timed out. */
	ret = check_qp_reset_state(hr_dev, hr_qp, &qp_work_entry, &is_timeout);
	if (ret) {
		dev_err(dev, "QP reset state check failed(%d)!\n", ret);
		return ret;
	}

	send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
	recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);

	hns_roce_lock_cqs(send_cq, recv_cq);
	/* Kernel-owned QPs: scrub this QP's stale CQEs from both CQs. */
	if (!is_user) {
		__hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
				       to_hr_srq(hr_qp->ibqp.srq) : NULL);
		if (send_cq != recv_cq)
			__hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL);
	}
	hns_roce_unlock_cqs(send_cq, recv_cq);

	/* On timeout these steps are deferred to the work function. */
	if (!is_timeout) {
		hns_roce_qp_remove(hr_dev, hr_qp);
		hns_roce_qp_free(hr_dev, hr_qp);

		/* RC QP, release QPN */
		if (hr_qp->ibqp.qp_type == IB_QPT_RC)
			hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
	}

	hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);

	if (is_user)
		ib_umem_release(hr_qp->umem);
	else {
		kfree(hr_qp->sq.wrid);
		kfree(hr_qp->rq.wrid);

		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
	}

	if (!is_timeout) {
		/* Inline path: free the QP structure itself. */
		if (hr_qp->ibqp.qp_type == IB_QPT_RC)
			kfree(hr_qp);
		else
			kfree(hr_to_hr_sqp(hr_qp));
	} else {
		/*
		 * Deferred path: copy the on-stack drain state into a
		 * heap-allocated work item (qp_work_entry goes out of scope
		 * on return) and let the workqueue finish destruction.
		 */
		qp_work = kzalloc(sizeof(*qp_work), GFP_KERNEL);
		if (!qp_work)
			return -ENOMEM;

		INIT_WORK(&qp_work->work, hns_roce_v1_destroy_qp_work_fn);
		qp_work->ib_dev = &hr_dev->ib_dev;
		qp_work->qp             = hr_qp;
		qp_work->db_wait_stage  = qp_work_entry.db_wait_stage;
		qp_work->sdb_issue_ptr  = qp_work_entry.sdb_issue_ptr;
		qp_work->sdb_inv_cnt    = qp_work_entry.sdb_inv_cnt;
		qp_work->sche_cnt       = qp_work_entry.sche_cnt;

		priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
		queue_work(priv->des_qp.qp_wq, &qp_work->work);
		dev_dbg(dev, "Begin destroy QP(0x%lx) work.\n", hr_qp->qpn);
	}

	return 0;
}
3791
3792int hns_roce_v1_destroy_cq(struct ib_cq *ibcq)
3793{
3794        struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
3795        struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
3796        struct device *dev = &hr_dev->pdev->dev;
3797        u32 cqe_cnt_ori;
3798        u32 cqe_cnt_cur;
3799        u32 cq_buf_size;
3800        int wait_time = 0;
3801        int ret = 0;
3802
3803        hns_roce_free_cq(hr_dev, hr_cq);
3804
3805        /*
3806         * Before freeing cq buffer, we need to ensure that the outstanding CQE
3807         * have been written by checking the CQE counter.
3808         */
3809        cqe_cnt_ori = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
3810        while (1) {
3811                if (roce_read(hr_dev, ROCEE_CAEP_CQE_WCMD_EMPTY) &
3812                    HNS_ROCE_CQE_WCMD_EMPTY_BIT)
3813                        break;
3814
3815                cqe_cnt_cur = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
3816                if ((cqe_cnt_cur - cqe_cnt_ori) >= HNS_ROCE_MIN_CQE_CNT)
3817                        break;
3818
3819                msleep(HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS);
3820                if (wait_time > HNS_ROCE_MAX_FREE_CQ_WAIT_CNT) {
3821                        dev_warn(dev, "Destroy cq 0x%lx timeout!\n",
3822                                hr_cq->cqn);
3823                        ret = -ETIMEDOUT;
3824                        break;
3825                }
3826                wait_time++;
3827        }
3828
3829        hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
3830
3831        if (ibcq->uobject)
3832                ib_umem_release(hr_cq->umem);
3833        else {
3834                /* Free the buff of stored cq */
3835                cq_buf_size = (ibcq->cqe + 1) * hr_dev->caps.cq_entry_sz;
3836                hns_roce_buf_free(hr_dev, cq_buf_size, &hr_cq->hr_buf.hr_buf);
3837        }
3838
3839        kfree(hr_cq);
3840
3841        return ret;
3842}
3843
/* Driver-private state for the v1 (hip06) backend; referenced via .priv. */
struct hns_roce_v1_priv hr_v1_priv;

/* Hardware operations table wiring the v1 (hip06) backend into the core. */
struct hns_roce_hw hns_roce_hw_v1 = {
	.reset = hns_roce_v1_reset,
	.hw_profile = hns_roce_v1_profile,
	.hw_init = hns_roce_v1_init,
	.hw_exit = hns_roce_v1_exit,
	.set_gid = hns_roce_v1_set_gid,
	.set_mac = hns_roce_v1_set_mac,
	.set_mtu = hns_roce_v1_set_mtu,
	.write_mtpt = hns_roce_v1_write_mtpt,
	.write_cqc = hns_roce_v1_write_cqc,
	.clear_hem = hns_roce_v1_clear_hem,
	.modify_qp = hns_roce_v1_modify_qp,
	.query_qp = hns_roce_v1_query_qp,
	.destroy_qp = hns_roce_v1_destroy_qp,
	.post_send = hns_roce_v1_post_send,
	.post_recv = hns_roce_v1_post_recv,
	.req_notify_cq = hns_roce_v1_req_notify_cq,
	.poll_cq = hns_roce_v1_poll_cq,
	.dereg_mr = hns_roce_v1_dereg_mr,
	.destroy_cq = hns_roce_v1_destroy_cq,
	.priv = &hr_v1_priv,
};
3868