linux/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <net/addrconf.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>

#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v2.h"

static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
                            struct ib_sge *sg)
{
        dseg->lkey = cpu_to_le32(sg->lkey);
        dseg->addr = cpu_to_le64(sg->addr);
        dseg->len  = cpu_to_le32(sg->length);
}

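/*
 * Fill the extended SGE space with the SGEs that do not fit in the WQE
 * itself. The extended SGE area is a separate ring of qp->sge.sge_cnt
 * entries, so a write that would cross a page boundary of the QP buffer
 * is split into two copies.
 */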
static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
                           unsigned int *sge_ind)
{
        struct hns_roce_v2_wqe_data_seg *dseg;
        struct ib_sge *sg;
        int num_in_wqe = 0;
        int extend_sge_num;
        int fi_sge_num;
        int se_sge_num;
        int shift;
        int i;

        if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
                num_in_wqe = HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE;
        extend_sge_num = wr->num_sge - num_in_wqe;
        sg = wr->sg_list + num_in_wqe;
        shift = qp->hr_buf.page_shift;

        /*
         * Check whether all the extended SGEs fit in the same page. If not,
         * calculate how many SGEs land in the first page and how many in
         * the following one.
         */
        dseg = get_send_extend_sge(qp, (*sge_ind) & (qp->sge.sge_cnt - 1));
        fi_sge_num = (round_up((uintptr_t)dseg, 1 << shift) -
                      (uintptr_t)dseg) /
                      sizeof(struct hns_roce_v2_wqe_data_seg);
        if (extend_sge_num > fi_sge_num) {
                se_sge_num = extend_sge_num - fi_sge_num;
                for (i = 0; i < fi_sge_num; i++) {
                        set_data_seg_v2(dseg++, sg + i);
                        (*sge_ind)++;
                }
                dseg = get_send_extend_sge(qp,
                                           (*sge_ind) & (qp->sge.sge_cnt - 1));
                for (i = 0; i < se_sge_num; i++) {
                        set_data_seg_v2(dseg++, sg + fi_sge_num + i);
                        (*sge_ind)++;
                }
        } else {
                for (i = 0; i < extend_sge_num; i++) {
                        set_data_seg_v2(dseg++, sg + i);
                        (*sge_ind)++;
                }
        }
}

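/*
 * Build the data segments of an RC/UC send WQE. Inline data is copied
 * directly behind the WQE header; otherwise up to
 * HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE SGEs are placed in the WQE and the
 * remainder goes to the extended SGE space via set_extend_sge().
 */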
static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
                             struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
                             void *wqe, unsigned int *sge_ind,
                             const struct ib_send_wr **bad_wr)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct hns_roce_v2_wqe_data_seg *dseg = wqe;
        struct hns_roce_qp *qp = to_hr_qp(ibqp);
        int i;

        if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
                if (le32_to_cpu(rc_sq_wqe->msg_len) >
                    hr_dev->caps.max_sq_inline) {
                        *bad_wr = wr;
                        dev_err(hr_dev->dev, "inline len(1-%d)=%d, illegal\n",
                                hr_dev->caps.max_sq_inline,
                                le32_to_cpu(rc_sq_wqe->msg_len));
                        return -EINVAL;
                }

                if (wr->opcode == IB_WR_RDMA_READ) {
                        *bad_wr = wr;
                        dev_err(hr_dev->dev,
                                "inline data is not supported for RDMA READ!\n");
                        return -EINVAL;
                }

                for (i = 0; i < wr->num_sge; i++) {
                        memcpy(wqe, ((void *)wr->sg_list[i].addr),
                               wr->sg_list[i].length);
                        wqe += wr->sg_list[i].length;
                }

                roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
                             1);
        } else {
                if (wr->num_sge <= HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) {
                        for (i = 0; i < wr->num_sge; i++) {
                                if (likely(wr->sg_list[i].length)) {
                                        set_data_seg_v2(dseg, wr->sg_list + i);
                                        dseg++;
                                }
                        }
                } else {
                        roce_set_field(rc_sq_wqe->byte_20,
                                     V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
                                     V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
                                     (*sge_ind) & (qp->sge.sge_cnt - 1));

                        for (i = 0; i < HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE; i++) {
                                if (likely(wr->sg_list[i].length)) {
                                        set_data_seg_v2(dseg, wr->sg_list + i);
                                        dseg++;
                                }
                        }

                        set_extend_sge(qp, wr, sge_ind);
                }

                roce_set_field(rc_sq_wqe->byte_16,
                               V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
                               V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, wr->num_sge);
        }

        return 0;
}

static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
                                 const struct ib_qp_attr *attr,
                                 int attr_mask, enum ib_qp_state cur_state,
                                 enum ib_qp_state new_state);

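/*
 * Post a chain of send work requests to the SQ and ring its doorbell.
 * Each WR is translated into a UD or RC WQE depending on the QP type;
 * on failure *bad_wr points at the first WR that could not be posted.
 */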
static int hns_roce_v2_post_send(struct ib_qp *ibqp,
                                 const struct ib_send_wr *wr,
                                 const struct ib_send_wr **bad_wr)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
        struct hns_roce_v2_ud_send_wqe *ud_sq_wqe;
        struct hns_roce_v2_rc_send_wqe *rc_sq_wqe;
        struct hns_roce_qp *qp = to_hr_qp(ibqp);
        struct device *dev = hr_dev->dev;
        struct hns_roce_v2_db sq_db;
        struct ib_qp_attr attr;
        unsigned int sge_ind = 0;
        unsigned int owner_bit;
        unsigned long flags;
        unsigned int ind;
        void *wqe = NULL;
        bool loopback;
        int attr_mask;
        u32 tmp_len;
        int ret = 0;
        u32 hr_op;
        u8 *smac;
        int nreq;
        int i;

        if (unlikely(ibqp->qp_type != IB_QPT_RC &&
                     ibqp->qp_type != IB_QPT_GSI &&
                     ibqp->qp_type != IB_QPT_UD)) {
                dev_err(dev, "QP type 0x%x is not supported!\n", ibqp->qp_type);
                *bad_wr = wr;
                return -EOPNOTSUPP;
        }

        if (unlikely(qp->state == IB_QPS_RESET || qp->state == IB_QPS_INIT ||
                     qp->state == IB_QPS_RTR)) {
                dev_err(dev, "Post WQE fail, QP state %d err!\n", qp->state);
                *bad_wr = wr;
                return -EINVAL;
        }

        spin_lock_irqsave(&qp->sq.lock, flags);
        ind = qp->sq_next_wqe;
        sge_ind = qp->next_sge;

        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
                        ret = -ENOMEM;
                        *bad_wr = wr;
                        goto out;
                }

                if (unlikely(wr->num_sge > qp->sq.max_gs)) {
                        dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
                                wr->num_sge, qp->sq.max_gs);
                        ret = -EINVAL;
                        *bad_wr = wr;
                        goto out;
                }

                wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
                qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
                                                                      wr->wr_id;

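                /*
                 * The owner bit flips each time the producer index wraps
                 * the (power-of-two sized) SQ ring, letting hardware tell
                 * newly posted WQEs apart from stale ones.
                 */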
                owner_bit =
                       ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
                tmp_len = 0;

                /* Build the WQE according to the QP type */
                if (ibqp->qp_type == IB_QPT_GSI) {
                        ud_sq_wqe = wqe;
                        memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe));

                        roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_0_M,
                                       V2_UD_SEND_WQE_DMAC_0_S, ah->av.mac[0]);
                        roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_1_M,
                                       V2_UD_SEND_WQE_DMAC_1_S, ah->av.mac[1]);
                        roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_2_M,
                                       V2_UD_SEND_WQE_DMAC_2_S, ah->av.mac[2]);
                        roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_3_M,
                                       V2_UD_SEND_WQE_DMAC_3_S, ah->av.mac[3]);
                        roce_set_field(ud_sq_wqe->byte_48,
                                       V2_UD_SEND_WQE_BYTE_48_DMAC_4_M,
                                       V2_UD_SEND_WQE_BYTE_48_DMAC_4_S,
                                       ah->av.mac[4]);
                        roce_set_field(ud_sq_wqe->byte_48,
                                       V2_UD_SEND_WQE_BYTE_48_DMAC_5_M,
                                       V2_UD_SEND_WQE_BYTE_48_DMAC_5_S,
                                       ah->av.mac[5]);

                        /* MAC loopback */
                        smac = (u8 *)hr_dev->dev_addr[qp->port];
                        loopback = ether_addr_equal_unaligned(ah->av.mac,
                                                              smac) ? 1 : 0;

                        roce_set_bit(ud_sq_wqe->byte_40,
                                     V2_UD_SEND_WQE_BYTE_40_LBI_S, loopback);

                        roce_set_field(ud_sq_wqe->byte_4,
                                       V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
                                       V2_UD_SEND_WQE_BYTE_4_OPCODE_S,
                                       HNS_ROCE_V2_WQE_OP_SEND);

                        for (i = 0; i < wr->num_sge; i++)
                                tmp_len += wr->sg_list[i].length;

                        ud_sq_wqe->msg_len =
                         cpu_to_le32(le32_to_cpu(ud_sq_wqe->msg_len) + tmp_len);

                        switch (wr->opcode) {
                        case IB_WR_SEND_WITH_IMM:
                        case IB_WR_RDMA_WRITE_WITH_IMM:
                                ud_sq_wqe->immtdata =
                                      cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
                                break;
                        default:
                                ud_sq_wqe->immtdata = 0;
                                break;
                        }

                        /* Set sig attr */
                        roce_set_bit(ud_sq_wqe->byte_4,
                                   V2_UD_SEND_WQE_BYTE_4_CQE_S,
                                   (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);

                        /* Set se attr */
                        roce_set_bit(ud_sq_wqe->byte_4,
                                  V2_UD_SEND_WQE_BYTE_4_SE_S,
                                  (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);

                        roce_set_bit(ud_sq_wqe->byte_4,
                                     V2_UD_SEND_WQE_BYTE_4_OWNER_S, owner_bit);

                        roce_set_field(ud_sq_wqe->byte_16,
                                       V2_UD_SEND_WQE_BYTE_16_PD_M,
                                       V2_UD_SEND_WQE_BYTE_16_PD_S,
                                       to_hr_pd(ibqp->pd)->pdn);

                        roce_set_field(ud_sq_wqe->byte_16,
                                       V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
                                       V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S,
                                       wr->num_sge);

                        roce_set_field(ud_sq_wqe->byte_20,
                                     V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
                                     V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
                                     sge_ind & (qp->sge.sge_cnt - 1));

                        roce_set_field(ud_sq_wqe->byte_24,
                                       V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
                                       V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, 0);
                        ud_sq_wqe->qkey =
                             cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
                             qp->qkey : ud_wr(wr)->remote_qkey);
                        roce_set_field(ud_sq_wqe->byte_32,
                                       V2_UD_SEND_WQE_BYTE_32_DQPN_M,
                                       V2_UD_SEND_WQE_BYTE_32_DQPN_S,
                                       ud_wr(wr)->remote_qpn);

                        roce_set_field(ud_sq_wqe->byte_36,
                                       V2_UD_SEND_WQE_BYTE_36_VLAN_M,
                                       V2_UD_SEND_WQE_BYTE_36_VLAN_S,
                                       le16_to_cpu(ah->av.vlan));
                        roce_set_field(ud_sq_wqe->byte_36,
                                       V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
                                       V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S,
                                       ah->av.hop_limit);
                        roce_set_field(ud_sq_wqe->byte_36,
                                       V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
                                       V2_UD_SEND_WQE_BYTE_36_TCLASS_S,
                                      le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
                                      HNS_ROCE_TCLASS_SHIFT);
                        roce_set_field(ud_sq_wqe->byte_40,
                                       V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
                                       V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S,
                                      le32_to_cpu(ah->av.sl_tclass_flowlabel) &
                                      HNS_ROCE_FLOW_LABEL_MASK);
                        roce_set_field(ud_sq_wqe->byte_40,
                                       V2_UD_SEND_WQE_BYTE_40_SL_M,
                                       V2_UD_SEND_WQE_BYTE_40_SL_S,
                                      le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
                                      HNS_ROCE_SL_SHIFT);
                        roce_set_field(ud_sq_wqe->byte_40,
                                       V2_UD_SEND_WQE_BYTE_40_PORTN_M,
                                       V2_UD_SEND_WQE_BYTE_40_PORTN_S,
                                       qp->port);

                        roce_set_bit(ud_sq_wqe->byte_40,
                                     V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S,
                                     ah->av.vlan_en ? 1 : 0);
                        roce_set_field(ud_sq_wqe->byte_48,
                                       V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M,
                                       V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S,
                                       hns_get_gid_index(hr_dev, qp->phy_port,
                                                         ah->av.gid_index));

                        memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0],
                               GID_LEN_V2);

                        set_extend_sge(qp, wr, &sge_ind);
                        ind++;
                } else if (ibqp->qp_type == IB_QPT_RC) {
                        rc_sq_wqe = wqe;
                        memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
                        for (i = 0; i < wr->num_sge; i++)
                                tmp_len += wr->sg_list[i].length;

                        rc_sq_wqe->msg_len =
                         cpu_to_le32(le32_to_cpu(rc_sq_wqe->msg_len) + tmp_len);

                        switch (wr->opcode) {
                        case IB_WR_SEND_WITH_IMM:
                        case IB_WR_RDMA_WRITE_WITH_IMM:
                                rc_sq_wqe->immtdata =
                                      cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
                                break;
                        case IB_WR_SEND_WITH_INV:
                                rc_sq_wqe->inv_key =
                                        cpu_to_le32(wr->ex.invalidate_rkey);
                                break;
                        default:
                                rc_sq_wqe->immtdata = 0;
                                break;
                        }

                        roce_set_bit(rc_sq_wqe->byte_4,
                                     V2_RC_SEND_WQE_BYTE_4_FENCE_S,
                                     (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);

                        roce_set_bit(rc_sq_wqe->byte_4,
                                  V2_RC_SEND_WQE_BYTE_4_SE_S,
                                  (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);

                        roce_set_bit(rc_sq_wqe->byte_4,
                                   V2_RC_SEND_WQE_BYTE_4_CQE_S,
                                   (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);

                        roce_set_bit(rc_sq_wqe->byte_4,
                                     V2_RC_SEND_WQE_BYTE_4_OWNER_S, owner_bit);

                        switch (wr->opcode) {
                        case IB_WR_RDMA_READ:
                                hr_op = HNS_ROCE_V2_WQE_OP_RDMA_READ;
                                rc_sq_wqe->rkey =
                                        cpu_to_le32(rdma_wr(wr)->rkey);
                                rc_sq_wqe->va =
                                        cpu_to_le64(rdma_wr(wr)->remote_addr);
                                break;
                        case IB_WR_RDMA_WRITE:
                                hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE;
                                rc_sq_wqe->rkey =
                                        cpu_to_le32(rdma_wr(wr)->rkey);
                                rc_sq_wqe->va =
                                        cpu_to_le64(rdma_wr(wr)->remote_addr);
                                break;
                        case IB_WR_RDMA_WRITE_WITH_IMM:
                                hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM;
                                rc_sq_wqe->rkey =
                                        cpu_to_le32(rdma_wr(wr)->rkey);
                                rc_sq_wqe->va =
                                        cpu_to_le64(rdma_wr(wr)->remote_addr);
                                break;
                        case IB_WR_SEND:
                                hr_op = HNS_ROCE_V2_WQE_OP_SEND;
                                break;
                        case IB_WR_SEND_WITH_INV:
                                hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_INV;
                                break;
                        case IB_WR_SEND_WITH_IMM:
                                hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM;
                                break;
                        case IB_WR_LOCAL_INV:
                                hr_op = HNS_ROCE_V2_WQE_OP_LOCAL_INV;
                                break;
                        case IB_WR_ATOMIC_CMP_AND_SWP:
                                hr_op = HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP;
                                break;
                        case IB_WR_ATOMIC_FETCH_AND_ADD:
                                hr_op = HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD;
                                break;
                        case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
                                hr_op =
                                       HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP;
                                break;
                        case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
                                hr_op =
                                      HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD;
                                break;
                        default:
                                hr_op = HNS_ROCE_V2_WQE_OP_MASK;
                                break;
                        }

                        roce_set_field(rc_sq_wqe->byte_4,
                                       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
                                       V2_RC_SEND_WQE_BYTE_4_OPCODE_S, hr_op);
                        wqe += sizeof(struct hns_roce_v2_rc_send_wqe);

                        ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe, wqe,
                                                &sge_ind, bad_wr);
                        if (ret)
                                goto out;
                        ind++;
                } else {
                        dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type);
                        spin_unlock_irqrestore(&qp->sq.lock, flags);
                        *bad_wr = wr;
                        return -EOPNOTSUPP;
                }
        }

out:
        if (likely(nreq)) {
                qp->sq.head += nreq;
                /* Memory barrier */
                wmb();

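                /*
                 * Compose the SQ doorbell: the QPN tag and the SQ_DB
                 * command in byte_4, the new producer index and service
                 * level in the parameter word, then post it as a single
                 * 64-bit write to the doorbell register.
                 */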
                sq_db.byte_4 = 0;
                sq_db.parameter = 0;

                roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_TAG_M,
                               V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn);
                roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
                               V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
                roce_set_field(sq_db.parameter, V2_DB_PARAMETER_IDX_M,
                               V2_DB_PARAMETER_IDX_S,
                               qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1));
                roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
                               V2_DB_PARAMETER_SL_S, qp->sl);

                hns_roce_write64_k((__le32 *)&sq_db, qp->sq.db_reg_l);

                qp->sq_next_wqe = ind;
                qp->next_sge = sge_ind;

                if (qp->state == IB_QPS_ERR) {
                        attr_mask = IB_QP_STATE;
                        attr.qp_state = IB_QPS_ERR;

                        ret = hns_roce_v2_modify_qp(&qp->ibqp, &attr, attr_mask,
                                                    qp->state, IB_QPS_ERR);
                        if (ret) {
                                spin_unlock_irqrestore(&qp->sq.lock, flags);
                                *bad_wr = wr;
                                return ret;
                        }
                }
        }

        spin_unlock_irqrestore(&qp->sq.lock, flags);

        return ret;
}

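/*
 * Post a chain of receive work requests. Each RQ WQE is a flat array of
 * data segments; an invalid-lkey sentinel terminates the list when fewer
 * than max_gs SGEs are used.
 */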
static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
                                 const struct ib_recv_wr *wr,
                                 const struct ib_recv_wr **bad_wr)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
        struct hns_roce_v2_wqe_data_seg *dseg;
        struct hns_roce_rinl_sge *sge_list;
        struct device *dev = hr_dev->dev;
        struct ib_qp_attr attr;
        unsigned long flags;
        void *wqe = NULL;
        int attr_mask;
        int ret = 0;
        int nreq;
        int ind;
        int i;

        spin_lock_irqsave(&hr_qp->rq.lock, flags);
        ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);

        if (hr_qp->state == IB_QPS_RESET) {
                spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
                *bad_wr = wr;
                return -EINVAL;
        }

        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
                        hr_qp->ibqp.recv_cq)) {
                        ret = -ENOMEM;
                        *bad_wr = wr;
                        goto out;
                }

                if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
                        dev_err(dev, "rq:num_sge=%d > qp->rq.max_gs=%d\n",
                                wr->num_sge, hr_qp->rq.max_gs);
                        ret = -EINVAL;
                        *bad_wr = wr;
                        goto out;
                }

                wqe = get_recv_wqe(hr_qp, ind);
                dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
                for (i = 0; i < wr->num_sge; i++) {
                        if (!wr->sg_list[i].length)
                                continue;
                        set_data_seg_v2(dseg, wr->sg_list + i);
                        dseg++;
                }

                if (i < hr_qp->rq.max_gs) {
                        dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
                        dseg->addr = 0;
                }

                /* rq support inline data */
                if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
                        sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
                        hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt =
                                                               (u32)wr->num_sge;
                        for (i = 0; i < wr->num_sge; i++) {
                                sge_list[i].addr =
                                               (void *)(u64)wr->sg_list[i].addr;
                                sge_list[i].len = wr->sg_list[i].length;
                        }
                }

                hr_qp->rq.wrid[ind] = wr->wr_id;

                ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
        }

out:
        if (likely(nreq)) {
                hr_qp->rq.head += nreq;
                /* Memory barrier */
                wmb();

                *hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff;

                if (hr_qp->state == IB_QPS_ERR) {
                        attr_mask = IB_QP_STATE;
                        attr.qp_state = IB_QPS_ERR;

                        ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, &attr,
                                                    attr_mask, hr_qp->state,
                                                    IB_QPS_ERR);
                        if (ret) {
                                spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
                                *bad_wr = wr;
                                return ret;
                        }
                }
        }
        spin_unlock_irqrestore(&hr_qp->rq.lock, flags);

        return ret;
}

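/*
 * Number of free descriptors left in a command queue ring. One slot is
 * kept unused so that a full ring can be told apart from an empty one
 * (head == tail would otherwise be ambiguous).
 */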
static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring)
{
        int ntu = ring->next_to_use;
        int ntc = ring->next_to_clean;
        int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

        return ring->desc_num - used - 1;
}

static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
                                   struct hns_roce_v2_cmq_ring *ring)
{
        int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);

        ring->desc = kzalloc(size, GFP_KERNEL);
        if (!ring->desc)
                return -ENOMEM;

        ring->desc_dma_addr = dma_map_single(hr_dev->dev, ring->desc, size,
                                             DMA_BIDIRECTIONAL);
        if (dma_mapping_error(hr_dev->dev, ring->desc_dma_addr)) {
                ring->desc_dma_addr = 0;
                kfree(ring->desc);
                ring->desc = NULL;
                return -ENOMEM;
        }

        return 0;
}

static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
                                   struct hns_roce_v2_cmq_ring *ring)
{
        dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
                         ring->desc_num * sizeof(struct hns_roce_cmq_desc),
                         DMA_BIDIRECTIONAL);

        ring->desc_dma_addr = 0;
        kfree(ring->desc);
}

static int hns_roce_init_cmq_ring(struct hns_roce_dev *hr_dev, bool ring_type)
{
        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
        struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
                                            &priv->cmq.csq : &priv->cmq.crq;

        ring->flag = ring_type;
        ring->next_to_clean = 0;
        ring->next_to_use = 0;

        return hns_roce_alloc_cmq_desc(hr_dev, ring);
}

static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
{
        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
        struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
                                            &priv->cmq.csq : &priv->cmq.crq;
        dma_addr_t dma = ring->desc_dma_addr;

        if (ring_type == TYPE_CSQ) {
                roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, (u32)dma);
                roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG,
                           upper_32_bits(dma));
                roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
                          (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) |
                           HNS_ROCE_CMQ_ENABLE);
                roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
                roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0);
        } else {
                roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_L_REG, (u32)dma);
                roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG,
                           upper_32_bits(dma));
                roce_write(hr_dev, ROCEE_RX_CMQ_DEPTH_REG,
                          (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) |
                           HNS_ROCE_CMQ_ENABLE);
                roce_write(hr_dev, ROCEE_RX_CMQ_HEAD_REG, 0);
                roce_write(hr_dev, ROCEE_RX_CMQ_TAIL_REG, 0);
        }
}

static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
        int ret;

        /* Setup the queue entries for command queue */
        priv->cmq.csq.desc_num = CMD_CSQ_DESC_NUM;
        priv->cmq.crq.desc_num = CMD_CRQ_DESC_NUM;

        /* Setup the lock for command queue */
        spin_lock_init(&priv->cmq.csq.lock);
        spin_lock_init(&priv->cmq.crq.lock);

        /* Setup Tx write back timeout */
        priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;

        /* Init CSQ */
        ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CSQ);
        if (ret) {
                dev_err(hr_dev->dev, "Init CSQ error, ret = %d.\n", ret);
                return ret;
        }

        /* Init CRQ */
        ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CRQ);
        if (ret) {
                dev_err(hr_dev->dev, "Init CRQ error, ret = %d.\n", ret);
                goto err_crq;
        }

        /* Init CSQ REG */
        hns_roce_cmq_init_regs(hr_dev, TYPE_CSQ);

        /* Init CRQ REG */
        hns_roce_cmq_init_regs(hr_dev, TYPE_CRQ);

        return 0;

err_crq:
        hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);

        return ret;
}

static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;

        hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
        hns_roce_free_cmq_desc(hr_dev, &priv->cmq.crq);
}

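/*
 * Initialise a command descriptor: clear it, set the opcode, and mark it
 * as an inline, non-interrupting request. The WR bit tells firmware
 * whether this is a read (query) or a write (configure) command.
 */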
static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
                                          enum hns_roce_opcode_type opcode,
                                          bool is_read)
{
        memset((void *)desc, 0, sizeof(struct hns_roce_cmq_desc));
        desc->opcode = cpu_to_le16(opcode);
        desc->flag =
                cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
        if (is_read)
                desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
        else
                desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
}

static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
        u32 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);

        return head == priv->cmq.csq.next_to_use;
}

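/*
 * Reclaim CSQ descriptors that firmware has consumed, i.e. everything
 * between next_to_clean and the hardware head pointer. Returns the
 * number of descriptors cleaned.
 */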
static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
        struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
        struct hns_roce_cmq_desc *desc;
        u16 ntc = csq->next_to_clean;
        u32 head;
        int clean = 0;

        desc = &csq->desc[ntc];
        head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
        while (head != ntc) {
                memset(desc, 0, sizeof(*desc));
                ntc++;
                if (ntc == csq->desc_num)
                        ntc = 0;
                desc = &csq->desc[ntc];
                clean++;
        }
        csq->next_to_clean = ntc;

        return clean;
}

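/*
 * Copy the caller's descriptors into the CSQ, advance the tail register
 * to hand them to firmware, then poll the head register until firmware
 * has written the results back (or the timeout expires). Runs under the
 * CSQ lock with bottom halves disabled.
 */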
static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
                             struct hns_roce_cmq_desc *desc, int num)
{
        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
        struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
        struct hns_roce_cmq_desc *desc_to_use;
        bool complete = false;
        u32 timeout = 0;
        int handle = 0;
        u16 desc_ret;
        int ret = 0;
        int ntc;

        if (hr_dev->is_reset)
                return 0;

        spin_lock_bh(&csq->lock);

        if (num > hns_roce_cmq_space(csq)) {
                spin_unlock_bh(&csq->lock);
                return -EBUSY;
        }

        /*
         * Record where this batch starts in the CSQ; hardware will write
         * the results back to these slots.
         */
        ntc = csq->next_to_use;

        while (handle < num) {
                desc_to_use = &csq->desc[csq->next_to_use];
                *desc_to_use = desc[handle];
                dev_dbg(hr_dev->dev, "set cmq desc:\n");
                csq->next_to_use++;
                if (csq->next_to_use == csq->desc_num)
                        csq->next_to_use = 0;
                handle++;
        }

        /* Write to hardware */
        roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, csq->next_to_use);

        /*
         * If the command is sync, wait for the firmware to write back.
         * If multiple descriptors were sent, use the first one to check.
         */
        if (le16_to_cpu(desc->flag) & HNS_ROCE_CMD_FLAG_NO_INTR) {
                do {
                        if (hns_roce_cmq_csq_done(hr_dev))
                                break;
                        udelay(1);
                        timeout++;
                } while (timeout < priv->cmq.tx_timeout);
        }

        if (hns_roce_cmq_csq_done(hr_dev)) {
                complete = true;
                handle = 0;
                while (handle < num) {
                        /* get the result of hardware write back */
                        desc_to_use = &csq->desc[ntc];
                        desc[handle] = *desc_to_use;
                        dev_dbg(hr_dev->dev, "Get cmq desc:\n");
                        desc_ret = le16_to_cpu(desc[handle].retval);
                        if (desc_ret == CMD_EXEC_SUCCESS)
                                ret = 0;
                        else
                                ret = -EIO;
                        priv->cmq.last_status = desc_ret;
                        ntc++;
                        handle++;
                        if (ntc == csq->desc_num)
                                ntc = 0;
                }
        }

        if (!complete)
                ret = -EAGAIN;

        /* clean the command send queue */
        handle = hns_roce_cmq_csq_clean(hr_dev);
        if (handle != num)
                dev_warn(hr_dev->dev, "Cleaned %d, need to clean %d\n",
                         handle, num);

        spin_unlock_bh(&csq->lock);

        return ret;
}

static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_query_version *resp;
        struct hns_roce_cmq_desc desc;
        int ret;

        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true);
        ret = hns_roce_cmq_send(hr_dev, &desc, 1);
        if (ret)
                return ret;

        resp = (struct hns_roce_query_version *)desc.data;
        hr_dev->hw_rev = le32_to_cpu(resp->rocee_hw_version);
        hr_dev->vendor_id = hr_dev->pci_dev->vendor;

        return 0;
}

static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_query_fw_info *resp;
        struct hns_roce_cmq_desc desc;
        int ret;

        hns_roce_cmq_setup_basic_desc(&desc, HNS_QUERY_FW_VER, true);
        ret = hns_roce_cmq_send(hr_dev, &desc, 1);
        if (ret)
                return ret;

        resp = (struct hns_roce_query_fw_info *)desc.data;
        hr_dev->caps.fw_ver = (u64)(le32_to_cpu(resp->fw_ver));

        return 0;
}

static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_cfg_global_param *req;
        struct hns_roce_cmq_desc desc;

        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
                                      false);

        req = (struct hns_roce_cfg_global_param *)desc.data;
        memset(req, 0, sizeof(*req));
        roce_set_field(req->time_cfg_udp_port,
                       CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M,
                       CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S, 0x3e8);
        roce_set_field(req->time_cfg_udp_port,
                       CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M,
                       CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S, 0x12b7);

        return hns_roce_cmq_send(hr_dev, &desc, 1);
}

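/*
 * Read the per-PF resource quotas (QPC/SRQC/CQC/MPT base-address-table
 * entries and the SL count) from firmware. The reply spans two
 * descriptors, chained with the NEXT flag.
 */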
static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_cmq_desc desc[2];
        struct hns_roce_pf_res_a *req_a;
        struct hns_roce_pf_res_b *req_b;
        int ret;
        int i;

        for (i = 0; i < 2; i++) {
                hns_roce_cmq_setup_basic_desc(&desc[i],
                                              HNS_ROCE_OPC_QUERY_PF_RES, true);

                if (i == 0)
                        desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
                else
                        desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
        }

        ret = hns_roce_cmq_send(hr_dev, desc, 2);
        if (ret)
                return ret;

        req_a = (struct hns_roce_pf_res_a *)desc[0].data;
        req_b = (struct hns_roce_pf_res_b *)desc[1].data;

        hr_dev->caps.qpc_bt_num = roce_get_field(req_a->qpc_bt_idx_num,
                                                 PF_RES_DATA_1_PF_QPC_BT_NUM_M,
                                                 PF_RES_DATA_1_PF_QPC_BT_NUM_S);
        hr_dev->caps.srqc_bt_num = roce_get_field(req_a->srqc_bt_idx_num,
                                                PF_RES_DATA_2_PF_SRQC_BT_NUM_M,
                                                PF_RES_DATA_2_PF_SRQC_BT_NUM_S);
        hr_dev->caps.cqc_bt_num = roce_get_field(req_a->cqc_bt_idx_num,
                                                 PF_RES_DATA_3_PF_CQC_BT_NUM_M,
                                                 PF_RES_DATA_3_PF_CQC_BT_NUM_S);
        hr_dev->caps.mpt_bt_num = roce_get_field(req_a->mpt_bt_idx_num,
                                                 PF_RES_DATA_4_PF_MPT_BT_NUM_M,
                                                 PF_RES_DATA_4_PF_MPT_BT_NUM_S);

        hr_dev->caps.sl_num = roce_get_field(req_b->qid_idx_sl_num,
                                             PF_RES_DATA_3_PF_SL_NUM_M,
                                             PF_RES_DATA_3_PF_SL_NUM_S);

        return 0;
}

static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
                                        int vf_id)
{
        struct hns_roce_cmq_desc desc;
        struct hns_roce_vf_switch *swt;
        int ret;

        swt = (struct hns_roce_vf_switch *)desc.data;
        hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true);
        swt->rocee_sel |= cpu_to_le16(HNS_ICL_SWITCH_CMD_ROCEE_SEL);
        roce_set_field(swt->fun_id,
                       VF_SWITCH_DATA_FUN_ID_VF_ID_M,
                       VF_SWITCH_DATA_FUN_ID_VF_ID_S,
                       vf_id);
        ret = hns_roce_cmq_send(hr_dev, &desc, 1);
        if (ret)
                return ret;
        desc.flag =
                cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
        desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
        roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LPBK_S, 1);
        roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S, 1);
        roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_DST_OVRD_S, 1);

        return hns_roce_cmq_send(hr_dev, &desc, 1);
}

static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_cmq_desc desc[2];
        struct hns_roce_vf_res_a *req_a;
        struct hns_roce_vf_res_b *req_b;
        int i;

        req_a = (struct hns_roce_vf_res_a *)desc[0].data;
        req_b = (struct hns_roce_vf_res_b *)desc[1].data;
        memset(req_a, 0, sizeof(*req_a));
        memset(req_b, 0, sizeof(*req_b));
        for (i = 0; i < 2; i++) {
                hns_roce_cmq_setup_basic_desc(&desc[i],
                                              HNS_ROCE_OPC_ALLOC_VF_RES, false);

                if (i == 0)
                        desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
                else
                        desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);

                if (i == 0) {
                        roce_set_field(req_a->vf_qpc_bt_idx_num,
                                       VF_RES_A_DATA_1_VF_QPC_BT_IDX_M,
                                       VF_RES_A_DATA_1_VF_QPC_BT_IDX_S, 0);
                        roce_set_field(req_a->vf_qpc_bt_idx_num,
                                       VF_RES_A_DATA_1_VF_QPC_BT_NUM_M,
                                       VF_RES_A_DATA_1_VF_QPC_BT_NUM_S,
                                       HNS_ROCE_VF_QPC_BT_NUM);

                        roce_set_field(req_a->vf_srqc_bt_idx_num,
                                       VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M,
                                       VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S, 0);
                        roce_set_field(req_a->vf_srqc_bt_idx_num,
                                       VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M,
                                       VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S,
                                       HNS_ROCE_VF_SRQC_BT_NUM);

                        roce_set_field(req_a->vf_cqc_bt_idx_num,
                                       VF_RES_A_DATA_3_VF_CQC_BT_IDX_M,
                                       VF_RES_A_DATA_3_VF_CQC_BT_IDX_S, 0);
                        roce_set_field(req_a->vf_cqc_bt_idx_num,
                                       VF_RES_A_DATA_3_VF_CQC_BT_NUM_M,
                                       VF_RES_A_DATA_3_VF_CQC_BT_NUM_S,
                                       HNS_ROCE_VF_CQC_BT_NUM);

                        roce_set_field(req_a->vf_mpt_bt_idx_num,
                                       VF_RES_A_DATA_4_VF_MPT_BT_IDX_M,
                                       VF_RES_A_DATA_4_VF_MPT_BT_IDX_S, 0);
                        roce_set_field(req_a->vf_mpt_bt_idx_num,
                                       VF_RES_A_DATA_4_VF_MPT_BT_NUM_M,
                                       VF_RES_A_DATA_4_VF_MPT_BT_NUM_S,
                                       HNS_ROCE_VF_MPT_BT_NUM);

                        roce_set_field(req_a->vf_eqc_bt_idx_num,
                                       VF_RES_A_DATA_5_VF_EQC_IDX_M,
                                       VF_RES_A_DATA_5_VF_EQC_IDX_S, 0);
                        roce_set_field(req_a->vf_eqc_bt_idx_num,
                                       VF_RES_A_DATA_5_VF_EQC_NUM_M,
                                       VF_RES_A_DATA_5_VF_EQC_NUM_S,
                                       HNS_ROCE_VF_EQC_NUM);
                } else {
                        roce_set_field(req_b->vf_smac_idx_num,
                                       VF_RES_B_DATA_1_VF_SMAC_IDX_M,
                                       VF_RES_B_DATA_1_VF_SMAC_IDX_S, 0);
                        roce_set_field(req_b->vf_smac_idx_num,
                                       VF_RES_B_DATA_1_VF_SMAC_NUM_M,
                                       VF_RES_B_DATA_1_VF_SMAC_NUM_S,
                                       HNS_ROCE_VF_SMAC_NUM);

                        roce_set_field(req_b->vf_sgid_idx_num,
                                       VF_RES_B_DATA_2_VF_SGID_IDX_M,
                                       VF_RES_B_DATA_2_VF_SGID_IDX_S, 0);
                        roce_set_field(req_b->vf_sgid_idx_num,
                                       VF_RES_B_DATA_2_VF_SGID_NUM_M,
                                       VF_RES_B_DATA_2_VF_SGID_NUM_S,
                                       HNS_ROCE_VF_SGID_NUM);

                        roce_set_field(req_b->vf_qid_idx_sl_num,
                                       VF_RES_B_DATA_3_VF_QID_IDX_M,
                                       VF_RES_B_DATA_3_VF_QID_IDX_S, 0);
                        roce_set_field(req_b->vf_qid_idx_sl_num,
                                       VF_RES_B_DATA_3_VF_SL_NUM_M,
                                       VF_RES_B_DATA_3_VF_SL_NUM_S,
                                       HNS_ROCE_VF_SL_NUM);
                }
        }

        return hns_roce_cmq_send(hr_dev, desc, 2);
}

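/*
 * Tell hardware how the base address tables (BT) for QPC, SRQC, CQC and
 * MPT are laid out: the BA/buffer page sizes (written as shift values
 * plus a fixed offset) and the hop counts for multi-level addressing;
 * HNS_ROCE_HOP_NUM_0 is encoded as 0.
 */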
static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
{
        u8 srqc_hop_num = hr_dev->caps.srqc_hop_num;
        u8 qpc_hop_num = hr_dev->caps.qpc_hop_num;
        u8 cqc_hop_num = hr_dev->caps.cqc_hop_num;
        u8 mpt_hop_num = hr_dev->caps.mpt_hop_num;
        struct hns_roce_cfg_bt_attr *req;
        struct hns_roce_cmq_desc desc;

        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);
        req = (struct hns_roce_cfg_bt_attr *)desc.data;
        memset(req, 0, sizeof(*req));

        roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M,
                       CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S,
                       hr_dev->caps.qpc_ba_pg_sz + PG_SHIFT_OFFSET);
        roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M,
                       CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S,
                       hr_dev->caps.qpc_buf_pg_sz + PG_SHIFT_OFFSET);
        roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M,
                       CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S,
                       qpc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : qpc_hop_num);

        roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M,
                       CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S,
                       hr_dev->caps.srqc_ba_pg_sz + PG_SHIFT_OFFSET);
        roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M,
                       CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S,
                       hr_dev->caps.srqc_buf_pg_sz + PG_SHIFT_OFFSET);
        roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M,
                       CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S,
                       srqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : srqc_hop_num);

        roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M,
                       CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S,
                       hr_dev->caps.cqc_ba_pg_sz + PG_SHIFT_OFFSET);
        roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M,
                       CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S,
                       hr_dev->caps.cqc_buf_pg_sz + PG_SHIFT_OFFSET);
        roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M,
                       CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S,
                       cqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : cqc_hop_num);

        roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M,
                       CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S,
                       hr_dev->caps.mpt_ba_pg_sz + PG_SHIFT_OFFSET);
        roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M,
                       CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S,
                       hr_dev->caps.mpt_buf_pg_sz + PG_SHIFT_OFFSET);
        roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M,
                       CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S,
                       mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num);

        return hns_roce_cmq_send(hr_dev, &desc, 1);
}

static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_caps *caps = &hr_dev->caps;
        int ret;

        ret = hns_roce_cmq_query_hw_info(hr_dev);
        if (ret) {
                dev_err(hr_dev->dev, "Query hardware version fail, ret = %d.\n",
                        ret);
                return ret;
        }

        ret = hns_roce_query_fw_ver(hr_dev);
        if (ret) {
                dev_err(hr_dev->dev, "Query firmware version fail, ret = %d.\n",
                        ret);
                return ret;
        }

        ret = hns_roce_config_global_param(hr_dev);
        if (ret) {
                dev_err(hr_dev->dev, "Configure global param fail, ret = %d.\n",
                        ret);
                return ret;
        }

        /* Get pf resource owned by every pf */
        ret = hns_roce_query_pf_resource(hr_dev);
        if (ret) {
                dev_err(hr_dev->dev, "Query pf resource fail, ret = %d.\n",
                        ret);
                return ret;
        }

        ret = hns_roce_alloc_vf_resource(hr_dev);
        if (ret) {
                dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n",
                        ret);
                return ret;
        }

        if (hr_dev->pci_dev->revision == 0x21) {
                ret = hns_roce_set_vf_switch_param(hr_dev, 0);
                if (ret) {
                        dev_err(hr_dev->dev,
                                "Set function switch param fail, ret = %d.\n",
                                ret);
                        return ret;
                }
        }

        hr_dev->vendor_part_id = hr_dev->pci_dev->device;
        hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);

        caps->num_qps           = HNS_ROCE_V2_MAX_QP_NUM;
        caps->max_wqes          = HNS_ROCE_V2_MAX_WQE_NUM;
        caps->num_cqs           = HNS_ROCE_V2_MAX_CQ_NUM;
        caps->max_cqes          = HNS_ROCE_V2_MAX_CQE_NUM;
        caps->max_sq_sg         = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
        caps->max_extend_sg     = HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
        caps->max_rq_sg         = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
        caps->max_sq_inline     = HNS_ROCE_V2_MAX_SQ_INLINE;
        caps->num_uars          = HNS_ROCE_V2_UAR_NUM;
        caps->phy_num_uars      = HNS_ROCE_V2_PHY_UAR_NUM;
        caps->num_aeq_vectors   = HNS_ROCE_V2_AEQE_VEC_NUM;
        caps->num_comp_vectors  = HNS_ROCE_V2_COMP_VEC_NUM;
        caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM;
        caps->num_mtpts         = HNS_ROCE_V2_MAX_MTPT_NUM;
        caps->num_mtt_segs      = HNS_ROCE_V2_MAX_MTT_SEGS;
        caps->num_cqe_segs      = HNS_ROCE_V2_MAX_CQE_SEGS;
        caps->num_pds           = HNS_ROCE_V2_MAX_PD_NUM;
        caps->max_qp_init_rdma  = HNS_ROCE_V2_MAX_QP_INIT_RDMA;
        caps->max_qp_dest_rdma  = HNS_ROCE_V2_MAX_QP_DEST_RDMA;
        caps->max_sq_desc_sz    = HNS_ROCE_V2_MAX_SQ_DESC_SZ;
        caps->max_rq_desc_sz    = HNS_ROCE_V2_MAX_RQ_DESC_SZ;
        caps->max_srq_desc_sz   = HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
        caps->qpc_entry_sz      = HNS_ROCE_V2_QPC_ENTRY_SZ;
        caps->irrl_entry_sz     = HNS_ROCE_V2_IRRL_ENTRY_SZ;
        caps->trrl_entry_sz     = HNS_ROCE_V2_TRRL_ENTRY_SZ;
        caps->cqc_entry_sz      = HNS_ROCE_V2_CQC_ENTRY_SZ;
        caps->mtpt_entry_sz     = HNS_ROCE_V2_MTPT_ENTRY_SZ;
        caps->mtt_entry_sz      = HNS_ROCE_V2_MTT_ENTRY_SZ;
        caps->cq_entry_sz       = HNS_ROCE_V2_CQE_ENTRY_SIZE;
        caps->page_size_cap     = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
        caps->reserved_lkey     = 0;
        caps->reserved_pds      = 0;
        caps->reserved_mrws     = 1;
        caps->reserved_uars     = 0;
        caps->reserved_cqs      = 0;
        caps->reserved_qps      = HNS_ROCE_V2_RSV_QPS;

        caps->qpc_ba_pg_sz      = 0;
        caps->qpc_buf_pg_sz     = 0;
        caps->qpc_hop_num       = HNS_ROCE_CONTEXT_HOP_NUM;
        caps->srqc_ba_pg_sz     = 0;
        caps->srqc_buf_pg_sz    = 0;
        caps->srqc_hop_num      = HNS_ROCE_HOP_NUM_0;
        caps->cqc_ba_pg_sz      = 0;
        caps->cqc_buf_pg_sz     = 0;
        caps->cqc_hop_num       = HNS_ROCE_CONTEXT_HOP_NUM;
        caps->mpt_ba_pg_sz      = 0;
        caps->mpt_buf_pg_sz     = 0;
        caps->mpt_hop_num       = HNS_ROCE_CONTEXT_HOP_NUM;
        caps->pbl_ba_pg_sz      = 0;
        caps->pbl_buf_pg_sz     = 0;
        caps->pbl_hop_num       = HNS_ROCE_PBL_HOP_NUM;
        caps->mtt_ba_pg_sz      = 0;
        caps->mtt_buf_pg_sz     = 0;
        caps->mtt_hop_num       = HNS_ROCE_MTT_HOP_NUM;
        caps->cqe_ba_pg_sz      = 0;
        caps->cqe_buf_pg_sz     = 0;
        caps->cqe_hop_num       = HNS_ROCE_CQE_HOP_NUM;
        caps->eqe_ba_pg_sz      = 0;
        caps->eqe_buf_pg_sz     = 0;
        caps->eqe_hop_num       = HNS_ROCE_EQE_HOP_NUM;
        caps->tsq_buf_pg_sz     = 0;
        caps->chunk_sz          = HNS_ROCE_V2_TABLE_CHUNK_SIZE;

        caps->flags             = HNS_ROCE_CAP_FLAG_REREG_MR |
                                  HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
                                  HNS_ROCE_CAP_FLAG_RQ_INLINE |
                                  HNS_ROCE_CAP_FLAG_RECORD_DB |
                                  HNS_ROCE_CAP_FLAG_SQ_RECORD_DB;
1297        caps->pkey_table_len[0] = 1;
1298        caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
1299        caps->ceqe_depth        = HNS_ROCE_V2_COMP_EQE_NUM;
1300        caps->aeqe_depth        = HNS_ROCE_V2_ASYNC_EQE_NUM;
1301        caps->local_ca_ack_delay = 0;
1302        caps->max_mtu = IB_MTU_4096;
1303
1304        ret = hns_roce_v2_set_bt(hr_dev);
1305        if (ret)
1306                dev_err(hr_dev->dev, "Configure BT attribute failed, ret = %d.\n",
1307                        ret);
1308
1309        return ret;
1310}
1311
1312static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
1313                                      enum hns_roce_link_table_type type)
1314{
1315        struct hns_roce_cmq_desc desc[2];
1316        struct hns_roce_cfg_llm_a *req_a =
1317                                (struct hns_roce_cfg_llm_a *)desc[0].data;
1318        struct hns_roce_cfg_llm_b *req_b =
1319                                (struct hns_roce_cfg_llm_b *)desc[1].data;
1320        struct hns_roce_v2_priv *priv = hr_dev->priv;
1321        struct hns_roce_link_table *link_tbl;
1322        struct hns_roce_link_table_entry *entry;
1323        enum hns_roce_opcode_type opcode;
1324        u32 page_num;
1325        int i;
1326
1327        switch (type) {
1328        case TSQ_LINK_TABLE:
1329                link_tbl = &priv->tsq;
1330                opcode = HNS_ROCE_OPC_CFG_EXT_LLM;
1331                break;
1332        case TPQ_LINK_TABLE:
1333                link_tbl = &priv->tpq;
1334                opcode = HNS_ROCE_OPC_CFG_TMOUT_LLM;
1335                break;
1336        default:
1337                return -EINVAL;
1338        }
1339
1340        page_num = link_tbl->npages;
1341        entry = link_tbl->table.buf;
1342        memset(req_a, 0, sizeof(*req_a));
1343        memset(req_b, 0, sizeof(*req_b));
1344
1345        for (i = 0; i < 2; i++) {
1346                hns_roce_cmq_setup_basic_desc(&desc[i], opcode, false);
1347
1348                if (i == 0)
1349                        desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1350                else
1351                        desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1352
1353                if (i == 0) {
1354                        req_a->base_addr_l = link_tbl->table.map & 0xffffffff;
1355                        req_a->base_addr_h = (link_tbl->table.map >> 32) &
1356                                             0xffffffff;
1357                        roce_set_field(req_a->depth_pgsz_init_en,
1358                                       CFG_LLM_QUE_DEPTH_M,
1359                                       CFG_LLM_QUE_DEPTH_S,
1360                                       link_tbl->npages);
1361                        roce_set_field(req_a->depth_pgsz_init_en,
1362                                       CFG_LLM_QUE_PGSZ_M,
1363                                       CFG_LLM_QUE_PGSZ_S,
1364                                       link_tbl->pg_sz);
1365                        req_a->head_ba_l = entry[0].blk_ba0;
1366                        req_a->head_ba_h_nxtptr = entry[0].blk_ba1_nxt_ptr;
1367                        roce_set_field(req_a->head_ptr,
1368                                       CFG_LLM_HEAD_PTR_M,
1369                                       CFG_LLM_HEAD_PTR_S, 0);
1370                } else {
1371                        req_b->tail_ba_l = entry[page_num - 1].blk_ba0;
1372                        roce_set_field(req_b->tail_ba_h,
1373                                       CFG_LLM_TAIL_BA_H_M,
1374                                       CFG_LLM_TAIL_BA_H_S,
1375                                       entry[page_num - 1].blk_ba1_nxt_ptr &
1376                                       HNS_ROCE_LINK_TABLE_BA1_M);
1377                        roce_set_field(req_b->tail_ptr,
1378                                       CFG_LLM_TAIL_PTR_M,
1379                                       CFG_LLM_TAIL_PTR_S,
1380                                       (entry[page_num - 2].blk_ba1_nxt_ptr &
1381                                       HNS_ROCE_LINK_TABLE_NXT_PTR_M) >>
1382                                       HNS_ROCE_LINK_TABLE_NXT_PTR_S);
1383                }
1384        }
1385        roce_set_field(req_a->depth_pgsz_init_en,
1386                       CFG_LLM_INIT_EN_M, CFG_LLM_INIT_EN_S, 1);
1387
1388        return hns_roce_cmq_send(hr_dev, desc, 2);
1389}
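
/*
 * Sketch only (hypothetical helper): the loop above follows the general
 * CMQ chaining rule -- every descriptor in a multi-descriptor command
 * carries HNS_ROCE_CMD_FLAG_NEXT except the last one. Generalized to an
 * N-descriptor chain:
 */
static inline void hr_chain_cmq_descs(struct hns_roce_cmq_desc *desc, int n,
                                      enum hns_roce_opcode_type opcode)
{
        int i;

        for (i = 0; i < n; i++) {
                hns_roce_cmq_setup_basic_desc(&desc[i], opcode, false);
                if (i < n - 1)
                        desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
                else
                        desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
        }
}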
1390
1391static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev,
1392                                    enum hns_roce_link_table_type type)
1393{
1394        struct hns_roce_v2_priv *priv = hr_dev->priv;
1395        struct hns_roce_link_table *link_tbl;
1396        struct hns_roce_link_table_entry *entry;
1397        struct device *dev = hr_dev->dev;
1398        u32 buf_chk_sz;
1399        dma_addr_t t;
1400        int func_num = 1;
1401        int pg_num_a;
1402        int pg_num_b;
1403        int pg_num;
1404        int size;
1405        int i;
1406
1407        switch (type) {
1408        case TSQ_LINK_TABLE:
1409                link_tbl = &priv->tsq;
1410                buf_chk_sz = 1 << (hr_dev->caps.tsq_buf_pg_sz + PAGE_SHIFT);
1411                pg_num_a = hr_dev->caps.num_qps * 8 / buf_chk_sz;
1412                pg_num_b = hr_dev->caps.sl_num * 4 + 2;
1413                break;
1414        case TPQ_LINK_TABLE:
1415                link_tbl = &priv->tpq;
1416                buf_chk_sz = 1 << (hr_dev->caps.tpq_buf_pg_sz + PAGE_SHIFT);
1417                pg_num_a = hr_dev->caps.num_cqs * 4 / buf_chk_sz;
1418                pg_num_b = 2 * 4 * func_num + 2;
1419                break;
1420        default:
1421                return -EINVAL;
1422        }
1423
1424        pg_num = max(pg_num_a, pg_num_b);
1425        size = pg_num * sizeof(struct hns_roce_link_table_entry);
1426
1427        link_tbl->table.buf = dma_alloc_coherent(dev, size,
1428                                                 &link_tbl->table.map,
1429                                                 GFP_KERNEL);
1430        if (!link_tbl->table.buf)
1431                goto out;
1432
1433        link_tbl->pg_list = kcalloc(pg_num, sizeof(*link_tbl->pg_list),
1434                                    GFP_KERNEL);
1435        if (!link_tbl->pg_list)
1436                goto err_kcalloc_failed;
1437
1438        entry = link_tbl->table.buf;
1439        for (i = 0; i < pg_num; ++i) {
1440                link_tbl->pg_list[i].buf = dma_alloc_coherent(dev, buf_chk_sz,
1441                                                              &t, GFP_KERNEL);
1442                if (!link_tbl->pg_list[i].buf)
1443                        goto err_alloc_buf_failed;
1444
1445                link_tbl->pg_list[i].map = t;
1446
1447                entry[i].blk_ba0 = (t >> 12) & 0xffffffff;
1448                roce_set_field(entry[i].blk_ba1_nxt_ptr,
1449                               HNS_ROCE_LINK_TABLE_BA1_M,
1450                               HNS_ROCE_LINK_TABLE_BA1_S,
1451                               t >> 44);
1452
1453                if (i < (pg_num - 1))
1454                        roce_set_field(entry[i].blk_ba1_nxt_ptr,
1455                                       HNS_ROCE_LINK_TABLE_NXT_PTR_M,
1456                                       HNS_ROCE_LINK_TABLE_NXT_PTR_S,
1457                                       i + 1);
1458        }
1459        link_tbl->npages = pg_num;
1460        link_tbl->pg_sz = buf_chk_sz;
1461
1462        return hns_roce_config_link_table(hr_dev, type);
1463
1464err_alloc_buf_failed:
1465        for (i -= 1; i >= 0; i--)
1466                dma_free_coherent(dev, buf_chk_sz,
1467                                  link_tbl->pg_list[i].buf,
1468                                  link_tbl->pg_list[i].map);
1469        kfree(link_tbl->pg_list);
1470
1471err_kcalloc_failed:
1472        dma_free_coherent(dev, size, link_tbl->table.buf,
1473                          link_tbl->table.map);
1474
1475out:
1476        return -ENOMEM;
1477}
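
/*
 * Sketch of the entry layout assumed above (helper name hypothetical):
 * each link-table entry stores a page base address split as blk_ba0 =
 * address bits 43:12 and BA1 = the bits above 43, with the next-entry
 * pointer packed alongside BA1.
 */
static inline void hr_link_entry_set_ba(struct hns_roce_link_table_entry *entry,
                                        dma_addr_t ba)
{
        entry->blk_ba0 = (ba >> 12) & 0xffffffff;
        roce_set_field(entry->blk_ba1_nxt_ptr, HNS_ROCE_LINK_TABLE_BA1_M,
                       HNS_ROCE_LINK_TABLE_BA1_S, ba >> 44);
}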
1478
1479static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev,
1480                                     struct hns_roce_link_table *link_tbl)
1481{
1482        struct device *dev = hr_dev->dev;
1483        int size;
1484        int i;
1485
1486        size = link_tbl->npages * sizeof(struct hns_roce_link_table_entry);
1487
1488        for (i = 0; i < link_tbl->npages; ++i)
1489                if (link_tbl->pg_list[i].buf)
1490                        dma_free_coherent(dev, link_tbl->pg_sz,
1491                                          link_tbl->pg_list[i].buf,
1492                                          link_tbl->pg_list[i].map);
1493        kfree(link_tbl->pg_list);
1494
1495        dma_free_coherent(dev, size, link_tbl->table.buf,
1496                          link_tbl->table.map);
1497}
1498
1499static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
1500{
1501        struct hns_roce_v2_priv *priv = hr_dev->priv;
1502        int ret;
1503
1504        /* TSQ includes SQ doorbell and ack doorbell */
1505        ret = hns_roce_init_link_table(hr_dev, TSQ_LINK_TABLE);
1506        if (ret) {
1507                dev_err(hr_dev->dev, "TSQ init failed, ret = %d.\n", ret);
1508                return ret;
1509        }
1510
1511        ret = hns_roce_init_link_table(hr_dev, TPQ_LINK_TABLE);
1512        if (ret) {
1513                dev_err(hr_dev->dev, "TPQ init failed, ret = %d.\n", ret);
1514                goto err_tpq_init_failed;
1515        }
1516
1517        return 0;
1518
1519err_tpq_init_failed:
1520        hns_roce_free_link_table(hr_dev, &priv->tsq);
1521
1522        return ret;
1523}
1524
1525static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
1526{
1527        struct hns_roce_v2_priv *priv = hr_dev->priv;
1528
1529        hns_roce_free_link_table(hr_dev, &priv->tpq);
1530        hns_roce_free_link_table(hr_dev, &priv->tsq);
1531}
1532
1533static int hns_roce_query_mbox_status(struct hns_roce_dev *hr_dev)
1534{
1535        struct hns_roce_cmq_desc desc;
1536        struct hns_roce_mbox_status *mb_st =
1537                                       (struct hns_roce_mbox_status *)desc.data;
1538        enum hns_roce_cmd_return_status status;
1539
1540        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST, true);
1541
1542        status = hns_roce_cmq_send(hr_dev, &desc, 1);
1543        if (status)
1544                return status;
1545
1546        return le32_to_cpu(mb_st->mb_status_hw_run);
1547}
1548
1549static int hns_roce_v2_cmd_pending(struct hns_roce_dev *hr_dev)
1550{
1551        u32 status = hns_roce_query_mbox_status(hr_dev);
1552
1553        return status >> HNS_ROCE_HW_RUN_BIT_SHIFT;
1554}
1555
1556static int hns_roce_v2_cmd_complete(struct hns_roce_dev *hr_dev)
1557{
1558        u32 status = hns_roce_query_mbox_status(hr_dev);
1559
1560        return status & HNS_ROCE_HW_MB_STATUS_MASK;
1561}
1562
1563static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, u64 in_param,
1564                              u64 out_param, u32 in_modifier, u8 op_modifier,
1565                              u16 op, u16 token, int event)
1566{
1567        struct hns_roce_cmq_desc desc;
1568        struct hns_roce_post_mbox *mb = (struct hns_roce_post_mbox *)desc.data;
1569
1570        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_POST_MB, false);
1571
1572        mb->in_param_l = cpu_to_le32(in_param);
1573        mb->in_param_h = cpu_to_le32(in_param >> 32);
1574        mb->out_param_l = cpu_to_le32(out_param);
1575        mb->out_param_h = cpu_to_le32(out_param >> 32);
1576        mb->cmd_tag = cpu_to_le32(in_modifier << 8 | op);
1577        mb->token_event_en = cpu_to_le32(event << 16 | token);
1578
1579        return hns_roce_cmq_send(hr_dev, &desc, 1);
1580}
1581
1582static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
1583                                 u64 out_param, u32 in_modifier, u8 op_modifier,
1584                                 u16 op, u16 token, int event)
1585{
1586        struct device *dev = hr_dev->dev;
1587        unsigned long end;
1588        int ret;
1589
1590        end = msecs_to_jiffies(HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS) + jiffies;
1591        while (hns_roce_v2_cmd_pending(hr_dev)) {
1592                if (time_after(jiffies, end)) {
1593                        dev_dbg(dev, "jiffies=%lu end=%lu\n",
1594                                jiffies, end);
1595                        return -EAGAIN;
1596                }
1597                cond_resched();
1598        }
1599
1600        ret = hns_roce_mbox_post(hr_dev, in_param, out_param, in_modifier,
1601                                 op_modifier, op, token, event);
1602        if (ret)
1603                dev_err(dev, "Post mailbox failed (%d)\n", ret);
1604
1605        return ret;
1606}
1607
1608static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
1609                                unsigned long timeout)
1610{
1611        struct device *dev = hr_dev->dev;
1612        unsigned long end = 0;
1613        u32 status;
1614
1615        end = msecs_to_jiffies(timeout) + jiffies;
1616        while (hns_roce_v2_cmd_pending(hr_dev) && time_before(jiffies, end))
1617                cond_resched();
1618
1619        if (hns_roce_v2_cmd_pending(hr_dev)) {
1620                dev_err(dev, "[cmd_poll] hw run cmd timed out!\n");
1621                return -ETIMEDOUT;
1622        }
1623
1624        status = hns_roce_v2_cmd_complete(hr_dev);
1625        if (status != 0x1) {
1626                dev_err(dev, "mailbox status 0x%x!\n", status);
1627                return -EBUSY;
1628        }
1629
1630        return 0;
1631}
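
/*
 * Both waits above use the same jiffies-deadline pattern. A hypothetical
 * helper (sketch, not driver API) expressing it once:
 */
static inline int hr_wait_hw_idle(struct hns_roce_dev *hr_dev,
                                  unsigned long timeout_ms)
{
        unsigned long end = msecs_to_jiffies(timeout_ms) + jiffies;

        while (hns_roce_v2_cmd_pending(hr_dev)) {
                if (time_after(jiffies, end))
                        return -ETIMEDOUT;
                cond_resched();        /* yield the CPU between polls */
        }

        return 0;
}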
1632
1633static int hns_roce_config_sgid_table(struct hns_roce_dev *hr_dev,
1634                                      int gid_index, const union ib_gid *gid,
1635                                      enum hns_roce_sgid_type sgid_type)
1636{
1637        struct hns_roce_cmq_desc desc;
1638        struct hns_roce_cfg_sgid_tb *sgid_tb =
1639                                    (struct hns_roce_cfg_sgid_tb *)desc.data;
1640        u32 *p;
1641
1642        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SGID_TB, false);
1643
1644        roce_set_field(sgid_tb->table_idx_rsv,
1645                       CFG_SGID_TB_TABLE_IDX_M,
1646                       CFG_SGID_TB_TABLE_IDX_S, gid_index);
1647        roce_set_field(sgid_tb->vf_sgid_type_rsv,
1648                       CFG_SGID_TB_VF_SGID_TYPE_M,
1649                       CFG_SGID_TB_VF_SGID_TYPE_S, sgid_type);
1650
1651        p = (u32 *)&gid->raw[0];
1652        sgid_tb->vf_sgid_l = cpu_to_le32(*p);
1653
1654        p = (u32 *)&gid->raw[4];
1655        sgid_tb->vf_sgid_ml = cpu_to_le32(*p);
1656
1657        p = (u32 *)&gid->raw[8];
1658        sgid_tb->vf_sgid_mh = cpu_to_le32(*p);
1659
1660        p = (u32 *)&gid->raw[12];
1661        sgid_tb->vf_sgid_h = cpu_to_le32(*p);
1662
1663        return hns_roce_cmq_send(hr_dev, &desc, 1);
1664}
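
/*
 * Sketch only: the 128-bit GID is programmed as four 32-bit words, with
 * raw[0..3] in the lowest word. A hypothetical memcpy-based variant that
 * avoids the unaligned u32 loads above, assuming the table expects the
 * raw GID byte order:
 */
static inline void hr_pack_gid(struct hns_roce_cfg_sgid_tb *sgid_tb,
                               const union ib_gid *gid)
{
        __le32 w[4];

        memcpy(w, gid->raw, sizeof(w));
        sgid_tb->vf_sgid_l = w[0];
        sgid_tb->vf_sgid_ml = w[1];
        sgid_tb->vf_sgid_mh = w[2];
        sgid_tb->vf_sgid_h = w[3];
}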
1665
1666static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
1667                               int gid_index, const union ib_gid *gid,
1668                               const struct ib_gid_attr *attr)
1669{
1670        enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
1671        int ret;
1672
1673        if (!gid || !attr)
1674                return -EINVAL;
1675
1676        if (attr->gid_type == IB_GID_TYPE_ROCE)
1677                sgid_type = GID_TYPE_FLAG_ROCE_V1;
1678
1679        if (attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
1680                if (ipv6_addr_v4mapped((void *)gid))
1681                        sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV4;
1682                else
1683                        sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV6;
1684        }
1685
1686        ret = hns_roce_config_sgid_table(hr_dev, gid_index, gid, sgid_type);
1687        if (ret)
1688                dev_err(hr_dev->dev, "Configure sgid table failed (%d)!\n", ret);
1689
1690        return ret;
1691}
1692
1693static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
1694                               u8 *addr)
1695{
1696        struct hns_roce_cmq_desc desc;
1697        struct hns_roce_cfg_smac_tb *smac_tb =
1698                                    (struct hns_roce_cfg_smac_tb *)desc.data;
1699        u16 reg_smac_h;
1700        u32 reg_smac_l;
1701
1702        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SMAC_TB, false);
1703
1704        reg_smac_l = *(u32 *)(&addr[0]);
1705        reg_smac_h = *(u16 *)(&addr[4]);
1706
1707        memset(smac_tb, 0, sizeof(*smac_tb));
1708        roce_set_field(smac_tb->tb_idx_rsv,
1709                       CFG_SMAC_TB_IDX_M,
1710                       CFG_SMAC_TB_IDX_S, phy_port);
1711        roce_set_field(smac_tb->vf_smac_h_rsv,
1712                       CFG_SMAC_TB_VF_SMAC_H_M,
1713                       CFG_SMAC_TB_VF_SMAC_H_S, reg_smac_h);
1714        smac_tb->vf_smac_l = reg_smac_l;
1715
1716        return hns_roce_cmq_send(hr_dev, &desc, 1);
1717}
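
/*
 * Sketch (hypothetical helper): the 6-byte MAC is split into a 32-bit
 * low word (bytes 0-3) and a 16-bit high half (bytes 4-5) to match the
 * SMAC table layout. This variant builds the words explicitly rather
 * than using the native-endian loads above, assuming a little-endian
 * register layout:
 */
static inline void hr_split_mac(const u8 *addr, u32 *lo, u16 *hi)
{
        *lo = addr[0] | addr[1] << 8 | addr[2] << 16 | (u32)addr[3] << 24;
        *hi = addr[4] | addr[5] << 8;
}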
1718
1719static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
1720                        struct hns_roce_mr *mr)
1721{
1722        struct sg_dma_page_iter sg_iter;
1723        u64 page_addr;
1724        u64 *pages;
1725        int i;
1726
1727        mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
1728        mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
1729        roce_set_field(mpt_entry->byte_48_mode_ba,
1730                       V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S,
1731                       upper_32_bits(mr->pbl_ba >> 3));
1732
1733        pages = (u64 *)__get_free_page(GFP_KERNEL);
1734        if (!pages)
1735                return -ENOMEM;
1736
1737        i = 0;
1738        for_each_sg_dma_page(mr->umem->sg_head.sgl, &sg_iter, mr->umem->nmap, 0) {
1739                page_addr = sg_page_iter_dma_address(&sg_iter);
1740                pages[i] = page_addr >> 6;
1741
1742                /* Record the first two entries directly in the MTPT table */
1743                if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
1744                        goto found;
1745                i++;
1746        }
1747found:
1748        mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
1749        roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
1750                       V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0]));
1751
1752        mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
1753        roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
1754                       V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
1755        roce_set_field(mpt_entry->byte_64_buf_pa1,
1756                       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
1757                       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
1758                       mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
1759
1760        free_page((unsigned long)pages);
1761
1762        return 0;
1763}
1764
1765static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
1766                                  unsigned long mtpt_idx)
1767{
1768        struct hns_roce_v2_mpt_entry *mpt_entry;
1769        int ret;
1770
1771        mpt_entry = mb_buf;
1772        memset(mpt_entry, 0, sizeof(*mpt_entry));
1773
1774        roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
1775                       V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
1776        roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
1777                       V2_MPT_BYTE_4_PBL_HOP_NUM_S, mr->pbl_hop_num ==
1778                       HNS_ROCE_HOP_NUM_0 ? 0 : mr->pbl_hop_num);
1779        roce_set_field(mpt_entry->byte_4_pd_hop_st,
1780                       V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
1781                       V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
1782                       mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
1783        roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
1784                       V2_MPT_BYTE_4_PD_S, mr->pd);
1785
1786        roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
1787        roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
1788        roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 0);
1789        roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S,
1790                     (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
1791        roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S, 0);
1792        roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
1793                     (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
1794        roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
1795                     (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
1796        roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
1797                     (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
1798
1799        roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
1800                     mr->type == MR_TYPE_MR ? 0 : 1);
1801        roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S,
1802                     1);
1803
1804        mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
1805        mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
1806        mpt_entry->lkey = cpu_to_le32(mr->key);
1807        mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
1808        mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
1809
1810        if (mr->type == MR_TYPE_DMA)
1811                return 0;
1812
1813        ret = set_mtpt_pbl(mpt_entry, mr);
1814
1815        return ret;
1816}
1817
1818static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
1819                                        struct hns_roce_mr *mr, int flags,
1820                                        u32 pdn, int mr_access_flags, u64 iova,
1821                                        u64 size, void *mb_buf)
1822{
1823        struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
1824        int ret = 0;
1825
1826        if (flags & IB_MR_REREG_PD) {
1827                roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
1828                               V2_MPT_BYTE_4_PD_S, pdn);
1829                mr->pd = pdn;
1830        }
1831
1832        if (flags & IB_MR_REREG_ACCESS) {
1833                roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
1834                             V2_MPT_BYTE_8_BIND_EN_S,
1835                             (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
1836                roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
1837                             V2_MPT_BYTE_8_ATOMIC_EN_S,
1838                             mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
1839                roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
1840                             mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0);
1841                roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
1842                             mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
1843                roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
1844                             mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
1845        }
1846
1847        if (flags & IB_MR_REREG_TRANS) {
1848                mpt_entry->va_l = cpu_to_le32(lower_32_bits(iova));
1849                mpt_entry->va_h = cpu_to_le32(upper_32_bits(iova));
1850                mpt_entry->len_l = cpu_to_le32(lower_32_bits(size));
1851                mpt_entry->len_h = cpu_to_le32(upper_32_bits(size));
1852
1853                mr->iova = iova;
1854                mr->size = size;
1855
1856                ret = set_mtpt_pbl(mpt_entry, mr);
1857        }
1858
1859        return ret;
1860}
1861
1862static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
1863{
1864        return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
1865                                   n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
1866}
1867
1868static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
1869{
1870        struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);
1871
1872        /* Get cqe when Owner bit is Conversely with the MSB of cons_idx */
1873        /* A CQE is valid when its owner bit is the inverse of the cons_idx MSB wrap bit */
1874                !!(n & (hr_cq->ib_cq.cqe + 1))) ? cqe : NULL;
1875}
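
/*
 * Sketch of the ownership test above (names hypothetical): the ring
 * holds ib_cq.cqe + 1 entries (a power of two), the producer flips the
 * owner bit on every wrap, and a CQE belongs to software when its owner
 * bit differs from the consumer's wrap parity.
 */
static inline bool hr_cqe_is_sw_owned(u32 owner_bit, u32 cons_idx,
                                      u32 ring_size)
{
        /* !!(cons_idx & ring_size) is the wrap parity of the consumer. */
        return (owner_bit ^ !!(cons_idx & ring_size)) != 0;
}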
1876
1877static struct hns_roce_v2_cqe *next_cqe_sw_v2(struct hns_roce_cq *hr_cq)
1878{
1879        return get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
1880}
1881
1882static void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
1883{
1884        *hr_cq->set_ci_db = cons_index & 0xffffff;
1885}
1886
1887static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
1888                                   struct hns_roce_srq *srq)
1889{
1890        struct hns_roce_v2_cqe *cqe, *dest;
1891        u32 prod_index;
1892        int nfreed = 0;
1893        u8 owner_bit;
1894
1895        for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index);
1896             ++prod_index) {
1897                if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe)
1898                        break;
1899        }
1900
1901        /*
1902         * Now backwards through the CQ, removing CQ entries
1903         * that match our QP by overwriting them with next entries.
1904         */
1905        while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
1906                cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe);
1907                if ((roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
1908                                    V2_CQE_BYTE_16_LCL_QPN_S) &
1909                                    HNS_ROCE_V2_CQE_QPN_MASK) == qpn) {
1910                        /* SRQ is not handled here, so the matching CQE is simply dropped */
1911                        ++nfreed;
1912                } else if (nfreed) {
1913                        dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
1914                                          hr_cq->ib_cq.cqe);
1915                        owner_bit = roce_get_bit(dest->byte_4,
1916                                                 V2_CQE_BYTE_4_OWNER_S);
1917                        memcpy(dest, cqe, sizeof(*cqe));
1918                        roce_set_bit(dest->byte_4, V2_CQE_BYTE_4_OWNER_S,
1919                                     owner_bit);
1920                }
1921        }
1922
1923        if (nfreed) {
1924                hr_cq->cons_index += nfreed;
1925                /*
1926                 * Make sure update of buffer contents is done before
1927                 * updating consumer index.
1928                 */
1929                wmb();
1930                hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
1931        }
1932}
1933
1934static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
1935                                 struct hns_roce_srq *srq)
1936{
1937        spin_lock_irq(&hr_cq->lock);
1938        __hns_roce_v2_cq_clean(hr_cq, qpn, srq);
1939        spin_unlock_irq(&hr_cq->lock);
1940}
1941
1942static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
1943                                  struct hns_roce_cq *hr_cq, void *mb_buf,
1944                                  u64 *mtts, dma_addr_t dma_handle, int nent,
1945                                  u32 vector)
1946{
1947        struct hns_roce_v2_cq_context *cq_context;
1948
1949        cq_context = mb_buf;
1950        memset(cq_context, 0, sizeof(*cq_context));
1951
1952        roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CQ_ST_M,
1953                       V2_CQC_BYTE_4_CQ_ST_S, V2_CQ_STATE_VALID);
1954        roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_ARM_ST_M,
1955                       V2_CQC_BYTE_4_ARM_ST_S, REG_NXT_CEQE);
1956        roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
1957                       V2_CQC_BYTE_4_SHIFT_S, ilog2((unsigned int)nent));
1958        roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
1959                       V2_CQC_BYTE_4_CEQN_S, vector);
1960        cq_context->byte_4_pg_ceqn = cpu_to_le32(cq_context->byte_4_pg_ceqn);
1961
1962        roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
1963                       V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);
1964
1965        cq_context->cqe_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
1966        cq_context->cqe_cur_blk_addr =
1967                                cpu_to_le32(cq_context->cqe_cur_blk_addr);
1968
1969        roce_set_field(cq_context->byte_16_hop_addr,
1970                       V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M,
1971                       V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S,
1972                       cpu_to_le32((mtts[0]) >> (32 + PAGE_ADDR_SHIFT)));
1973        roce_set_field(cq_context->byte_16_hop_addr,
1974                       V2_CQC_BYTE_16_CQE_HOP_NUM_M,
1975                       V2_CQC_BYTE_16_CQE_HOP_NUM_S, hr_dev->caps.cqe_hop_num ==
1976                       HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);
1977
1978        cq_context->cqe_nxt_blk_addr = (u32)(mtts[1] >> PAGE_ADDR_SHIFT);
1979        roce_set_field(cq_context->byte_24_pgsz_addr,
1980                       V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M,
1981                       V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S,
1982                       cpu_to_le32((mtts[1]) >> (32 + PAGE_ADDR_SHIFT)));
1983        roce_set_field(cq_context->byte_24_pgsz_addr,
1984                       V2_CQC_BYTE_24_CQE_BA_PG_SZ_M,
1985                       V2_CQC_BYTE_24_CQE_BA_PG_SZ_S,
1986                       hr_dev->caps.cqe_ba_pg_sz + PG_SHIFT_OFFSET);
1987        roce_set_field(cq_context->byte_24_pgsz_addr,
1988                       V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M,
1989                       V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S,
1990                       hr_dev->caps.cqe_buf_pg_sz + PG_SHIFT_OFFSET);
1991
1992        cq_context->cqe_ba = (u32)(dma_handle >> 3);
1993
1994        roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M,
1995                       V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3)));
1996
1997        if (hr_cq->db_en)
1998                roce_set_bit(cq_context->byte_44_db_record,
1999                             V2_CQC_BYTE_44_DB_RECORD_EN_S, 1);
2000
2001        roce_set_field(cq_context->byte_44_db_record,
2002                       V2_CQC_BYTE_44_DB_RECORD_ADDR_M,
2003                       V2_CQC_BYTE_44_DB_RECORD_ADDR_S,
2004                       ((u32)hr_cq->db.dma) >> 1);
2005        cq_context->db_record_addr = hr_cq->db.dma >> 32;
2006
2007        roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
2008                       V2_CQC_BYTE_56_CQ_MAX_CNT_M,
2009                       V2_CQC_BYTE_56_CQ_MAX_CNT_S,
2010                       HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
2011        roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
2012                       V2_CQC_BYTE_56_CQ_PERIOD_M,
2013                       V2_CQC_BYTE_56_CQ_PERIOD_S,
2014                       HNS_ROCE_V2_CQ_DEFAULT_INTERVAL);
2015}
2016
2017static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
2018                                     enum ib_cq_notify_flags flags)
2019{
2020        struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2021        u32 notification_flag;
2022        u32 doorbell[2];
2023
2024        doorbell[0] = 0;
2025        doorbell[1] = 0;
2026
2027        notification_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
2028                             V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
2029        /*
2030         * flags = 0: notification flag = 1, notify on the next completion
2031         * flags = 1: notification flag = 0, notify on solicited completions only
2032         */
2033        roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_TAG_M, V2_DB_BYTE_4_TAG_S,
2034                       hr_cq->cqn);
2035        roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_CMD_M, V2_DB_BYTE_4_CMD_S,
2036                       HNS_ROCE_V2_CQ_DB_NTR);
2037        roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CONS_IDX_M,
2038                       V2_CQ_DB_PARAMETER_CONS_IDX_S,
2039                       hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
2040        roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CMD_SN_M,
2041                       V2_CQ_DB_PARAMETER_CMD_SN_S, hr_cq->arm_sn & 0x3);
2042        roce_set_bit(doorbell[1], V2_CQ_DB_PARAMETER_NOTIFY_S,
2043                     notification_flag);
2044
2045        hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
2046
2047        return 0;
2048}
2049
2050static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
2051                                                    struct hns_roce_qp **cur_qp,
2052                                                    struct ib_wc *wc)
2053{
2054        struct hns_roce_rinl_sge *sge_list;
2055        u32 wr_num, wr_cnt, sge_num;
2056        u32 sge_cnt, data_len, size;
2057        void *wqe_buf;
2058
2059        wr_num = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M,
2060                                V2_CQE_BYTE_4_WQE_INDX_S) & 0xffff;
2061        wr_cnt = wr_num & ((*cur_qp)->rq.wqe_cnt - 1);
2062
2063        sge_list = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sg_list;
2064        sge_num = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
2065        wqe_buf = get_recv_wqe(*cur_qp, wr_cnt);
2066        data_len = wc->byte_len;
2067
2068        for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
2069                size = min(sge_list[sge_cnt].len, data_len);
2070                memcpy((void *)sge_list[sge_cnt].addr, wqe_buf, size);
2071
2072                data_len -= size;
2073                wqe_buf += size;
2074        }
2075
2076        if (data_len) {
2077                wc->status = IB_WC_LOC_LEN_ERR;
2078                return -EAGAIN;
2079        }
2080
2081        return 0;
2082}
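
/*
 * The copy loop above is a plain scatter copy: len bytes from one
 * contiguous source are spread across the posted SGEs. A generic sketch
 * with hypothetical names:
 */
static inline int hr_scatter_copy(struct hns_roce_rinl_sge *sges, u32 nsge,
                                  const void *src, u32 len)
{
        u32 i, size;

        for (i = 0; i < nsge && len; i++) {
                size = min(sges[i].len, len);
                memcpy((void *)sges[i].addr, src, size);
                src += size;
                len -= size;
        }

        /* Leftover bytes mean the receive WQE's SGEs were too small. */
        return len ? -EAGAIN : 0;
}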
2083
2084static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
2085                                struct hns_roce_qp **cur_qp, struct ib_wc *wc)
2086{
2087        struct hns_roce_dev *hr_dev;
2088        struct hns_roce_v2_cqe *cqe;
2089        struct hns_roce_qp *hr_qp;
2090        struct hns_roce_wq *wq;
2091        struct ib_qp_attr attr;
2092        int attr_mask;
2093        int is_send;
2094        u16 wqe_ctr;
2095        u32 opcode;
2096        u32 status;
2097        int qpn;
2098        int ret;
2099
2100        /* Find cqe according to consumer index */
2101        cqe = next_cqe_sw_v2(hr_cq);
2102        if (!cqe)
2103                return -EAGAIN;
2104
2105        ++hr_cq->cons_index;
2106        /* Memory barrier */
2107        rmb();
2108
2109        /* 0->SQ, 1->RQ */
2110        is_send = !roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S);
2111
2112        qpn = roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
2113                                V2_CQE_BYTE_16_LCL_QPN_S);
2114
2115        if (!*cur_qp || (qpn & HNS_ROCE_V2_CQE_QPN_MASK) != (*cur_qp)->qpn) {
2116                hr_dev = to_hr_dev(hr_cq->ib_cq.device);
2117                hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
2118                if (unlikely(!hr_qp)) {
2119                        dev_err(hr_dev->dev, "CQ %06lx with entry for unknown QPN %06x\n",
2120                                hr_cq->cqn, (qpn & HNS_ROCE_V2_CQE_QPN_MASK));
2121                        return -EINVAL;
2122                }
2123                *cur_qp = hr_qp;
2124        }
2125
2126        wc->qp = &(*cur_qp)->ibqp;
2127        wc->vendor_err = 0;
2128
2129        status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M,
2130                                V2_CQE_BYTE_4_STATUS_S);
2131        switch (status & HNS_ROCE_V2_CQE_STATUS_MASK) {
2132        case HNS_ROCE_CQE_V2_SUCCESS:
2133                wc->status = IB_WC_SUCCESS;
2134                break;
2135        case HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR:
2136                wc->status = IB_WC_LOC_LEN_ERR;
2137                break;
2138        case HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR:
2139                wc->status = IB_WC_LOC_QP_OP_ERR;
2140                break;
2141        case HNS_ROCE_CQE_V2_LOCAL_PROT_ERR:
2142                wc->status = IB_WC_LOC_PROT_ERR;
2143                break;
2144        case HNS_ROCE_CQE_V2_WR_FLUSH_ERR:
2145                wc->status = IB_WC_WR_FLUSH_ERR;
2146                break;
2147        case HNS_ROCE_CQE_V2_MW_BIND_ERR:
2148                wc->status = IB_WC_MW_BIND_ERR;
2149                break;
2150        case HNS_ROCE_CQE_V2_BAD_RESP_ERR:
2151                wc->status = IB_WC_BAD_RESP_ERR;
2152                break;
2153        case HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR:
2154                wc->status = IB_WC_LOC_ACCESS_ERR;
2155                break;
2156        case HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR:
2157                wc->status = IB_WC_REM_INV_REQ_ERR;
2158                break;
2159        case HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR:
2160                wc->status = IB_WC_REM_ACCESS_ERR;
2161                break;
2162        case HNS_ROCE_CQE_V2_REMOTE_OP_ERR:
2163                wc->status = IB_WC_REM_OP_ERR;
2164                break;
2165        case HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR:
2166                wc->status = IB_WC_RETRY_EXC_ERR;
2167                break;
2168        case HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR:
2169                wc->status = IB_WC_RNR_RETRY_EXC_ERR;
2170                break;
2171        case HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR:
2172                wc->status = IB_WC_REM_ABORT_ERR;
2173                break;
2174        default:
2175                wc->status = IB_WC_GENERAL_ERR;
2176                break;
2177        }
2178
2179        /* flush cqe if wc status is error, excluding flush error */
2180        if ((wc->status != IB_WC_SUCCESS) &&
2181            (wc->status != IB_WC_WR_FLUSH_ERR)) {
2182                attr_mask = IB_QP_STATE;
2183                attr.qp_state = IB_QPS_ERR;
2184                return hns_roce_v2_modify_qp(&(*cur_qp)->ibqp,
2185                                             &attr, attr_mask,
2186                                             (*cur_qp)->state, IB_QPS_ERR);
2187        }
2188
2189        if (wc->status == IB_WC_WR_FLUSH_ERR)
2190                return 0;
2191
2192        if (is_send) {
2193                wc->wc_flags = 0;
2194                /* SQ corresponding to CQE */
2195                switch (roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
2196                                       V2_CQE_BYTE_4_OPCODE_S) & 0x1f) {
2197                case HNS_ROCE_SQ_OPCODE_SEND:
2198                        wc->opcode = IB_WC_SEND;
2199                        break;
2200                case HNS_ROCE_SQ_OPCODE_SEND_WITH_INV:
2201                        wc->opcode = IB_WC_SEND;
2202                        break;
2203                case HNS_ROCE_SQ_OPCODE_SEND_WITH_IMM:
2204                        wc->opcode = IB_WC_SEND;
2205                        wc->wc_flags |= IB_WC_WITH_IMM;
2206                        break;
2207                case HNS_ROCE_SQ_OPCODE_RDMA_READ:
2208                        wc->opcode = IB_WC_RDMA_READ;
2209                        wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2210                        break;
2211                case HNS_ROCE_SQ_OPCODE_RDMA_WRITE:
2212                        wc->opcode = IB_WC_RDMA_WRITE;
2213                        break;
2214                case HNS_ROCE_SQ_OPCODE_RDMA_WRITE_WITH_IMM:
2215                        wc->opcode = IB_WC_RDMA_WRITE;
2216                        wc->wc_flags |= IB_WC_WITH_IMM;
2217                        break;
2218                case HNS_ROCE_SQ_OPCODE_LOCAL_INV:
2219                        wc->opcode = IB_WC_LOCAL_INV;
2220                        wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2221                        break;
2222                case HNS_ROCE_SQ_OPCODE_ATOMIC_COMP_AND_SWAP:
2223                        wc->opcode = IB_WC_COMP_SWAP;
2224                        wc->byte_len  = 8;
2225                        break;
2226                case HNS_ROCE_SQ_OPCODE_ATOMIC_FETCH_AND_ADD:
2227                        wc->opcode = IB_WC_FETCH_ADD;
2228                        wc->byte_len  = 8;
2229                        break;
2230                case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_COMP_AND_SWAP:
2231                        wc->opcode = IB_WC_MASKED_COMP_SWAP;
2232                        wc->byte_len  = 8;
2233                        break;
2234                case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_FETCH_AND_ADD:
2235                        wc->opcode = IB_WC_MASKED_FETCH_ADD;
2236                        wc->byte_len  = 8;
2237                        break;
2238                case HNS_ROCE_SQ_OPCODE_FAST_REG_WR:
2239                        wc->opcode = IB_WC_REG_MR;
2240                        break;
2241                case HNS_ROCE_SQ_OPCODE_BIND_MW:
2242                        wc->opcode = IB_WC_REG_MR;
2243                        break;
2244                default:
2245                        wc->status = IB_WC_GENERAL_ERR;
2246                        break;
2247                }
2248
2249                wq = &(*cur_qp)->sq;
2250                if ((*cur_qp)->sq_signal_bits) {
2251                        /*
2252                         * If sq_signal_bits is set, first advance the
2253                         * tail pointer to the WQE that this CQE
2254                         * corresponds to.
2255                         */
2256                        wqe_ctr = (u16)roce_get_field(cqe->byte_4,
2257                                                      V2_CQE_BYTE_4_WQE_INDX_M,
2258                                                      V2_CQE_BYTE_4_WQE_INDX_S);
2259                        wq->tail += (wqe_ctr - (u16)wq->tail) &
2260                                    (wq->wqe_cnt - 1);
2261                }
2262
2263                wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2264                ++wq->tail;
2265        } else {
2266                /* RQ entry corresponding to this CQE */
2267                wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2268
2269                opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
2270                                        V2_CQE_BYTE_4_OPCODE_S);
2271                switch (opcode & 0x1f) {
2272                case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
2273                        wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2274                        wc->wc_flags = IB_WC_WITH_IMM;
2275                        wc->ex.imm_data =
2276                                cpu_to_be32(le32_to_cpu(cqe->immtdata));
2277                        break;
2278                case HNS_ROCE_V2_OPCODE_SEND:
2279                        wc->opcode = IB_WC_RECV;
2280                        wc->wc_flags = 0;
2281                        break;
2282                case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
2283                        wc->opcode = IB_WC_RECV;
2284                        wc->wc_flags = IB_WC_WITH_IMM;
2285                        wc->ex.imm_data =
2286                                cpu_to_be32(le32_to_cpu(cqe->immtdata));
2287                        break;
2288                case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
2289                        wc->opcode = IB_WC_RECV;
2290                        wc->wc_flags = IB_WC_WITH_INVALIDATE;
2291                        wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
2292                        break;
2293                default:
2294                        wc->status = IB_WC_GENERAL_ERR;
2295                        break;
2296                }
2297
2298                if ((wc->qp->qp_type == IB_QPT_RC ||
2299                     wc->qp->qp_type == IB_QPT_UC) &&
2300                    (opcode == HNS_ROCE_V2_OPCODE_SEND ||
2301                    opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
2302                    opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
2303                    (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S))) {
2304                        ret = hns_roce_handle_recv_inl_wqe(cqe, cur_qp, wc);
2305                        if (ret)
2306                                return -EAGAIN;
2307                }
2308
2309                /* Update tail pointer, record wr_id */
2310                wq = &(*cur_qp)->rq;
2311                wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2312                ++wq->tail;
2313
2314                wc->sl = (u8)roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_SL_M,
2315                                            V2_CQE_BYTE_32_SL_S);
2316                wc->src_qp = (u8)roce_get_field(cqe->byte_32,
2317                                                V2_CQE_BYTE_32_RMT_QPN_M,
2318                                                V2_CQE_BYTE_32_RMT_QPN_S);
2319                wc->slid = 0;
2320                wc->wc_flags |= (roce_get_bit(cqe->byte_32,
2321                                              V2_CQE_BYTE_32_GRH_S) ?
2322                                              IB_WC_GRH : 0);
2323                wc->port_num = roce_get_field(cqe->byte_32,
2324                                V2_CQE_BYTE_32_PORTN_M, V2_CQE_BYTE_32_PORTN_S);
2325                wc->pkey_index = 0;
2326                memcpy(wc->smac, cqe->smac, 4);
2327                wc->smac[4] = roce_get_field(cqe->byte_28,
2328                                             V2_CQE_BYTE_28_SMAC_4_M,
2329                                             V2_CQE_BYTE_28_SMAC_4_S);
2330                wc->smac[5] = roce_get_field(cqe->byte_28,
2331                                             V2_CQE_BYTE_28_SMAC_5_M,
2332                                             V2_CQE_BYTE_28_SMAC_5_S);
2333                wc->vlan_id = 0xffff;
2334                wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
2335                wc->network_hdr_type = roce_get_field(cqe->byte_28,
2336                                                    V2_CQE_BYTE_28_PORT_TYPE_M,
2337                                                    V2_CQE_BYTE_28_PORT_TYPE_S);
2338        }
2339
2340        return 0;
2341}
2342
2343static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
2344                               struct ib_wc *wc)
2345{
2346        struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2347        struct hns_roce_qp *cur_qp = NULL;
2348        unsigned long flags;
2349        int npolled;
2350
2351        spin_lock_irqsave(&hr_cq->lock, flags);
2352
2353        for (npolled = 0; npolled < num_entries; ++npolled) {
2354                if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled))
2355                        break;
2356        }
2357
2358        if (npolled) {
2359                /* Memory barrier */
2360                wmb();
2361                hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
2362        }
2363
2364        spin_unlock_irqrestore(&hr_cq->lock, flags);
2365
2366        return npolled;
2367}
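
/*
 * Consumer-side sketch (assumption: standard verbs usage, not part of
 * this driver): the poll routine above is normally driven through
 * ib_poll_cq() in a batch loop like this.
 */
static inline void hr_drain_cq_example(struct ib_cq *cq)
{
        struct ib_wc wc[16];
        int n, i;

        while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0)
                for (i = 0; i < n; i++)
                        if (wc[i].status != IB_WC_SUCCESS)
                                pr_err("wr_id %llu failed: status %d\n",
                                       wc[i].wr_id, wc[i].status);
}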
2368
2369static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
2370                               struct hns_roce_hem_table *table, int obj,
2371                               int step_idx)
2372{
2373        struct device *dev = hr_dev->dev;
2374        struct hns_roce_cmd_mailbox *mailbox;
2375        struct hns_roce_hem_iter iter;
2376        struct hns_roce_hem_mhop mhop;
2377        struct hns_roce_hem *hem;
2378        unsigned long mhop_obj = obj;
2379        int i, j, k;
2380        int ret = 0;
2381        u64 hem_idx = 0;
2382        u64 l1_idx = 0;
2383        u64 bt_ba = 0;
2384        u32 chunk_ba_num;
2385        u32 hop_num;
2386        u16 op = 0xff;
2387
2388        if (!hns_roce_check_whether_mhop(hr_dev, table->type))
2389                return 0;
2390
2391        hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
2392        i = mhop.l0_idx;
2393        j = mhop.l1_idx;
2394        k = mhop.l2_idx;
2395        hop_num = mhop.hop_num;
2396        chunk_ba_num = mhop.bt_chunk_size / 8;
2397
2398        if (hop_num == 2) {
2399                hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num +
2400                          k;
2401                l1_idx = i * chunk_ba_num + j;
2402        } else if (hop_num == 1) {
2403                hem_idx = i * chunk_ba_num + j;
2404        } else if (hop_num == HNS_ROCE_HOP_NUM_0) {
2405                hem_idx = i;
2406        }
2407
2408        switch (table->type) {
2409        case HEM_TYPE_QPC:
2410                op = HNS_ROCE_CMD_WRITE_QPC_BT0;
2411                break;
2412        case HEM_TYPE_MTPT:
2413                op = HNS_ROCE_CMD_WRITE_MPT_BT0;
2414                break;
2415        case HEM_TYPE_CQC:
2416                op = HNS_ROCE_CMD_WRITE_CQC_BT0;
2417                break;
2418        case HEM_TYPE_SRQC:
2419                op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
2420                break;
2421        default:
2422                dev_warn(dev, "Table type %d is not written through the mailbox!\n",
2423                         table->type);
2424                return 0;
2425        }
2426        op += step_idx;
2427
2428        mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
2429        if (IS_ERR(mailbox))
2430                return PTR_ERR(mailbox);
2431
2432        if (check_whether_last_step(hop_num, step_idx)) {
2433                hem = table->hem[hem_idx];
2434                for (hns_roce_hem_first(hem, &iter);
2435                     !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
2436                        bt_ba = hns_roce_hem_addr(&iter);
2437
2438                        /* configure the ba, tag, and op */
2439                        ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma,
2440                                                obj, 0, op,
2441                                                HNS_ROCE_CMD_TIMEOUT_MSECS);
2442                }
2443        } else {
2444                if (step_idx == 0)
2445                        bt_ba = table->bt_l0_dma_addr[i];
2446                else if (step_idx == 1 && hop_num == 2)
2447                        bt_ba = table->bt_l1_dma_addr[l1_idx];
2448
2449                /* configure the ba, tag, and op */
2450                ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma, obj,
2451                                        0, op, HNS_ROCE_CMD_TIMEOUT_MSECS);
2452        }
2453
2454        hns_roce_free_cmd_mailbox(hr_dev, mailbox);
2455        return ret;
2456}
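
/*
 * Sketch of the index arithmetic above (hypothetical helper): with
 * chunk_ba_num entries per BT chunk, (l0, l1, l2) is flattened like a
 * mixed-radix number depending on the hop count.
 */
static inline u64 hr_mhop_flatten(u32 hop_num, u32 radix, int i, int j, int k)
{
        if (hop_num == 2)
                return (u64)i * radix * radix + (u64)j * radix + k;
        if (hop_num == 1)
                return (u64)i * radix + j;

        return i;        /* HNS_ROCE_HOP_NUM_0: the l0 index is the HEM index */
}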
2457
2458static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
2459                                 struct hns_roce_hem_table *table, int obj,
2460                                 int step_idx)
2461{
2462        struct device *dev = hr_dev->dev;
2463        struct hns_roce_cmd_mailbox *mailbox;
2464        int ret = 0;
2465        u16 op = 0xff;
2466
2467        if (!hns_roce_check_whether_mhop(hr_dev, table->type))
2468                return 0;
2469
2470        switch (table->type) {
2471        case HEM_TYPE_QPC:
2472                op = HNS_ROCE_CMD_DESTROY_QPC_BT0;
2473                break;
2474        case HEM_TYPE_MTPT:
2475                op = HNS_ROCE_CMD_DESTROY_MPT_BT0;
2476                break;
2477        case HEM_TYPE_CQC:
2478                op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
2479                break;
2480        case HEM_TYPE_SRQC:
2481                op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
2482                break;
2483        default:
2484                dev_warn(dev, "Table type %d is not destroyed through the mailbox!\n",
2485                         table->type);
2486                return 0;
2487        }
2488        op += step_idx;
2489
2490        mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
2491        if (IS_ERR(mailbox))
2492                return PTR_ERR(mailbox);
2493
2494        /* configure the tag and op */
2495        ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, obj, 0, op,
2496                                HNS_ROCE_CMD_TIMEOUT_MSECS);
2497
2498        hns_roce_free_cmd_mailbox(hr_dev, mailbox);
2499        return ret;
2500}
2501
2502static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
2503                                 struct hns_roce_mtt *mtt,
2504                                 enum ib_qp_state cur_state,
2505                                 enum ib_qp_state new_state,
2506                                 struct hns_roce_v2_qp_context *context,
2507                                 struct hns_roce_qp *hr_qp)
2508{
2509        struct hns_roce_cmd_mailbox *mailbox;
2510        int ret;
2511
2512        mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
2513        if (IS_ERR(mailbox))
2514                return PTR_ERR(mailbox);
2515
2516        memcpy(mailbox->buf, context, sizeof(*context) * 2);
2517
2518        ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
2519                                HNS_ROCE_CMD_MODIFY_QPC,
2520                                HNS_ROCE_CMD_TIMEOUT_MSECS);
2521
2522        hns_roce_free_cmd_mailbox(hr_dev, mailbox);
2523
2524        return ret;
2525}
2526
2527static void set_access_flags(struct hns_roce_qp *hr_qp,
2528                             struct hns_roce_v2_qp_context *context,
2529                             struct hns_roce_v2_qp_context *qpc_mask,
2530                             const struct ib_qp_attr *attr, int attr_mask)
2531{
2532        u8 dest_rd_atomic;
2533        u32 access_flags;
2534
2535        dest_rd_atomic = (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ?
2536                         attr->max_dest_rd_atomic : hr_qp->resp_depth;
2537
2538        access_flags = (attr_mask & IB_QP_ACCESS_FLAGS) ?
2539                       attr->qp_access_flags : hr_qp->atomic_rd_en;
2540
2541        if (!dest_rd_atomic)
2542                access_flags &= IB_ACCESS_REMOTE_WRITE;
2543
2544        roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
2545                     !!(access_flags & IB_ACCESS_REMOTE_READ));
2546        roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);
2547
2548        roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
2549                     !!(access_flags & IB_ACCESS_REMOTE_WRITE));
2550        roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0);
2551
2552        roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
2553                     !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
2554        roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
2555}
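
/*
 * Illustration of the context/mask convention used above: programming a
 * single bit such as RRE takes a pair of writes -
 *
 *	roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 1);
 *	roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);
 *
 * The 0 written to the mask marks the bit as "apply from context", while
 * mask bits left at 1 leave the corresponding QPC bits untouched.
 */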
2556
2557static void modify_qp_reset_to_init(struct ib_qp *ibqp,
2558                                    const struct ib_qp_attr *attr,
2559                                    int attr_mask,
2560                                    struct hns_roce_v2_qp_context *context,
2561                                    struct hns_roce_v2_qp_context *qpc_mask)
2562{
2563        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
2564        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
2565
2566        /*
2567         * In the v2 engine, software passes a context and a context mask
2568         * to hardware when modifying a QP. For each field to be modified,
2569         * every bit of that field in the context mask must be cleared to
2570         * 0; mask bits left at 0x1 keep the corresponding fields unchanged.
2571         */
2572        roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
2573                       V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
2574        roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
2575                       V2_QPC_BYTE_4_TST_S, 0);
2576
2577        if (ibqp->qp_type == IB_QPT_GSI)
2578                roce_set_field(context->byte_4_sqpn_tst,
2579                               V2_QPC_BYTE_4_SGE_SHIFT_M,
2580                               V2_QPC_BYTE_4_SGE_SHIFT_S,
2581                               ilog2((unsigned int)hr_qp->sge.sge_cnt));
2582        else
2583                roce_set_field(context->byte_4_sqpn_tst,
2584                               V2_QPC_BYTE_4_SGE_SHIFT_M,
2585                               V2_QPC_BYTE_4_SGE_SHIFT_S,
2586                               hr_qp->sq.max_gs > 2 ?
2587                               ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
2588
2589        roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
2590                       V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
2591
2592        roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
2593                       V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
2594        roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
2595                       V2_QPC_BYTE_4_SQPN_S, 0);
2596
2597        roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
2598                       V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
2599        roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
2600                       V2_QPC_BYTE_16_PD_S, 0);
2601
2602        roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
2603                       V2_QPC_BYTE_20_RQWS_S, ilog2(hr_qp->rq.max_gs));
2604        roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
2605                       V2_QPC_BYTE_20_RQWS_S, 0);
2606
2607        roce_set_field(context->byte_20_smac_sgid_idx,
2608                       V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
2609                       ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2610        roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
2611                       V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);
2612
2613        roce_set_field(context->byte_20_smac_sgid_idx,
2614                       V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
2615                       ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2616        roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
2617                       V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
2618
2619        /* Without a VLAN, the VLAN id field must be set to 0xFFF */
2620        roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
2621                       V2_QPC_BYTE_24_VLAN_ID_S, 0xfff);
2622        roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
2623                       V2_QPC_BYTE_24_VLAN_ID_S, 0);
2624
2625        /*
2626         * Some fields must become zero. Because every field of the
2627         * zero-initialised context is already 0, they need not be written
2628         * again; only the relevant fields of the context mask are cleared.
2629         */
2630        roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_TX_ERR_S, 0);
2631        roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_RX_ERR_S, 0);
2632        roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_TX_ERR_S, 0);
2633        roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_RX_ERR_S, 0);
2634
2635        roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_TEMPID_M,
2636                       V2_QPC_BYTE_60_TEMPID_S, 0);
2637
2638        roce_set_field(qpc_mask->byte_60_qpst_tempid,
2639                       V2_QPC_BYTE_60_SCC_TOKEN_M, V2_QPC_BYTE_60_SCC_TOKEN_S,
2640                       0);
2641        roce_set_bit(qpc_mask->byte_60_qpst_tempid,
2642                     V2_QPC_BYTE_60_SQ_DB_DOING_S, 0);
2643        roce_set_bit(qpc_mask->byte_60_qpst_tempid,
2644                     V2_QPC_BYTE_60_RQ_DB_DOING_S, 0);
2645        roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0);
2646        roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0);
2647
2648        if (attr_mask & IB_QP_QKEY) {
2649                context->qkey_xrcd = attr->qkey;
2650                qpc_mask->qkey_xrcd = 0;
2651                hr_qp->qkey = attr->qkey;
2652        }
2653
2654        if (hr_qp->rdb_en) {
2655                roce_set_bit(context->byte_68_rq_db,
2656                             V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1);
2657                roce_set_bit(qpc_mask->byte_68_rq_db,
2658                             V2_QPC_BYTE_68_RQ_RECORD_EN_S, 0);
2659        }
2660
2661        roce_set_field(context->byte_68_rq_db,
2662                       V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
2663                       V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S,
2664                       ((u32)hr_qp->rdb.dma) >> 1);
2665        roce_set_field(qpc_mask->byte_68_rq_db,
2666                       V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
2667                       V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S, 0);
2668        context->rq_db_record_addr = hr_qp->rdb.dma >> 32;
2669        qpc_mask->rq_db_record_addr = 0;
2670
2671        roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S,
2672                    (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) ? 1 : 0);
2673        roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);
2674
2675        roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
2676                       V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
2677        roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
2678                       V2_QPC_BYTE_80_RX_CQN_S, 0);
2679        if (ibqp->srq) {
2680                roce_set_field(context->byte_76_srqn_op_en,
2681                               V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
2682                               to_hr_srq(ibqp->srq)->srqn);
2683                roce_set_field(qpc_mask->byte_76_srqn_op_en,
2684                               V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
2685                roce_set_bit(context->byte_76_srqn_op_en,
2686                             V2_QPC_BYTE_76_SRQ_EN_S, 1);
2687                roce_set_bit(qpc_mask->byte_76_srqn_op_en,
2688                             V2_QPC_BYTE_76_SRQ_EN_S, 0);
2689        }
2690
2691        roce_set_field(qpc_mask->byte_84_rq_ci_pi,
2692                       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
2693                       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
2694        roce_set_field(qpc_mask->byte_84_rq_ci_pi,
2695                       V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
2696                       V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
2697
2698        roce_set_field(qpc_mask->byte_92_srq_info, V2_QPC_BYTE_92_SRQ_INFO_M,
2699                       V2_QPC_BYTE_92_SRQ_INFO_S, 0);
2700
2701        roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
2702                       V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
2703
2704        roce_set_field(qpc_mask->byte_104_rq_sge,
2705                       V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_M,
2706                       V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_S, 0);
2707
2708        roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
2709                     V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
2710        roce_set_field(qpc_mask->byte_108_rx_reqepsn,
2711                       V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
2712                       V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
2713        roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
2714                     V2_QPC_BYTE_108_RX_REQ_RNR_S, 0);
2715
2716        qpc_mask->rq_rnr_timer = 0;
2717        qpc_mask->rx_msg_len = 0;
2718        qpc_mask->rx_rkey_pkt_info = 0;
2719        qpc_mask->rx_va = 0;
2720
2721        roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
2722                       V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
2723        roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
2724                       V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
2725
2726        roce_set_bit(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RQ_RTY_WAIT_DO_S,
2727                     0);
2728        roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M,
2729                       V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S, 0);
2730        roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_TAIL_M,
2731                       V2_QPC_BYTE_140_RAQ_TRRL_TAIL_S, 0);
2732
2733        roce_set_field(qpc_mask->byte_144_raq,
2734                       V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M,
2735                       V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S, 0);
2736        roce_set_field(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_CREDIT_M,
2737                       V2_QPC_BYTE_144_RAQ_CREDIT_S, 0);
2738        roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RESP_RTY_FLG_S, 0);
2739
2740        roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RQ_MSN_M,
2741                       V2_QPC_BYTE_148_RQ_MSN_S, 0);
2742        roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RAQ_SYNDROME_M,
2743                       V2_QPC_BYTE_148_RAQ_SYNDROME_S, 0);
2744
2745        roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
2746                       V2_QPC_BYTE_152_RAQ_PSN_S, 0);
2747        roce_set_field(qpc_mask->byte_152_raq,
2748                       V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_M,
2749                       V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_S, 0);
2750
2751        roce_set_field(qpc_mask->byte_156_raq, V2_QPC_BYTE_156_RAQ_USE_PKTN_M,
2752                       V2_QPC_BYTE_156_RAQ_USE_PKTN_S, 0);
2753
2754        roce_set_field(qpc_mask->byte_160_sq_ci_pi,
2755                       V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
2756                       V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
2757        roce_set_field(qpc_mask->byte_160_sq_ci_pi,
2758                       V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M,
2759                       V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S, 0);
2760
2761        roce_set_bit(qpc_mask->byte_168_irrl_idx,
2762                     V2_QPC_BYTE_168_POLL_DB_WAIT_DO_S, 0);
2763        roce_set_bit(qpc_mask->byte_168_irrl_idx,
2764                     V2_QPC_BYTE_168_SCC_TOKEN_FORBID_SQ_DEQ_S, 0);
2765        roce_set_bit(qpc_mask->byte_168_irrl_idx,
2766                     V2_QPC_BYTE_168_WAIT_ACK_TIMEOUT_S, 0);
2767        roce_set_bit(qpc_mask->byte_168_irrl_idx,
2768                     V2_QPC_BYTE_168_MSG_RTY_LP_FLG_S, 0);
2769        roce_set_bit(qpc_mask->byte_168_irrl_idx,
2770                     V2_QPC_BYTE_168_SQ_INVLD_FLG_S, 0);
2771        roce_set_field(qpc_mask->byte_168_irrl_idx,
2772                       V2_QPC_BYTE_168_IRRL_IDX_LSB_M,
2773                       V2_QPC_BYTE_168_IRRL_IDX_LSB_S, 0);
2774
2775        roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
2776                       V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 4);
2777        roce_set_field(qpc_mask->byte_172_sq_psn,
2778                       V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
2779                       V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 0);
2780
2781        roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_MSG_RNR_FLG_S,
2782                     0);
2783
2784        roce_set_field(qpc_mask->byte_176_msg_pktn,
2785                       V2_QPC_BYTE_176_MSG_USE_PKTN_M,
2786                       V2_QPC_BYTE_176_MSG_USE_PKTN_S, 0);
2787        roce_set_field(qpc_mask->byte_176_msg_pktn,
2788                       V2_QPC_BYTE_176_IRRL_HEAD_PRE_M,
2789                       V2_QPC_BYTE_176_IRRL_HEAD_PRE_S, 0);
2790
2791        roce_set_field(qpc_mask->byte_184_irrl_idx,
2792                       V2_QPC_BYTE_184_IRRL_IDX_MSB_M,
2793                       V2_QPC_BYTE_184_IRRL_IDX_MSB_S, 0);
2794
2795        qpc_mask->cur_sge_offset = 0;
2796
2797        roce_set_field(qpc_mask->byte_192_ext_sge,
2798                       V2_QPC_BYTE_192_CUR_SGE_IDX_M,
2799                       V2_QPC_BYTE_192_CUR_SGE_IDX_S, 0);
2800        roce_set_field(qpc_mask->byte_192_ext_sge,
2801                       V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_M,
2802                       V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_S, 0);
2803
2804        roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
2805                       V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
2806
2807        roce_set_field(qpc_mask->byte_200_sq_max, V2_QPC_BYTE_200_SQ_MAX_IDX_M,
2808                       V2_QPC_BYTE_200_SQ_MAX_IDX_S, 0);
2809        roce_set_field(qpc_mask->byte_200_sq_max,
2810                       V2_QPC_BYTE_200_LCL_OPERATED_CNT_M,
2811                       V2_QPC_BYTE_200_LCL_OPERATED_CNT_S, 0);
2812
2813        roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RNR_FLG_S, 0);
2814        roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RTY_FLG_S, 0);
2815
2816        roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
2817                       V2_QPC_BYTE_212_CHECK_FLG_S, 0);
2818
2819        qpc_mask->sq_timer = 0;
2820
2821        roce_set_field(qpc_mask->byte_220_retry_psn_msn,
2822                       V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
2823                       V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
2824        roce_set_field(qpc_mask->byte_232_irrl_sge,
2825                       V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
2826                       V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
2827
2828        roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_SO_LP_VLD_S,
2829                     0);
2830        roce_set_bit(qpc_mask->byte_232_irrl_sge,
2831                     V2_QPC_BYTE_232_FENCE_LP_VLD_S, 0);
2832        roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_IRRL_LP_VLD_S,
2833                     0);
2834
2835        qpc_mask->irrl_cur_sge_offset = 0;
2836
2837        roce_set_field(qpc_mask->byte_240_irrl_tail,
2838                       V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
2839                       V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
2840        roce_set_field(qpc_mask->byte_240_irrl_tail,
2841                       V2_QPC_BYTE_240_IRRL_TAIL_RD_M,
2842                       V2_QPC_BYTE_240_IRRL_TAIL_RD_S, 0);
2843        roce_set_field(qpc_mask->byte_240_irrl_tail,
2844                       V2_QPC_BYTE_240_RX_ACK_MSN_M,
2845                       V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
2846
2847        roce_set_field(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_M,
2848                       V2_QPC_BYTE_248_IRRL_PSN_S, 0);
2849        roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_ACK_PSN_ERR_S,
2850                     0);
2851        roce_set_field(qpc_mask->byte_248_ack_psn,
2852                       V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
2853                       V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
2854        roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_VLD_S,
2855                     0);
2856        roce_set_bit(qpc_mask->byte_248_ack_psn,
2857                     V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
2858        roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_CQ_ERR_IND_S,
2859                     0);
2860
2861        hr_qp->access_flags = attr->qp_access_flags;
2862        hr_qp->pkey_index = attr->pkey_index;
2863        roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
2864                       V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
2865        roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
2866                       V2_QPC_BYTE_252_TX_CQN_S, 0);
2867
2868        roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_ERR_TYPE_M,
2869                       V2_QPC_BYTE_252_ERR_TYPE_S, 0);
2870
2871        roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
2872                       V2_QPC_BYTE_256_RQ_CQE_IDX_M,
2873                       V2_QPC_BYTE_256_RQ_CQE_IDX_S, 0);
2874        roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
2875                       V2_QPC_BYTE_256_SQ_FLUSH_IDX_M,
2876                       V2_QPC_BYTE_256_SQ_FLUSH_IDX_S, 0);
2877}
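
/*
 * Note how the 64-bit record-doorbell DMA address is split above: bits
 * 1..31 land in the RQ_DB_RECORD_ADDR field (hence the >> 1; the address
 * is assumed to be at least 2-byte aligned) and bits 32..63 land in
 * rq_db_record_addr. A sketch of the reassembly, where "lo" stands for
 * the 31-bit field value (hypothetical name):
 *
 *	u64 db_addr = ((u64)context->rq_db_record_addr << 32) |
 *		      ((u64)lo << 1);
 */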
2878
2879static void modify_qp_init_to_init(struct ib_qp *ibqp,
2880                                   const struct ib_qp_attr *attr, int attr_mask,
2881                                   struct hns_roce_v2_qp_context *context,
2882                                   struct hns_roce_v2_qp_context *qpc_mask)
2883{
2884        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
2885
2886        /*
2887         * In the v2 engine, software passes a context and a context mask
2888         * to hardware when modifying a QP. For each field to be modified,
2889         * every bit of that field in the context mask must be cleared to
2890         * 0; mask bits left at 0x1 keep the corresponding fields unchanged.
2891         */
2892        roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
2893                       V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
2894        roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
2895                       V2_QPC_BYTE_4_TST_S, 0);
2896
2897        if (ibqp->qp_type == IB_QPT_GSI)
2898                roce_set_field(context->byte_4_sqpn_tst,
2899                               V2_QPC_BYTE_4_SGE_SHIFT_M,
2900                               V2_QPC_BYTE_4_SGE_SHIFT_S,
2901                               ilog2((unsigned int)hr_qp->sge.sge_cnt));
2902        else
2903                roce_set_field(context->byte_4_sqpn_tst,
2904                               V2_QPC_BYTE_4_SGE_SHIFT_M,
2905                               V2_QPC_BYTE_4_SGE_SHIFT_S, hr_qp->sq.max_gs > 2 ?
2906                               ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
2907
2908        roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
2909                       V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
2910
2911        if (attr_mask & IB_QP_ACCESS_FLAGS) {
2912                roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
2913                             !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
2914                roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
2915                             0);
2916
2917                roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
2918                             !!(attr->qp_access_flags &
2919                             IB_ACCESS_REMOTE_WRITE));
2920                roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
2921                             0);
2922
2923                roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
2924                             !!(attr->qp_access_flags &
2925                             IB_ACCESS_REMOTE_ATOMIC));
2926                roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
2927                             0);
2928        } else {
2929                roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
2930                             !!(hr_qp->access_flags & IB_ACCESS_REMOTE_READ));
2931                roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
2932                             0);
2933
2934                roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
2935                             !!(hr_qp->access_flags & IB_ACCESS_REMOTE_WRITE));
2936                roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
2937                             0);
2938
2939                roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
2940                             !!(hr_qp->access_flags & IB_ACCESS_REMOTE_ATOMIC));
2941                roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
2942                             0);
2943        }
2944
2945        roce_set_field(context->byte_20_smac_sgid_idx,
2946                       V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
2947                       ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2948        roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
2949                       V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);
2950
2951        roce_set_field(context->byte_20_smac_sgid_idx,
2952                       V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
2953                       ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2954        roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
2955                       V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
2956
2957        roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
2958                       V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
2959        roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
2960                       V2_QPC_BYTE_16_PD_S, 0);
2961
2962        roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
2963                       V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
2964        roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
2965                       V2_QPC_BYTE_80_RX_CQN_S, 0);
2966
2967        roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
2968                       V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
2969        roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
2970                       V2_QPC_BYTE_252_TX_CQN_S, 0);
2971
2972        if (ibqp->srq) {
2973                roce_set_bit(context->byte_76_srqn_op_en,
2974                             V2_QPC_BYTE_76_SRQ_EN_S, 1);
2975                roce_set_bit(qpc_mask->byte_76_srqn_op_en,
2976                             V2_QPC_BYTE_76_SRQ_EN_S, 0);
2977                roce_set_field(context->byte_76_srqn_op_en,
2978                               V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
2979                               to_hr_srq(ibqp->srq)->srqn);
2980                roce_set_field(qpc_mask->byte_76_srqn_op_en,
2981                               V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
2982        }
2983
2984        if (attr_mask & IB_QP_QKEY) {
2985                context->qkey_xrcd = attr->qkey;
2986                qpc_mask->qkey_xrcd = 0;
2987        }
2988
2989        roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
2990                       V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
2991        roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
2992                       V2_QPC_BYTE_4_SQPN_S, 0);
2993
2994        if (attr_mask & IB_QP_DEST_QPN) {
2995                roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
2996                               V2_QPC_BYTE_56_DQPN_S, hr_qp->qpn);
2997                roce_set_field(qpc_mask->byte_56_dqpn_err,
2998                               V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
2999        }
3000}
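
/*
 * In the INIT->INIT transition above, fields are gated on attr_mask: when
 * a bit such as IB_QP_ACCESS_FLAGS is absent, the value cached earlier in
 * hr_qp is reprogrammed instead, so the transition never silently drops
 * settings established by a previous modify.
 */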
3001
3002static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
3003                                 const struct ib_qp_attr *attr, int attr_mask,
3004                                 struct hns_roce_v2_qp_context *context,
3005                                 struct hns_roce_v2_qp_context *qpc_mask)
3006{
3007        const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
3008        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3009        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3010        struct device *dev = hr_dev->dev;
3011        dma_addr_t dma_handle_3;
3012        dma_addr_t dma_handle_2;
3013        dma_addr_t dma_handle;
3014        u32 page_size;
3015        u8 port_num;
3016        u64 *mtts_3;
3017        u64 *mtts_2;
3018        u64 *mtts;
3019        u8 *dmac;
3020        u8 *smac;
3021        int port;
3022
3023        /* Search qp buf's mtts */
3024        mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
3025                                   hr_qp->mtt.first_seg, &dma_handle);
3026        if (!mtts) {
3027                dev_err(dev, "qp buf pa find failed\n");
3028                return -EINVAL;
3029        }
3030
3031        /* Search IRRL's mtts */
3032        mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
3033                                     hr_qp->qpn, &dma_handle_2);
3034        if (!mtts_2) {
3035                dev_err(dev, "qp irrl_table find failed\n");
3036                return -EINVAL;
3037        }
3038
3039        /* Search TRRL's mtts */
3040        mtts_3 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
3041                                     hr_qp->qpn, &dma_handle_3);
3042        if (!mtts_3) {
3043                dev_err(dev, "qp trrl_table find failed\n");
3044                return -EINVAL;
3045        }
3046
3047        if (attr_mask & IB_QP_ALT_PATH) {
3048                dev_err(dev, "INIT2RTR attr_mask (0x%x) error\n", attr_mask);
3049                return -EINVAL;
3050        }
3051
3052        dmac = (u8 *)attr->ah_attr.roce.dmac;
3053        context->wqe_sge_ba = (u32)(dma_handle >> 3);
3054        qpc_mask->wqe_sge_ba = 0;
3055
3056        /*
3057         * In the v2 engine, software passes a context and a context mask
3058         * to hardware when modifying a QP. For each field to be modified,
3059         * every bit of that field in the context mask must be cleared to
3060         * 0; mask bits left at 0x1 keep the corresponding fields unchanged.
3061         */
3062        roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
3063                       V2_QPC_BYTE_12_WQE_SGE_BA_S, dma_handle >> (32 + 3));
3064        roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
3065                       V2_QPC_BYTE_12_WQE_SGE_BA_S, 0);
3066
3067        roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
3068                       V2_QPC_BYTE_12_SQ_HOP_NUM_S,
3069                       hr_dev->caps.mtt_hop_num == HNS_ROCE_HOP_NUM_0 ?
3070                       0 : hr_dev->caps.mtt_hop_num);
3071        roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
3072                       V2_QPC_BYTE_12_SQ_HOP_NUM_S, 0);
3073
3074        roce_set_field(context->byte_20_smac_sgid_idx,
3075                       V2_QPC_BYTE_20_SGE_HOP_NUM_M,
3076                       V2_QPC_BYTE_20_SGE_HOP_NUM_S,
3077                       ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
3078                       hr_dev->caps.mtt_hop_num : 0);
3079        roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3080                       V2_QPC_BYTE_20_SGE_HOP_NUM_M,
3081                       V2_QPC_BYTE_20_SGE_HOP_NUM_S, 0);
3082
3083        roce_set_field(context->byte_20_smac_sgid_idx,
3084                       V2_QPC_BYTE_20_RQ_HOP_NUM_M,
3085                       V2_QPC_BYTE_20_RQ_HOP_NUM_S,
3086                       hr_dev->caps.mtt_hop_num == HNS_ROCE_HOP_NUM_0 ?
3087                       0 : hr_dev->caps.mtt_hop_num);
3088        roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3089                       V2_QPC_BYTE_20_RQ_HOP_NUM_M,
3090                       V2_QPC_BYTE_20_RQ_HOP_NUM_S, 0);
3091
3092        roce_set_field(context->byte_16_buf_ba_pg_sz,
3093                       V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
3094                       V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S,
3095                       hr_dev->caps.mtt_ba_pg_sz + PG_SHIFT_OFFSET);
3096        roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
3097                       V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
3098                       V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S, 0);
3099
3100        roce_set_field(context->byte_16_buf_ba_pg_sz,
3101                       V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
3102                       V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S,
3103                       hr_dev->caps.mtt_buf_pg_sz + PG_SHIFT_OFFSET);
3104        roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
3105                       V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
3106                       V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0);
3107
3108        roce_set_field(context->byte_80_rnr_rx_cqn,
3109                       V2_QPC_BYTE_80_MIN_RNR_TIME_M,
3110                       V2_QPC_BYTE_80_MIN_RNR_TIME_S, attr->min_rnr_timer);
3111        roce_set_field(qpc_mask->byte_80_rnr_rx_cqn,
3112                       V2_QPC_BYTE_80_MIN_RNR_TIME_M,
3113                       V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
3114
3115        page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
3116        context->rq_cur_blk_addr = (u32)(mtts[hr_qp->rq.offset / page_size]
3117                                    >> PAGE_ADDR_SHIFT);
3118        qpc_mask->rq_cur_blk_addr = 0;
3119
3120        roce_set_field(context->byte_92_srq_info,
3121                       V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
3122                       V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S,
3123                       mtts[hr_qp->rq.offset / page_size]
3124                       >> (32 + PAGE_ADDR_SHIFT));
3125        roce_set_field(qpc_mask->byte_92_srq_info,
3126                       V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
3127                       V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, 0);
3128
3129        context->rq_nxt_blk_addr = (u32)(mtts[hr_qp->rq.offset / page_size + 1]
3130                                    >> PAGE_ADDR_SHIFT);
3131        qpc_mask->rq_nxt_blk_addr = 0;
3132
3133        roce_set_field(context->byte_104_rq_sge,
3134                       V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
3135                       V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S,
3136                       mtts[hr_qp->rq.offset / page_size + 1]
3137                       >> (32 + PAGE_ADDR_SHIFT));
3138        roce_set_field(qpc_mask->byte_104_rq_sge,
3139                       V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
3140                       V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);
3141
3142        roce_set_field(context->byte_108_rx_reqepsn,
3143                       V2_QPC_BYTE_108_RX_REQ_EPSN_M,
3144                       V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn);
3145        roce_set_field(qpc_mask->byte_108_rx_reqepsn,
3146                       V2_QPC_BYTE_108_RX_REQ_EPSN_M,
3147                       V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0);
3148
3149        roce_set_field(context->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
3150                       V2_QPC_BYTE_132_TRRL_BA_S, dma_handle_3 >> 4);
3151        roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
3152                       V2_QPC_BYTE_132_TRRL_BA_S, 0);
3153        context->trrl_ba = (u32)(dma_handle_3 >> (16 + 4));
3154        qpc_mask->trrl_ba = 0;
3155        roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
3156                       V2_QPC_BYTE_140_TRRL_BA_S,
3157                       (u32)(dma_handle_3 >> (32 + 16 + 4)));
3158        roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
3159                       V2_QPC_BYTE_140_TRRL_BA_S, 0);
3160
3161        context->irrl_ba = (u32)(dma_handle_2 >> 6);
3162        qpc_mask->irrl_ba = 0;
3163        roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
3164                       V2_QPC_BYTE_208_IRRL_BA_S,
3165                       dma_handle_2 >> (32 + 6));
3166        roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
3167                       V2_QPC_BYTE_208_IRRL_BA_S, 0);
3168
3169        roce_set_bit(context->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 1);
3170        roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 0);
3171
3172        roce_set_bit(context->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
3173                     hr_qp->sq_signal_bits);
3174        roce_set_bit(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
3175                     0);
3176
3177        port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;
3178
3179        smac = (u8 *)hr_dev->dev_addr[port];
3180        /* When dmac equals smac or loop_idc is 1, traffic should loop back */
3181        if (ether_addr_equal_unaligned(dmac, smac) ||
3182            hr_dev->loop_idc == 0x1) {
3183                roce_set_bit(context->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 1);
3184                roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
3185        }
3186
3187        if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
3188             attr->max_dest_rd_atomic) {
3189                roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
3190                               V2_QPC_BYTE_140_RR_MAX_S,
3191                               fls(attr->max_dest_rd_atomic - 1));
3192                roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
3193                               V2_QPC_BYTE_140_RR_MAX_S, 0);
3194        }
3195
3196        if (attr_mask & IB_QP_DEST_QPN) {
3197                roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
3198                               V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
3199                roce_set_field(qpc_mask->byte_56_dqpn_err,
3200                               V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
3201        }
3202
3203        /* Configure GID index */
3204        port_num = rdma_ah_get_port_num(&attr->ah_attr);
3205        roce_set_field(context->byte_20_smac_sgid_idx,
3206                       V2_QPC_BYTE_20_SGID_IDX_M,
3207                       V2_QPC_BYTE_20_SGID_IDX_S,
3208                       hns_get_gid_index(hr_dev, port_num - 1,
3209                                         grh->sgid_index));
3210        roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3211                       V2_QPC_BYTE_20_SGID_IDX_M,
3212                       V2_QPC_BYTE_20_SGID_IDX_S, 0);
3213        memcpy(&(context->dmac), dmac, 4);
3214        roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
3215                       V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4])));
3216        qpc_mask->dmac = 0;
3217        roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
3218                       V2_QPC_BYTE_52_DMAC_S, 0);
3219
3220        roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
3221                       V2_QPC_BYTE_56_LP_PKTN_INI_S, 4);
3222        roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
3223                       V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
3224
3225        if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
3226                roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
3227                               V2_QPC_BYTE_24_MTU_S, IB_MTU_4096);
3228        else if (attr_mask & IB_QP_PATH_MTU)
3229                roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
3230                               V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
3231
3232        roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
3233                       V2_QPC_BYTE_24_MTU_S, 0);
3234
3235        roce_set_field(context->byte_84_rq_ci_pi,
3236                       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
3237                       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head);
3238        roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3239                       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
3240                       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
3241
3242        roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3243                       V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
3244                       V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
3245        roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
3246                     V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
3247        roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
3248                       V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
3249        roce_set_field(qpc_mask->byte_108_rx_reqepsn,
3250                       V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
3251                       V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
3252
3253        context->rq_rnr_timer = 0;
3254        qpc_mask->rq_rnr_timer = 0;
3255
3256        roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
3257                       V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1);
3258        roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
3259                       V2_QPC_BYTE_152_RAQ_PSN_S, 0);
3260
3261        roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
3262                       V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
3263        roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
3264                       V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
3265
3266        roce_set_field(context->byte_168_irrl_idx,
3267                       V2_QPC_BYTE_168_LP_SGEN_INI_M,
3268                       V2_QPC_BYTE_168_LP_SGEN_INI_S, 3);
3269        roce_set_field(qpc_mask->byte_168_irrl_idx,
3270                       V2_QPC_BYTE_168_LP_SGEN_INI_M,
3271                       V2_QPC_BYTE_168_LP_SGEN_INI_S, 0);
3272
3273        return 0;
3274}
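
/*
 * The TRRL base address above illustrates how a 64-bit DMA address is
 * scattered across QPC fields: dma_handle_3 is 16-byte aligned, so its
 * useful bits are stored in three slices - byte_132 (>> 4), trrl_ba
 * (>> 20) and byte_140 (>> 52). A sketch of the reassembly, with f132,
 * f_trrl and f140 as hypothetical names for the three field values:
 *
 *	u64 trrl_ba = ((u64)f140 << 52) | ((u64)f_trrl << 20) |
 *		      ((u64)f132 << 4);
 */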
3275
3276static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
3277                                const struct ib_qp_attr *attr, int attr_mask,
3278                                struct hns_roce_v2_qp_context *context,
3279                                struct hns_roce_v2_qp_context *qpc_mask)
3280{
3281        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3282        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3283        struct device *dev = hr_dev->dev;
3284        dma_addr_t dma_handle;
3285        u32 page_size;
3286        u64 *mtts;
3287
3288        /* Search qp buf's mtts */
3289        mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
3290                                   hr_qp->mtt.first_seg, &dma_handle);
3291        if (!mtts) {
3292                dev_err(dev, "qp buf pa find failed\n");
3293                return -EINVAL;
3294        }
3295
3296        /* Alternate path and path migration are not supported */
3297        if ((attr_mask & IB_QP_ALT_PATH) ||
3298            (attr_mask & IB_QP_PATH_MIG_STATE)) {
3299                dev_err(dev, "RTR2RTS attr_mask (0x%x) error\n", attr_mask);
3300                return -EINVAL;
3301        }
3302
3303        /*
3304         * In the v2 engine, software passes a context and a context mask
3305         * to hardware when modifying a QP. For each field to be modified,
3306         * every bit of that field in the context mask must be cleared to
3307         * 0; mask bits left at 0x1 keep the corresponding fields unchanged.
3308         */
3309        context->sq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
3310        roce_set_field(context->byte_168_irrl_idx,
3311                       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
3312                       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S,
3313                       mtts[0] >> (32 + PAGE_ADDR_SHIFT));
3314        qpc_mask->sq_cur_blk_addr = 0;
3315        roce_set_field(qpc_mask->byte_168_irrl_idx,
3316                       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
3317                       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);
3318
3319        page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
3320        context->sq_cur_sge_blk_addr =
3321                       ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
3322                                      ((u32)(mtts[hr_qp->sge.offset / page_size]
3323                                      >> PAGE_ADDR_SHIFT)) : 0;
3324        roce_set_field(context->byte_184_irrl_idx,
3325                       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
3326                       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
3327                       ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
3328                       (mtts[hr_qp->sge.offset / page_size] >>
3329                       (32 + PAGE_ADDR_SHIFT)) : 0);
3330        qpc_mask->sq_cur_sge_blk_addr = 0;
3331        roce_set_field(qpc_mask->byte_184_irrl_idx,
3332                       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
3333                       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, 0);
3334
3335        context->rx_sq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
3336        roce_set_field(context->byte_232_irrl_sge,
3337                       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
3338                       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S,
3339                       mtts[0] >> (32 + PAGE_ADDR_SHIFT));
3340        qpc_mask->rx_sq_cur_blk_addr = 0;
3341        roce_set_field(qpc_mask->byte_232_irrl_sge,
3342                       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
3343                       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S, 0);
3344
3345        /*
3346         * Some fields must become zero. Because every field of the
3347         * zero-initialised context is already 0, they need not be written
3348         * again; only the relevant fields of the context mask are cleared.
3349         */
3350        roce_set_field(qpc_mask->byte_232_irrl_sge,
3351                       V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
3352                       V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
3353
3354        roce_set_field(qpc_mask->byte_240_irrl_tail,
3355                       V2_QPC_BYTE_240_RX_ACK_MSN_M,
3356                       V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
3357
3358        roce_set_field(context->byte_244_rnr_rxack,
3359                       V2_QPC_BYTE_244_RX_ACK_EPSN_M,
3360                       V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn);
3361        roce_set_field(qpc_mask->byte_244_rnr_rxack,
3362                       V2_QPC_BYTE_244_RX_ACK_EPSN_M,
3363                       V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);
3364
3365        roce_set_field(qpc_mask->byte_248_ack_psn,
3366                       V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
3367                       V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
3368        roce_set_bit(qpc_mask->byte_248_ack_psn,
3369                     V2_QPC_BYTE_248_IRRL_PSN_VLD_S, 0);
3370        roce_set_field(qpc_mask->byte_248_ack_psn,
3371                       V2_QPC_BYTE_248_IRRL_PSN_M,
3372                       V2_QPC_BYTE_248_IRRL_PSN_S, 0);
3373
3374        roce_set_field(qpc_mask->byte_240_irrl_tail,
3375                       V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
3376                       V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
3377
3378        roce_set_field(context->byte_220_retry_psn_msn,
3379                       V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
3380                       V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn);
3381        roce_set_field(qpc_mask->byte_220_retry_psn_msn,
3382                       V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
3383                       V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0);
3384
3385        roce_set_field(context->byte_224_retry_msg,
3386                       V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
3387                       V2_QPC_BYTE_224_RETRY_MSG_PSN_S, attr->sq_psn >> 16);
3388        roce_set_field(qpc_mask->byte_224_retry_msg,
3389                       V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
3390                       V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);
3391
3392        roce_set_field(context->byte_224_retry_msg,
3393                       V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
3394                       V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, attr->sq_psn);
3395        roce_set_field(qpc_mask->byte_224_retry_msg,
3396                       V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
3397                       V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0);
3398
3399        roce_set_field(qpc_mask->byte_220_retry_psn_msn,
3400                       V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
3401                       V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
3402
3403        roce_set_bit(qpc_mask->byte_248_ack_psn,
3404                     V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
3405
3406        roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
3407                       V2_QPC_BYTE_212_CHECK_FLG_S, 0);
3408
3409        roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M,
3410                       V2_QPC_BYTE_212_RETRY_CNT_S, attr->retry_cnt);
3411        roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M,
3412                       V2_QPC_BYTE_212_RETRY_CNT_S, 0);
3413
3414        roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
3415                       V2_QPC_BYTE_212_RETRY_NUM_INIT_S, attr->retry_cnt);
3416        roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
3417                       V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0);
3418
3419        roce_set_field(context->byte_244_rnr_rxack,
3420                       V2_QPC_BYTE_244_RNR_NUM_INIT_M,
3421                       V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry);
3422        roce_set_field(qpc_mask->byte_244_rnr_rxack,
3423                       V2_QPC_BYTE_244_RNR_NUM_INIT_M,
3424                       V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0);
3425
3426        roce_set_field(context->byte_244_rnr_rxack, V2_QPC_BYTE_244_RNR_CNT_M,
3427                       V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry);
3428        roce_set_field(qpc_mask->byte_244_rnr_rxack, V2_QPC_BYTE_244_RNR_CNT_M,
3429                       V2_QPC_BYTE_244_RNR_CNT_S, 0);
3430
3431        roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
3432                       V2_QPC_BYTE_212_LSN_S, 0x100);
3433        roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
3434                       V2_QPC_BYTE_212_LSN_S, 0);
3435
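        /*
         * AT is the local ACK timeout exponent; the effective timeout is
         * 4.096 us * 2^AT, per the InfiniBand specification.
         */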
3436        if (attr_mask & IB_QP_TIMEOUT) {
3437                roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_AT_M,
3438                               V2_QPC_BYTE_28_AT_S, attr->timeout);
3439                roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_AT_M,
3440                              V2_QPC_BYTE_28_AT_S, 0);
3441        }
3442
3443        roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
3444                       V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
3445        roce_set_field(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
3446                       V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0);
3447
3448        roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
3449                       V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
3450        roce_set_field(context->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
3451                       V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn);
3452        roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
3453                       V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
3454
3455        if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
3456                roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
3457                               V2_QPC_BYTE_208_SR_MAX_S,
3458                               fls(attr->max_rd_atomic - 1));
3459                roce_set_field(qpc_mask->byte_208_irrl,
3460                               V2_QPC_BYTE_208_SR_MAX_M,
3461                               V2_QPC_BYTE_208_SR_MAX_S, 0);
3462        }
3463        return 0;
3464}
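
/*
 * SR_MAX uses a log2 encoding: fls(attr->max_rd_atomic - 1) rounds the
 * requested depth up to the next power of two and stores the exponent.
 * Worked examples: max_rd_atomic = 8 -> fls(7) = 3 (depth 8), and
 * max_rd_atomic = 5 -> fls(4) = 3 (rounded up to 8).
 */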
3465
3466static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
3467                                 const struct ib_qp_attr *attr,
3468                                 int attr_mask, enum ib_qp_state cur_state,
3469                                 enum ib_qp_state new_state)
3470{
3471        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3472        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3473        struct hns_roce_v2_qp_context *context;
3474        struct hns_roce_v2_qp_context *qpc_mask;
3475        struct device *dev = hr_dev->dev;
3476        int ret = -EINVAL;
3477
3478        context = kcalloc(2, sizeof(*context), GFP_KERNEL);
3479        if (!context)
3480                return -ENOMEM;
3481
3482        qpc_mask = context + 1;
3483        /*
3484         * In the v2 engine, software passes a context and a context mask
3485         * to hardware when modifying a QP. For each field to be modified,
3486         * every bit of that field in the context mask must be cleared to
3487         * 0; mask bits left at 0x1 keep the corresponding fields unchanged.
3488         */
3489        memset(qpc_mask, 0xff, sizeof(*qpc_mask));
3490        if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
3491                memset(qpc_mask, 0, sizeof(*qpc_mask));
3492                modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
3493                                        qpc_mask);
3494        } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
3495                modify_qp_init_to_init(ibqp, attr, attr_mask, context,
3496                                       qpc_mask);
3497        } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
3498                ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
3499                                            qpc_mask);
3500                if (ret)
3501                        goto out;
3502        } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
3503                ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
3504                                           qpc_mask);
3505                if (ret)
3506                        goto out;
3507        } else if ((cur_state == IB_QPS_RTS && new_state == IB_QPS_RTS) ||
3508                   (cur_state == IB_QPS_SQE && new_state == IB_QPS_RTS) ||
3509                   (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD) ||
3510                   (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD) ||
3511                   (cur_state == IB_QPS_SQD && new_state == IB_QPS_RTS) ||
3512                   (cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
3513                   (cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) ||
3514                   (cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) ||
3515                   (cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) ||
3516                   (cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) ||
3517                   (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
3518                   (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
3519                   (cur_state == IB_QPS_SQD && new_state == IB_QPS_ERR) ||
3520                   (cur_state == IB_QPS_SQE && new_state == IB_QPS_ERR) ||
3521                   (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR)) {
3522                /* Nothing */
3523                ;
3524        } else {
3525                dev_err(dev, "Illegal state for QP!\n");
3526                ret = -EINVAL;
3527                goto out;
3528        }
3529
3530        /* When the QP state becomes ERR, outstanding SQ and RQ WQEs are flushed */
3531        if (new_state == IB_QPS_ERR) {
3532                roce_set_field(context->byte_160_sq_ci_pi,
3533                               V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
3534                               V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S,
3535                               hr_qp->sq.head);
3536                roce_set_field(qpc_mask->byte_160_sq_ci_pi,
3537                               V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
3538                               V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
3539                roce_set_field(context->byte_84_rq_ci_pi,
3540                               V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
3541                               V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
3542                               hr_qp->rq.head);
3543                roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3544                               V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
3545                               V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
3546        }
3547
3548        if (attr_mask & IB_QP_AV) {
3549                const struct ib_global_route *grh =
3550                                            rdma_ah_read_grh(&attr->ah_attr);
3551                const struct ib_gid_attr *gid_attr = NULL;
3552                int is_roce_protocol;
3553                u16 vlan = 0xffff;
3554                u8 ib_port;
3555                u8 hr_port;
3556
3557                ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num :
3558                           hr_qp->port + 1;
3559                hr_port = ib_port - 1;
3560                is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) &&
3561                               rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;
3562
3563                if (is_roce_protocol) {
3564                        gid_attr = attr->ah_attr.grh.sgid_attr;
3565                        ret = rdma_read_gid_l2_fields(gid_attr, &vlan, NULL);
3566                        if (ret)
3567                                goto out;
3568                }
3569
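                /*
                 * vlan keeps its 0xffff default unless
                 * rdma_read_gid_l2_fields() found a VLAN device, so a value
                 * below VLAN_CFI_MASK (0x1000) is a valid VLAN id and
                 * enables VLAN handling on both SQ and RQ.
                 */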
3570                if (vlan < VLAN_CFI_MASK) {
3571                        roce_set_bit(context->byte_76_srqn_op_en,
3572                                     V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1);
3573                        roce_set_bit(qpc_mask->byte_76_srqn_op_en,
3574                                     V2_QPC_BYTE_76_RQ_VLAN_EN_S, 0);
3575                        roce_set_bit(context->byte_168_irrl_idx,
3576                                     V2_QPC_BYTE_168_SQ_VLAN_EN_S, 1);
3577                        roce_set_bit(qpc_mask->byte_168_irrl_idx,
3578                                     V2_QPC_BYTE_168_SQ_VLAN_EN_S, 0);
3579                }
3580
3581                roce_set_field(context->byte_24_mtu_tc,
3582                               V2_QPC_BYTE_24_VLAN_ID_M,
3583                               V2_QPC_BYTE_24_VLAN_ID_S, vlan);
3584                roce_set_field(qpc_mask->byte_24_mtu_tc,
3585                               V2_QPC_BYTE_24_VLAN_ID_M,
3586                               V2_QPC_BYTE_24_VLAN_ID_S, 0);
3587
3588                if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) {
3589                        dev_err(hr_dev->dev,
3590                                "sgid_index(%u) too large. max is %d\n",
3591                                grh->sgid_index,
3592                                hr_dev->caps.gid_table_len[hr_port]);
3593                        ret = -EINVAL;
3594                        goto out;
3595                }
3596
3597                if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) {
3598                        dev_err(hr_dev->dev, "ah attr is not RDMA roce type\n");
3599                        ret = -EINVAL;
3600                        goto out;
3601                }
3602
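                /*
                 * 0x12b7 is 4791, the IANA-assigned UDP port for RoCE v2;
                 * it is programmed as a fixed UDP source port whenever the
                 * GID is of the RoCE v2 (UDP-encapsulated) type.
                 */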
3603                roce_set_field(context->byte_52_udpspn_dmac,
3604                           V2_QPC_BYTE_52_UDPSPN_M, V2_QPC_BYTE_52_UDPSPN_S,
3605                           (gid_attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP) ?
3606                           0 : 0x12b7);
3607
3608                roce_set_field(qpc_mask->byte_52_udpspn_dmac,
3609                               V2_QPC_BYTE_52_UDPSPN_M,
3610                               V2_QPC_BYTE_52_UDPSPN_S, 0);
3611
3612                roce_set_field(context->byte_20_smac_sgid_idx,
3613                               V2_QPC_BYTE_20_SGID_IDX_M,
3614                               V2_QPC_BYTE_20_SGID_IDX_S, grh->sgid_index);
3615
3616                roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3617                               V2_QPC_BYTE_20_SGID_IDX_M,
3618                               V2_QPC_BYTE_20_SGID_IDX_S, 0);
3619
3620                roce_set_field(context->byte_24_mtu_tc,
3621                               V2_QPC_BYTE_24_HOP_LIMIT_M,
3622                               V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit);
3623                roce_set_field(qpc_mask->byte_24_mtu_tc,
3624                               V2_QPC_BYTE_24_HOP_LIMIT_M,
3625                               V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
3626
3627                roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
3628                               V2_QPC_BYTE_24_TC_S, grh->traffic_class);
3629                roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
3630                               V2_QPC_BYTE_24_TC_S, 0);
3631                roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
3632                               V2_QPC_BYTE_28_FL_S, grh->flow_label);
3633                roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
3634                               V2_QPC_BYTE_28_FL_S, 0);
3635                memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
3636                memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
3637                roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
3638                               V2_QPC_BYTE_28_SL_S,
3639                               rdma_ah_get_sl(&attr->ah_attr));
3640                roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
3641                               V2_QPC_BYTE_28_SL_S, 0);
3642                hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
3643        }
3644
3645        if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
3646                set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
3647
3648        /* Every state transition must update the QP state field */
3649        roce_set_field(context->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
3650                       V2_QPC_BYTE_60_QP_ST_S, new_state);
3651        roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
3652                       V2_QPC_BYTE_60_QP_ST_S, 0);
3653
3654        /* SW passes the context to HW */
3655        ret = hns_roce_v2_qp_modify(hr_dev, &hr_qp->mtt, cur_state, new_state,
3656                                    context, hr_qp);
3657        if (ret) {
3658                dev_err(dev, "hns_roce_qp_modify failed(%d)\n", ret);
3659                goto out;
3660        }
3661
3662        hr_qp->state = new_state;
3663
3664        if (attr_mask & IB_QP_ACCESS_FLAGS)
3665                hr_qp->atomic_rd_en = attr->qp_access_flags;
3666
3667        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
3668                hr_qp->resp_depth = attr->max_dest_rd_atomic;
3669        if (attr_mask & IB_QP_PORT) {
3670                hr_qp->port = attr->port_num - 1;
3671                hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
3672        }
3673
3674        if (new_state == IB_QPS_RESET && !ibqp->uobject) {
3675                hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
3676                                     ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
3677                if (ibqp->send_cq != ibqp->recv_cq)
3678                        hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
3679                                             hr_qp->qpn, NULL);
3680
3681                hr_qp->rq.head = 0;
3682                hr_qp->rq.tail = 0;
3683                hr_qp->sq.head = 0;
3684                hr_qp->sq.tail = 0;
3685                hr_qp->sq_next_wqe = 0;
3686                hr_qp->next_sge = 0;
3687                if (hr_qp->rq.wqe_cnt)
3688                        *hr_qp->rdb.db_record = 0;
3689        }
3690
3691out:
3692        kfree(context);
3693        return ret;
3694}
3695
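/*
 * Illustrative sketch (not part of the driver): QPC and CQC updates are
 * sent to hardware as a context/mask pair.  The mask starts out as all
 * ones ("keep every field") and each field written into the context is
 * also cleared in the mask, telling the hardware to take that field
 * from the new context.  A minimal model of the convention, using a
 * hypothetical 32-bit register word:
 */
static inline void hw_ctx_field_update_sketch(u32 *ctx, u32 *mask,
                                              u32 field_mask, u32 shift,
                                              u32 val)
{
        *ctx = (*ctx & ~field_mask) | ((val << shift) & field_mask);
        *mask &= ~field_mask;   /* cleared bits = "update this field" */
}
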
3696static inline enum ib_qp_state to_ib_qp_st(enum hns_roce_v2_qp_state state)
3697{
3698        switch (state) {
3699        case HNS_ROCE_QP_ST_RST:        return IB_QPS_RESET;
3700        case HNS_ROCE_QP_ST_INIT:       return IB_QPS_INIT;
3701        case HNS_ROCE_QP_ST_RTR:        return IB_QPS_RTR;
3702        case HNS_ROCE_QP_ST_RTS:        return IB_QPS_RTS;
3703        case HNS_ROCE_QP_ST_SQ_DRAINING:
3704        case HNS_ROCE_QP_ST_SQD:        return IB_QPS_SQD;
3705        case HNS_ROCE_QP_ST_SQER:       return IB_QPS_SQE;
3706        case HNS_ROCE_QP_ST_ERR:        return IB_QPS_ERR;
3707        default:                        return -1;
3708        }
3709}
3710
3711static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
3712                                 struct hns_roce_qp *hr_qp,
3713                                 struct hns_roce_v2_qp_context *hr_context)
3714{
3715        struct hns_roce_cmd_mailbox *mailbox;
3716        int ret;
3717
3718        mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3719        if (IS_ERR(mailbox))
3720                return PTR_ERR(mailbox);
3721
3722        ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
3723                                HNS_ROCE_CMD_QUERY_QPC,
3724                                HNS_ROCE_CMD_TIMEOUT_MSECS);
3725        if (ret) {
3726                dev_err(hr_dev->dev, "QUERY QP cmd process error\n");
3727                goto out;
3728        }
3729
3730        memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
3731
3732out:
3733        hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3734        return ret;
3735}
3736
3737static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3738                                int qp_attr_mask,
3739                                struct ib_qp_init_attr *qp_init_attr)
3740{
3741        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3742        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3743        struct hns_roce_v2_qp_context *context;
3744        struct device *dev = hr_dev->dev;
3745        int tmp_qp_state;
3746        int state;
3747        int ret;
3748
3749        context = kzalloc(sizeof(*context), GFP_KERNEL);
3750        if (!context)
3751                return -ENOMEM;
3752
3753        memset(qp_attr, 0, sizeof(*qp_attr));
3754        memset(qp_init_attr, 0, sizeof(*qp_init_attr));
3755
3756        mutex_lock(&hr_qp->mutex);
3757
3758        if (hr_qp->state == IB_QPS_RESET) {
3759                qp_attr->qp_state = IB_QPS_RESET;
3760                ret = 0;
3761                goto done;
3762        }
3763
3764        ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, context);
3765        if (ret) {
3766                dev_err(dev, "query qpc error\n");
3767                ret = -EINVAL;
3768                goto out;
3769        }
3770
3771        state = roce_get_field(context->byte_60_qpst_tempid,
3772                               V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S);
3773        tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
3774        if (tmp_qp_state == -1) {
3775                dev_err(dev, "Illegal ib_qp_state\n");
3776                ret = -EINVAL;
3777                goto out;
3778        }
3779        hr_qp->state = (u8)tmp_qp_state;
3780        qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
3781        qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->byte_24_mtu_tc,
3782                                                        V2_QPC_BYTE_24_MTU_M,
3783                                                        V2_QPC_BYTE_24_MTU_S);
3784        qp_attr->path_mig_state = IB_MIG_ARMED;
3785        qp_attr->ah_attr.type   = RDMA_AH_ATTR_TYPE_ROCE;
3786        if (hr_qp->ibqp.qp_type == IB_QPT_UD)
3787                qp_attr->qkey = V2_QKEY_VAL;
3788
3789        qp_attr->rq_psn = roce_get_field(context->byte_108_rx_reqepsn,
3790                                         V2_QPC_BYTE_108_RX_REQ_EPSN_M,
3791                                         V2_QPC_BYTE_108_RX_REQ_EPSN_S);
3792        qp_attr->sq_psn = (u32)roce_get_field(context->byte_172_sq_psn,
3793                                              V2_QPC_BYTE_172_SQ_CUR_PSN_M,
3794                                              V2_QPC_BYTE_172_SQ_CUR_PSN_S);
3795        qp_attr->dest_qp_num = roce_get_field(context->byte_56_dqpn_err,
3796                                              V2_QPC_BYTE_56_DQPN_M,
3797                                              V2_QPC_BYTE_56_DQPN_S);
3798        qp_attr->qp_access_flags = ((roce_get_bit(context->byte_76_srqn_op_en,
3799                                                  V2_QPC_BYTE_76_RRE_S)) << 2) |
3800                                   ((roce_get_bit(context->byte_76_srqn_op_en,
3801                                                  V2_QPC_BYTE_76_RWE_S)) << 1) |
3802                                   ((roce_get_bit(context->byte_76_srqn_op_en,
3803                                                  V2_QPC_BYTE_76_ATE_S)) << 3);
3804        if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
3805            hr_qp->ibqp.qp_type == IB_QPT_UC) {
3806                struct ib_global_route *grh =
3807                                rdma_ah_retrieve_grh(&qp_attr->ah_attr);
3808
3809                rdma_ah_set_sl(&qp_attr->ah_attr,
3810                               roce_get_field(context->byte_28_at_fl,
3811                                              V2_QPC_BYTE_28_SL_M,
3812                                              V2_QPC_BYTE_28_SL_S));
3813                grh->flow_label = roce_get_field(context->byte_28_at_fl,
3814                                                 V2_QPC_BYTE_28_FL_M,
3815                                                 V2_QPC_BYTE_28_FL_S);
3816                grh->sgid_index = roce_get_field(context->byte_20_smac_sgid_idx,
3817                                                 V2_QPC_BYTE_20_SGID_IDX_M,
3818                                                 V2_QPC_BYTE_20_SGID_IDX_S);
3819                grh->hop_limit = roce_get_field(context->byte_24_mtu_tc,
3820                                                V2_QPC_BYTE_24_HOP_LIMIT_M,
3821                                                V2_QPC_BYTE_24_HOP_LIMIT_S);
3822                grh->traffic_class = roce_get_field(context->byte_24_mtu_tc,
3823                                                    V2_QPC_BYTE_24_TC_M,
3824                                                    V2_QPC_BYTE_24_TC_S);
3825
3826                memcpy(grh->dgid.raw, context->dgid, sizeof(grh->dgid.raw));
3827        }
3828
3829        qp_attr->port_num = hr_qp->port + 1;
3830        qp_attr->sq_draining = 0;
3831        qp_attr->max_rd_atomic = 1 << roce_get_field(context->byte_208_irrl,
3832                                                     V2_QPC_BYTE_208_SR_MAX_M,
3833                                                     V2_QPC_BYTE_208_SR_MAX_S);
3834        qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->byte_140_raq,
3835                                                     V2_QPC_BYTE_140_RR_MAX_M,
3836                                                     V2_QPC_BYTE_140_RR_MAX_S);
3837        qp_attr->min_rnr_timer = (u8)roce_get_field(context->byte_80_rnr_rx_cqn,
3838                                                 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
3839                                                 V2_QPC_BYTE_80_MIN_RNR_TIME_S);
3840        qp_attr->timeout = (u8)roce_get_field(context->byte_28_at_fl,
3841                                              V2_QPC_BYTE_28_AT_M,
3842                                              V2_QPC_BYTE_28_AT_S);
3843        qp_attr->retry_cnt = roce_get_field(context->byte_212_lsn,
3844                                            V2_QPC_BYTE_212_RETRY_CNT_M,
3845                                            V2_QPC_BYTE_212_RETRY_CNT_S);
3846        qp_attr->rnr_retry = context->rq_rnr_timer;
3847
3848done:
3849        qp_attr->cur_qp_state = qp_attr->qp_state;
3850        qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
3851        qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
3852
3853        if (!ibqp->uobject) {
3854                qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
3855                qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
3856        } else {
3857                qp_attr->cap.max_send_wr = 0;
3858                qp_attr->cap.max_send_sge = 0;
3859        }
3860
3861        qp_init_attr->cap = qp_attr->cap;
3862
3863out:
3864        mutex_unlock(&hr_qp->mutex);
3865        kfree(context);
3866        return ret;
3867}
3868
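/*
 * Illustrative sketch (not part of the driver): the RRE/RWE/ATE bits in
 * QPC byte_76 are independent enables, and hns_roce_v2_query_qp() above
 * rebuilds qp_attr->qp_access_flags from them.  The shift counts match
 * the IB core definitions (IB_ACCESS_REMOTE_WRITE = 1 << 1,
 * IB_ACCESS_REMOTE_READ = 1 << 2, IB_ACCESS_REMOTE_ATOMIC = 1 << 3):
 */
static inline int hw_en_bits_to_access_flags_sketch(bool rre, bool rwe,
                                                    bool ate)
{
        return (rwe ? IB_ACCESS_REMOTE_WRITE : 0) |
               (rre ? IB_ACCESS_REMOTE_READ : 0) |
               (ate ? IB_ACCESS_REMOTE_ATOMIC : 0);
}
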
3869static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
3870                                         struct hns_roce_qp *hr_qp,
3871                                         struct ib_udata *udata)
3872{
3873        struct hns_roce_cq *send_cq, *recv_cq;
3874        struct ib_device *ibdev = &hr_dev->ib_dev;
3875        int ret;
3876
3877        if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) {
3878                /* Modify qp to reset before destroying qp */
3879                ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
3880                                            hr_qp->state, IB_QPS_RESET);
3881                if (ret) {
3882                        ibdev_err(ibdev, "modify QP to Reset failed.\n");
3883                        return ret;
3884                }
3885        }
3886
3887        send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
3888        recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);
3889
3890        hns_roce_lock_cqs(send_cq, recv_cq);
3891
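        /*
         * With both CQ locks held, purge any CQEs that still reference
         * this QP so no stale completion can be reported after the QP
         * is removed from the lookup table below.
         */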
3892        if (!udata) {
3893                __hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
3894                                       to_hr_srq(hr_qp->ibqp.srq) : NULL);
3895                if (send_cq != recv_cq)
3896                        __hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
3897        }
3898
3899        hns_roce_qp_remove(hr_dev, hr_qp);
3900
3901        hns_roce_unlock_cqs(send_cq, recv_cq);
3902
3903        hns_roce_qp_free(hr_dev, hr_qp);
3904
3905        /* Not a special QP, so free its QPN */
3906        if ((hr_qp->ibqp.qp_type == IB_QPT_RC) ||
3907            (hr_qp->ibqp.qp_type == IB_QPT_UC) ||
3908            (hr_qp->ibqp.qp_type == IB_QPT_UD))
3909                hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
3910
3911        hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
3912
3913        if (udata) {
3914                struct hns_roce_ucontext *context =
3915                        rdma_udata_to_drv_context(
3916                                udata,
3917                                struct hns_roce_ucontext,
3918                                ibucontext);
3919
3920                if (hr_qp->sq.wqe_cnt && (hr_qp->sdb_en == 1))
3921                        hns_roce_db_unmap_user(context, &hr_qp->sdb);
3922
3923                if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1))
3924                        hns_roce_db_unmap_user(context, &hr_qp->rdb);
3925        } else {
3926                kfree(hr_qp->sq.wrid);
3927                kfree(hr_qp->rq.wrid);
3928                hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
3929                if (hr_qp->rq.wqe_cnt)
3930                        hns_roce_free_db(hr_dev, &hr_qp->rdb);
3931        }
3932        ib_umem_release(hr_qp->umem);
3933
3934        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
3935                kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
3936                kfree(hr_qp->rq_inl_buf.wqe_list);
3937        }
3938
3939        return 0;
3940}
3941
3942static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
3943{
3944        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3945        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3946        int ret;
3947
3948        ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
3949        if (ret) {
3950                ibdev_err(&hr_dev->ib_dev, "Destroy qp 0x%06lx failed(%d)\n",
3951                          hr_qp->qpn, ret);
3952                return ret;
3953        }
3954
3955        if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
3956                kfree(hr_to_hr_sqp(hr_qp));
3957        else
3958                kfree(hr_qp);
3959
3960        return 0;
3961}
3962
3963static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
3964{
3965        struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
3966        struct hns_roce_v2_cq_context *cq_context;
3967        struct hns_roce_cq *hr_cq = to_hr_cq(cq);
3968        struct hns_roce_v2_cq_context *cqc_mask;
3969        struct hns_roce_cmd_mailbox *mailbox;
3970        int ret;
3971
3972        mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3973        if (IS_ERR(mailbox))
3974                return PTR_ERR(mailbox);
3975
3976        cq_context = mailbox->buf;
3977        cqc_mask = (struct hns_roce_v2_cq_context *)mailbox->buf + 1;
3978
3979        memset(cqc_mask, 0xff, sizeof(*cqc_mask));
3980
3981        roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
3982                       V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
3983                       cq_count);
3984        roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
3985                       V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
3986                       0);
3987        roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
3988                       V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
3989                       cq_period);
3990        roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
3991                       V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
3992                       0);
3993
3994        ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 1,
3995                                HNS_ROCE_CMD_MODIFY_CQC,
3996                                HNS_ROCE_CMD_TIMEOUT_MSECS);
3997        hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3998        if (ret)
3999                dev_err(hr_dev->dev, "MODIFY CQ cmd process error\n");
4000
4001        return ret;
4002}
4003
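/*
 * Usage note (a sketch, not a guarantee of semantics): this hook backs
 * the core ib_modify_cq() verb and programs CQ event moderation --
 * broadly, the hardware holds back the completion event until cq_count
 * CQEs have arrived or cq_period has elapsed, e.g.:
 *
 *      ret = ib_modify_cq(ibcq, 16, 10);       // hypothetical values
 */
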
4004static void hns_roce_set_qps_to_err(struct hns_roce_dev *hr_dev, u32 qpn)
4005{
4006        struct hns_roce_qp *hr_qp;
4007        struct ib_qp_attr attr;
4008        int attr_mask;
4009        int ret;
4010
4011        hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
4012        if (!hr_qp) {
4013                dev_warn(hr_dev->dev, "no hr_qp can be found!\n");
4014                return;
4015        }
4016
4017        if (hr_qp->ibqp.uobject) {
4018                if (hr_qp->sdb_en == 1) {
4019                        hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
4020                        hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
4021                } else {
4022                        dev_warn(hr_dev->dev, "flush cqe is unsupported in userspace!\n");
4023                        return;
4024                }
4025        }
4026
4027        attr_mask = IB_QP_STATE;
4028        attr.qp_state = IB_QPS_ERR;
4029        ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, &attr, attr_mask,
4030                                    hr_qp->state, IB_QPS_ERR);
4031        if (ret)
4032                dev_err(hr_dev->dev, "failed to modify qp %d to err state.\n",
4033                        qpn);
4034}
4035
4036static void hns_roce_irq_work_handle(struct work_struct *work)
4037{
4038        struct hns_roce_work *irq_work =
4039                                container_of(work, struct hns_roce_work, work);
4040        struct device *dev = irq_work->hr_dev->dev;
4041        u32 qpn = irq_work->qpn;
4042        u32 cqn = irq_work->cqn;
4043
4044        switch (irq_work->event_type) {
4045        case HNS_ROCE_EVENT_TYPE_PATH_MIG:
4046                dev_info(dev, "Path migration succeeded.\n");
4047                break;
4048        case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
4049                dev_warn(dev, "Path migration failed.\n");
4050                break;
4051        case HNS_ROCE_EVENT_TYPE_COMM_EST:
4052                dev_info(dev, "Communication established.\n");
4053                break;
4054        case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
4055                dev_warn(dev, "Send queue drained.\n");
4056                break;
4057        case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
4058                dev_err(dev, "Local work queue catastrophic error.\n");
4059                hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
4060                switch (irq_work->sub_type) {
4061                case HNS_ROCE_LWQCE_QPC_ERROR:
4062                        dev_err(dev, "QP %d, QPC error.\n", qpn);
4063                        break;
4064                case HNS_ROCE_LWQCE_MTU_ERROR:
4065                        dev_err(dev, "QP %d, MTU error.\n", qpn);
4066                        break;
4067                case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
4068                        dev_err(dev, "QP %d, WQE BA addr error.\n", qpn);
4069                        break;
4070                case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
4071                        dev_err(dev, "QP %d, WQE addr error.\n", qpn);
4072                        break;
4073                case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
4074                        dev_err(dev, "QP %d, WQE shift error.\n", qpn);
4075                        break;
4076                default:
4077                        dev_err(dev, "Unhandled sub_event type %d.\n",
4078                                irq_work->sub_type);
4079                        break;
4080                }
4081                break;
4082        case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
4083                dev_err(dev, "Invalid request local work queue error.\n");
4084                hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
4085                break;
4086        case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
4087                dev_err(dev, "Local access violation work queue error.\n");
4088                hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
4089                switch (irq_work->sub_type) {
4090                case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
4091                        dev_err(dev, "QP %d, R_key violation.\n", qpn);
4092                        break;
4093                case HNS_ROCE_LAVWQE_LENGTH_ERROR:
4094                        dev_err(dev, "QP %d, length error.\n", qpn);
4095                        break;
4096                case HNS_ROCE_LAVWQE_VA_ERROR:
4097                        dev_err(dev, "QP %d, VA error.\n", qpn);
4098                        break;
4099                case HNS_ROCE_LAVWQE_PD_ERROR:
4100                        dev_err(dev, "QP %d, PD error.\n", qpn);
4101                        break;
4102                case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
4103                        dev_err(dev, "QP %d, rw acc error.\n", qpn);
4104                        break;
4105                case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
4106                        dev_err(dev, "QP %d, key state error.\n", qpn);
4107                        break;
4108                case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
4109                        dev_err(dev, "QP %d, MR operation error.\n", qpn);
4110                        break;
4111                default:
4112                        dev_err(dev, "Unhandled sub_event type %d.\n",
4113                                irq_work->sub_type);
4114                        break;
4115                }
4116                break;
4117        case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
4118                dev_warn(dev, "SRQ limit reached.\n");
4119                break;
4120        case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
4121                dev_warn(dev, "SRQ last wqe reached.\n");
4122                break;
4123        case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
4124                dev_err(dev, "SRQ catas error.\n");
4125                break;
4126        case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
4127                dev_err(dev, "CQ 0x%x access err.\n", cqn);
4128                break;
4129        case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
4130                dev_warn(dev, "CQ 0x%x overflow\n", cqn);
4131                break;
4132        case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
4133                dev_warn(dev, "DB overflow.\n");
4134                break;
4135        case HNS_ROCE_EVENT_TYPE_FLR:
4136                dev_warn(dev, "Function level reset.\n");
4137                break;
4138        default:
4139                break;
4140        }
4141
4142        kfree(irq_work);
4143}
4144
4145static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
4146                                      struct hns_roce_eq *eq,
4147                                      u32 qpn, u32 cqn)
4148{
4149        struct hns_roce_work *irq_work;
4150
4151        irq_work = kzalloc(sizeof(struct hns_roce_work), GFP_ATOMIC);
4152        if (!irq_work)
4153                return;
4154
4155        INIT_WORK(&(irq_work->work), hns_roce_irq_work_handle);
4156        irq_work->hr_dev = hr_dev;
4157        irq_work->qpn = qpn;
4158        irq_work->cqn = cqn;
4159        irq_work->event_type = eq->event_type;
4160        irq_work->sub_type = eq->sub_type;
4161        queue_work(hr_dev->irq_workq, &(irq_work->work));
4162}
4163
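/*
 * Note: the work item above is allocated with GFP_ATOMIC because this
 * path runs from interrupt context; hns_roce_irq_work_handle() frees it
 * after reporting the event from process context.
 */
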
4164static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
4165{
4166        u32 doorbell[2];
4167
4168        doorbell[0] = 0;
4169        doorbell[1] = 0;
4170
4171        if (eq->type_flag == HNS_ROCE_AEQ) {
4172                roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
4173                               HNS_ROCE_V2_EQ_DB_CMD_S,
4174                               eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
4175                               HNS_ROCE_EQ_DB_CMD_AEQ :
4176                               HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
4177        } else {
4178                roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_TAG_M,
4179                               HNS_ROCE_V2_EQ_DB_TAG_S, eq->eqn);
4180
4181                roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
4182                               HNS_ROCE_V2_EQ_DB_CMD_S,
4183                               eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
4184                               HNS_ROCE_EQ_DB_CMD_CEQ :
4185                               HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
4186        }
4187
4188        roce_set_field(doorbell[1], HNS_ROCE_V2_EQ_DB_PARA_M,
4189                       HNS_ROCE_V2_EQ_DB_PARA_S,
4190                       (eq->cons_index & HNS_ROCE_V2_CONS_IDX_M));
4191
4192        hns_roce_write64_k(doorbell, eq->doorbell);
4193}
4194
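/*
 * Note on the doorbell above: word 0 selects the command (and, for
 * CEQs, carries the EQ number as a tag) while word 1 carries the
 * consumer index.  The plain CMD_AEQ/CMD_CEQ commands only consume
 * entries; the *_ARMED variants additionally re-arm the EQ, which is
 * why they are used when the EQ is not in always-armed mode.
 */
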
4195static struct hns_roce_aeqe *get_aeqe_v2(struct hns_roce_eq *eq, u32 entry)
4196{
4197        u32 buf_chk_sz;
4198        unsigned long off;
4199
4200        buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4201        off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
4202
4203        return (struct hns_roce_aeqe *)((char *)(eq->buf_list->buf) +
4204                off % buf_chk_sz);
4205}
4206
4207static struct hns_roce_aeqe *mhop_get_aeqe(struct hns_roce_eq *eq, u32 entry)
4208{
4209        u32 buf_chk_sz;
4210        unsigned long off;
4211
4212        buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4213
4214        off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
4215
4216        if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
4217                return (struct hns_roce_aeqe *)((u8 *)(eq->bt_l0) +
4218                        off % buf_chk_sz);
4219        else
4220                return (struct hns_roce_aeqe *)((u8 *)
4221                        (eq->buf[off / buf_chk_sz]) + off % buf_chk_sz);
4222}
4223
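/*
 * Illustrative sketch (not part of the driver): with multi-hop EQE
 * buffers the ring is scattered across pages of buf_chk_sz bytes, so an
 * entry index resolves to a page (off / buf_chk_sz) plus an offset
 * inside it (off % buf_chk_sz).  entries is a power of two, hence the
 * "& (entries - 1)" wrap:
 */
static inline void *mhop_eqe_addr_sketch(u8 **bufs, u32 buf_chk_sz,
                                         u32 entries, u32 eqe_size,
                                         u32 entry)
{
        unsigned long off = (entry & (entries - 1)) * eqe_size;

        return bufs[off / buf_chk_sz] + off % buf_chk_sz;
}
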
4224static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
4225{
4226        struct hns_roce_aeqe *aeqe;
4227
4228        if (!eq->hop_num)
4229                aeqe = get_aeqe_v2(eq, eq->cons_index);
4230        else
4231                aeqe = mhop_get_aeqe(eq, eq->cons_index);
4232
4233        return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^
4234                !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
4235}
4236
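/*
 * Illustrative sketch (not part of the driver): the consumer index runs
 * from 0 to 2 * entries - 1 before wrapping (see the AEQ/CEQ handlers
 * below), so (cons_index & entries) flips on every lap of the ring.
 * Hardware toggles the owner-bit value it writes per lap, so an EQE is
 * fresh exactly when its owner bit differs from the software lap
 * parity:
 */
static inline bool eqe_is_fresh_sketch(u32 owner_bit, u32 cons_index,
                                       u32 entries)
{
        return owner_bit != !!(cons_index & entries);
}
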
4237static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
4238                               struct hns_roce_eq *eq)
4239{
4240        struct device *dev = hr_dev->dev;
4241        struct hns_roce_aeqe *aeqe;
4242        int aeqe_found = 0;
4243        int event_type;
4244        int sub_type;
4245        u32 qpn;
4246        u32 cqn;
4247
4248        while ((aeqe = next_aeqe_sw_v2(eq))) {
4249
4250                /* Make sure we read AEQ entry after we have checked the
4251                 * ownership bit
4252                 */
4253                dma_rmb();
4254
4255                event_type = roce_get_field(aeqe->asyn,
4256                                            HNS_ROCE_V2_AEQE_EVENT_TYPE_M,
4257                                            HNS_ROCE_V2_AEQE_EVENT_TYPE_S);
4258                sub_type = roce_get_field(aeqe->asyn,
4259                                          HNS_ROCE_V2_AEQE_SUB_TYPE_M,
4260                                          HNS_ROCE_V2_AEQE_SUB_TYPE_S);
4261                qpn = roce_get_field(aeqe->event.qp_event.qp,
4262                                     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
4263                                     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
4264                cqn = roce_get_field(aeqe->event.cq_event.cq,
4265                                     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
4266                                     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
4267
4268                switch (event_type) {
4269                case HNS_ROCE_EVENT_TYPE_PATH_MIG:
4270                case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
4271                case HNS_ROCE_EVENT_TYPE_COMM_EST:
4272                case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
4273                case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
4274                case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
4275                case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
4276                        hns_roce_qp_event(hr_dev, qpn, event_type);
4277                        break;
4278                case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
4279                case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
4280                case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
4281                        break;
4282                case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
4283                case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
4284                        hns_roce_cq_event(hr_dev, cqn, event_type);
4285                        break;
4286                case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
4287                        break;
4288                case HNS_ROCE_EVENT_TYPE_MB:
4289                        hns_roce_cmd_event(hr_dev,
4290                                        le16_to_cpu(aeqe->event.cmd.token),
4291                                        aeqe->event.cmd.status,
4292                                        le64_to_cpu(aeqe->event.cmd.out_param));
4293                        break;
4294                case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
4295                        break;
4296                case HNS_ROCE_EVENT_TYPE_FLR:
4297                        break;
4298                default:
4299                        dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
4300                                event_type, eq->eqn, eq->cons_index);
4301                        break;
4302                }
4303
4304                eq->event_type = event_type;
4305                eq->sub_type = sub_type;
4306                ++eq->cons_index;
4307                aeqe_found = 1;
4308
4309                if (eq->cons_index > (2 * eq->entries - 1)) {
4310                        dev_warn(dev, "cons_index overflow, set back to 0.\n");
4311                        eq->cons_index = 0;
4312                }
4313                hns_roce_v2_init_irq_work(hr_dev, eq, qpn, cqn);
4314        }
4315
4316        set_eq_cons_index_v2(eq);
4317        return aeqe_found;
4318}
4319
4320static struct hns_roce_ceqe *get_ceqe_v2(struct hns_roce_eq *eq, u32 entry)
4321{
4322        u32 buf_chk_sz;
4323        unsigned long off;
4324
4325        buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4326        off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
4327
4328        return (struct hns_roce_ceqe *)((char *)(eq->buf_list->buf) +
4329                off % buf_chk_sz);
4330}
4331
4332static struct hns_roce_ceqe *mhop_get_ceqe(struct hns_roce_eq *eq, u32 entry)
4333{
4334        u32 buf_chk_sz;
4335        unsigned long off;
4336
4337        buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4338
4339        off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
4340
4341        if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
4342                return (struct hns_roce_ceqe *)((u8 *)(eq->bt_l0) +
4343                        off % buf_chk_sz);
4344        else
4345                return (struct hns_roce_ceqe *)((u8 *)(eq->buf[off /
4346                        buf_chk_sz]) + off % buf_chk_sz);
4347}
4348
4349static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
4350{
4351        struct hns_roce_ceqe *ceqe;
4352
4353        if (!eq->hop_num)
4354                ceqe = get_ceqe_v2(eq, eq->cons_index);
4355        else
4356                ceqe = mhop_get_ceqe(eq, eq->cons_index);
4357
4358        return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^
4359                (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
4360}
4361
4362static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
4363                               struct hns_roce_eq *eq)
4364{
4365        struct device *dev = hr_dev->dev;
4366        struct hns_roce_ceqe *ceqe;
4367        int ceqe_found = 0;
4368        u32 cqn;
4369
4370        while ((ceqe = next_ceqe_sw_v2(eq))) {
4371
4372                /* Make sure we read CEQ entry after we have checked the
4373                 * ownership bit
4374                 */
4375                dma_rmb();
4376
4377                cqn = roce_get_field(ceqe->comp,
4378                                     HNS_ROCE_V2_CEQE_COMP_CQN_M,
4379                                     HNS_ROCE_V2_CEQE_COMP_CQN_S);
4380
4381                hns_roce_cq_completion(hr_dev, cqn);
4382
4383                ++eq->cons_index;
4384                ceqe_found = 1;
4385
4386                if (eq->cons_index > (2 * eq->entries - 1)) {
4387                        dev_warn(dev, "cons_index overflow, set back to 0.\n");
4388                        eq->cons_index = 0;
4389                }
4390        }
4391
4392        set_eq_cons_index_v2(eq);
4393
4394        return ceqe_found;
4395}
4396
4397static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
4398{
4399        struct hns_roce_eq *eq = eq_ptr;
4400        struct hns_roce_dev *hr_dev = eq->hr_dev;
4401        int int_work = 0;
4402
4403        if (eq->type_flag == HNS_ROCE_CEQ)
4404                /* Completion event interrupt */
4405                int_work = hns_roce_v2_ceq_int(hr_dev, eq);
4406        else
4407                /* Asynchronous event interrupt */
4408                int_work = hns_roce_v2_aeq_int(hr_dev, eq);
4409
4410        return IRQ_RETVAL(int_work);
4411}
4412
4413static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
4414{
4415        struct hns_roce_dev *hr_dev = dev_id;
4416        struct device *dev = hr_dev->dev;
4417        int int_work = 0;
4418        u32 int_st;
4419        u32 int_en;
4420
4421        /* Abnormal interrupt */
4422        int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
4423        int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);
4424
4425        if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
4426                dev_err(dev, "AEQ overflow!\n");
4427
4428                roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S, 1);
4429                roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
4430
4431                roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
4432                roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
4433
4434                int_work = 1;
4435        } else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S)) {
4436                dev_err(dev, "BUS ERR!\n");
4437
4438                roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S, 1);
4439                roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
4440
4441                roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
4442                roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
4443
4444                int_work = 1;
4445        } else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S)) {
4446                dev_err(dev, "OTHER ERR!\n");
4447
4448                roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S, 1);
4449                roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
4450
4451                roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
4452                roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
4453
4454                int_work = 1;
4455        } else
4456                dev_err(dev, "no abnormal irq found!\n");
4457
4458        return IRQ_RETVAL(int_work);
4459}
4460
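/*
 * Note: the handler above acknowledges a condition by setting its bit
 * in the status word and writing that word back to
 * ROCEE_VF_ABN_INT_ST_REG (a write-1-to-clear style register), then
 * rewrites the enable bit so the abnormal interrupt stays armed.
 */
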
4461static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
4462                                        int eq_num, int enable_flag)
4463{
4464        int i;
4465
4466        if (enable_flag == EQ_ENABLE) {
4467                for (i = 0; i < eq_num; i++)
4468                        roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
4469                                   i * EQ_REG_OFFSET,
4470                                   HNS_ROCE_V2_VF_EVENT_INT_EN_M);
4471
4472                roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
4473                           HNS_ROCE_V2_VF_ABN_INT_EN_M);
4474                roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
4475                           HNS_ROCE_V2_VF_ABN_INT_CFG_M);
4476        } else {
4477                for (i = 0; i < eq_num; i++)
4478                        roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
4479                                   i * EQ_REG_OFFSET,
4480                                   HNS_ROCE_V2_VF_EVENT_INT_EN_M & 0x0);
4481
4482                roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
4483                           HNS_ROCE_V2_VF_ABN_INT_EN_M & 0x0);
4484                roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
4485                           HNS_ROCE_V2_VF_ABN_INT_CFG_M & 0x0);
4486        }
4487}
4488
4489static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
4490{
4491        struct device *dev = hr_dev->dev;
4492        int ret;
4493
4494        if (eqn < hr_dev->caps.num_comp_vectors)
4495                ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
4496                                        0, HNS_ROCE_CMD_DESTROY_CEQC,
4497                                        HNS_ROCE_CMD_TIMEOUT_MSECS);
4498        else
4499                ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
4500                                        0, HNS_ROCE_CMD_DESTROY_AEQC,
4501                                        HNS_ROCE_CMD_TIMEOUT_MSECS);
4502        if (ret)
4503                dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);
4504}
4505
4506static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev,
4507                                  struct hns_roce_eq *eq)
4508{
4509        struct device *dev = hr_dev->dev;
4510        u64 idx;
4511        u64 size;
4512        u32 buf_chk_sz;
4513        u32 bt_chk_sz;
4514        u32 mhop_num;
4515        int eqe_alloc;
4516        int i = 0;
4517        int j = 0;
4518
4519        mhop_num = hr_dev->caps.eqe_hop_num;
4520        buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
4521        bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
4522
4523        /* hop_num = 0 */
4524        if (mhop_num == HNS_ROCE_HOP_NUM_0) {
4525                dma_free_coherent(dev, (unsigned int)(eq->entries *
4526                                  eq->eqe_size), eq->bt_l0, eq->l0_dma);
4527                return;
4528        }
4529
4530        /* hop_num = 1 or hop_num = 2 */
4531        dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
4532        if (mhop_num == 1) {
4533                for (i = 0; i < eq->l0_last_num; i++) {
4534                        if (i == eq->l0_last_num - 1) {
4535                                eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
4536                                size = (eq->entries - eqe_alloc) * eq->eqe_size;
4537                                dma_free_coherent(dev, size, eq->buf[i],
4538                                                  eq->buf_dma[i]);
4539                                break;
4540                        }
4541                        dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
4542                                          eq->buf_dma[i]);
4543                }
4544        } else if (mhop_num == 2) {
4545                for (i = 0; i < eq->l0_last_num; i++) {
4546                        dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
4547                                          eq->l1_dma[i]);
4548
4549                        for (j = 0; j < bt_chk_sz / 8; j++) {
4550                                idx = i * (bt_chk_sz / 8) + j;
4551                                if ((i == eq->l0_last_num - 1)
4552                                     && j == eq->l1_last_num - 1) {
4553                                        eqe_alloc = (buf_chk_sz / eq->eqe_size)
4554                                                    * idx;
4555                                        size = (eq->entries - eqe_alloc)
4556                                                * eq->eqe_size;
4557                                        dma_free_coherent(dev, size,
4558                                                          eq->buf[idx],
4559                                                          eq->buf_dma[idx]);
4560                                        break;
4561                                }
4562                                dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
4563                                                  eq->buf_dma[idx]);
4564                        }
4565                }
4566        }
4567        kfree(eq->buf_dma);
4568        kfree(eq->buf);
4569        kfree(eq->l1_dma);
4570        kfree(eq->bt_l1);
4571        eq->buf_dma = NULL;
4572        eq->buf = NULL;
4573        eq->l1_dma = NULL;
4574        eq->bt_l1 = NULL;
4575}
4576
4577static void hns_roce_v2_free_eq(struct hns_roce_dev *hr_dev,
4578                                struct hns_roce_eq *eq)
4579{
4580        u32 buf_chk_sz;
4581
4582        buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4583
4584        if (hr_dev->caps.eqe_hop_num) {
4585                hns_roce_mhop_free_eq(hr_dev, eq);
4586                return;
4587        }
4588
4589        if (eq->buf_list)
4590                dma_free_coherent(hr_dev->dev, buf_chk_sz,
4591                                  eq->buf_list->buf, eq->buf_list->map);
4592}
4593
4594static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
4595                                struct hns_roce_eq *eq,
4596                                void *mb_buf)
4597{
4598        struct hns_roce_eq_context *eqc;
4599
4600        eqc = mb_buf;
4601        memset(eqc, 0, sizeof(struct hns_roce_eq_context));
4602
4603        /* init eqc */
4604        eq->doorbell = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
4605        eq->hop_num = hr_dev->caps.eqe_hop_num;
4606        eq->cons_index = 0;
4607        eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
4608        eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
4609        eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
4610        eq->eqe_ba_pg_sz = hr_dev->caps.eqe_ba_pg_sz;
4611        eq->eqe_buf_pg_sz = hr_dev->caps.eqe_buf_pg_sz;
4612        eq->shift = ilog2((unsigned int)eq->entries);
4613
4614        if (!eq->hop_num)
4615                eq->eqe_ba = eq->buf_list->map;
4616        else
4617                eq->eqe_ba = eq->l0_dma;
4618
4619        /* set eqc state */
4620        roce_set_field(eqc->byte_4,
4621                       HNS_ROCE_EQC_EQ_ST_M,
4622                       HNS_ROCE_EQC_EQ_ST_S,
4623                       HNS_ROCE_V2_EQ_STATE_VALID);
4624
4625        /* set eqe hop num */
4626        roce_set_field(eqc->byte_4,
4627                       HNS_ROCE_EQC_HOP_NUM_M,
4628                       HNS_ROCE_EQC_HOP_NUM_S, eq->hop_num);
4629
4630        /* set eqc over_ignore */
4631        roce_set_field(eqc->byte_4,
4632                       HNS_ROCE_EQC_OVER_IGNORE_M,
4633                       HNS_ROCE_EQC_OVER_IGNORE_S, eq->over_ignore);
4634
4635        /* set eqc coalesce */
4636        roce_set_field(eqc->byte_4,
4637                       HNS_ROCE_EQC_COALESCE_M,
4638                       HNS_ROCE_EQC_COALESCE_S, eq->coalesce);
4639
4640        /* set eqc arm_state */
4641        roce_set_field(eqc->byte_4,
4642                       HNS_ROCE_EQC_ARM_ST_M,
4643                       HNS_ROCE_EQC_ARM_ST_S, eq->arm_st);
4644
4645        /* set eqn */
4646        roce_set_field(eqc->byte_4,
4647                       HNS_ROCE_EQC_EQN_M,
4648                       HNS_ROCE_EQC_EQN_S, eq->eqn);
4649
4650        /* set eqe_cnt */
4651        roce_set_field(eqc->byte_4,
4652                       HNS_ROCE_EQC_EQE_CNT_M,
4653                       HNS_ROCE_EQC_EQE_CNT_S,
4654                       HNS_ROCE_EQ_INIT_EQE_CNT);
4655
4656        /* set eqe_ba_pg_sz */
4657        roce_set_field(eqc->byte_8,
4658                       HNS_ROCE_EQC_BA_PG_SZ_M,
4659                       HNS_ROCE_EQC_BA_PG_SZ_S,
4660                       eq->eqe_ba_pg_sz + PG_SHIFT_OFFSET);
4661
4662        /* set eqe_buf_pg_sz */
4663        roce_set_field(eqc->byte_8,
4664                       HNS_ROCE_EQC_BUF_PG_SZ_M,
4665                       HNS_ROCE_EQC_BUF_PG_SZ_S,
4666                       eq->eqe_buf_pg_sz + PG_SHIFT_OFFSET);
4667
4668        /* set eq_producer_idx */
4669        roce_set_field(eqc->byte_8,
4670                       HNS_ROCE_EQC_PROD_INDX_M,
4671                       HNS_ROCE_EQC_PROD_INDX_S,
4672                       HNS_ROCE_EQ_INIT_PROD_IDX);
4673
4674        /* set eq_max_cnt */
4675        roce_set_field(eqc->byte_12,
4676                       HNS_ROCE_EQC_MAX_CNT_M,
4677                       HNS_ROCE_EQC_MAX_CNT_S, eq->eq_max_cnt);
4678
4679        /* set eq_period */
4680        roce_set_field(eqc->byte_12,
4681                       HNS_ROCE_EQC_PERIOD_M,
4682                       HNS_ROCE_EQC_PERIOD_S, eq->eq_period);
4683
4684        /* set eqe_report_timer */
4685        roce_set_field(eqc->eqe_report_timer,
4686                       HNS_ROCE_EQC_REPORT_TIMER_M,
4687                       HNS_ROCE_EQC_REPORT_TIMER_S,
4688                       HNS_ROCE_EQ_INIT_REPORT_TIMER);
4689
4690        /* set eqe_ba [34:3] */
4691        roce_set_field(eqc->eqe_ba0,
4692                       HNS_ROCE_EQC_EQE_BA_L_M,
4693                       HNS_ROCE_EQC_EQE_BA_L_S, eq->eqe_ba >> 3);
4694
4695        /* set eqe_ba [63:35] */
4696        roce_set_field(eqc->eqe_ba1,
4697                       HNS_ROCE_EQC_EQE_BA_H_M,
4698                       HNS_ROCE_EQC_EQE_BA_H_S, eq->eqe_ba >> 35);
4699
4700        /* set eq shift */
4701        roce_set_field(eqc->byte_28,
4702                       HNS_ROCE_EQC_SHIFT_M,
4703                       HNS_ROCE_EQC_SHIFT_S, eq->shift);
4704
4705        /* set eq MSI_IDX */
4706        roce_set_field(eqc->byte_28,
4707                       HNS_ROCE_EQC_MSI_INDX_M,
4708                       HNS_ROCE_EQC_MSI_INDX_S,
4709                       HNS_ROCE_EQ_INIT_MSI_IDX);
4710
4711        /* set cur_eqe_ba [27:12] */
4712        roce_set_field(eqc->byte_28,
4713                       HNS_ROCE_EQC_CUR_EQE_BA_L_M,
4714                       HNS_ROCE_EQC_CUR_EQE_BA_L_S, eq->cur_eqe_ba >> 12);
4715
4716        /* set cur_eqe_ba [59:28] */
4717        roce_set_field(eqc->byte_32,
4718                       HNS_ROCE_EQC_CUR_EQE_BA_M_M,
4719                       HNS_ROCE_EQC_CUR_EQE_BA_M_S, eq->cur_eqe_ba >> 28);
4720
4721        /* set cur_eqe_ba [63:60] */
4722        roce_set_field(eqc->byte_36,
4723                       HNS_ROCE_EQC_CUR_EQE_BA_H_M,
4724                       HNS_ROCE_EQC_CUR_EQE_BA_H_S, eq->cur_eqe_ba >> 60);
4725
4726        /* set eq consumer idx */
4727        roce_set_field(eqc->byte_36,
4728                       HNS_ROCE_EQC_CONS_INDX_M,
4729                       HNS_ROCE_EQC_CONS_INDX_S,
4730                       HNS_ROCE_EQ_INIT_CONS_IDX);
4731
4732        /* set nxt_eqe_ba [43:12] */
4733        roce_set_field(eqc->nxt_eqe_ba0,
4734                       HNS_ROCE_EQC_NXT_EQE_BA_L_M,
4735                       HNS_ROCE_EQC_NXT_EQE_BA_L_S, eq->nxt_eqe_ba >> 12);
4736
4737        /* set nxt_eqe_ba [63:44] */
4738        roce_set_field(eqc->nxt_eqe_ba1,
4739                       HNS_ROCE_EQC_NXT_EQE_BA_H_M,
4740                       HNS_ROCE_EQC_NXT_EQE_BA_H_S, eq->nxt_eqe_ba >> 44);
4741}
4742
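/*
 * Illustrative sketch (not part of the driver): the 64-bit base
 * addresses above are split across narrow context fields with plain
 * right shifts, relying on roce_set_field() to mask off the excess
 * upper bits.  For eqe_ba, bits [34:3] and [63:35] come out as:
 */
static inline void split_eqe_ba_sketch(u64 ba, u32 *lo, u32 *hi)
{
        *lo = (ba >> 3) & GENMASK(31, 0);  /* eqe_ba[34:3], 32 bits */
        *hi = (ba >> 35) & GENMASK(28, 0); /* eqe_ba[63:35], 29 bits */
}
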
4743static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
4744                                  struct hns_roce_eq *eq)
4745{
4746        struct device *dev = hr_dev->dev;
4747        int eq_alloc_done = 0;
4748        int eq_buf_cnt = 0;
4749        int eqe_alloc;
4750        u32 buf_chk_sz;
4751        u32 bt_chk_sz;
4752        u32 mhop_num;
4753        u64 size;
4754        u64 idx;
4755        int ba_num;
4756        int bt_num;
4757        int record_i;
4758        int record_j;
4759        int i = 0;
4760        int j = 0;
4761
4762        mhop_num = hr_dev->caps.eqe_hop_num;
4763        buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
4764        bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
4765
4766        ba_num = (PAGE_ALIGN(eq->entries * eq->eqe_size) + buf_chk_sz - 1)
4767                  / buf_chk_sz;
4768        bt_num = (ba_num + bt_chk_sz / 8 - 1) / (bt_chk_sz / 8);
4769
4770        /* hop_num = 0 */
4771        if (mhop_num == HNS_ROCE_HOP_NUM_0) {
4772                if (eq->entries > buf_chk_sz / eq->eqe_size) {
4773                        dev_err(dev, "eq entries %d is larger than buf_pg_sz!\n",
4774                                eq->entries);
4775                        return -EINVAL;
4776                }
4777                eq->bt_l0 = dma_alloc_coherent(dev, eq->entries * eq->eqe_size,
4778                                               &(eq->l0_dma), GFP_KERNEL);
4779                if (!eq->bt_l0)
4780                        return -ENOMEM;
4781
4782                eq->cur_eqe_ba = eq->l0_dma;
4783                eq->nxt_eqe_ba = 0;
4784
4785                return 0;
4786        }
4787
4788        eq->buf_dma = kcalloc(ba_num, sizeof(*eq->buf_dma), GFP_KERNEL);
4789        if (!eq->buf_dma)
4790                return -ENOMEM;
4791        eq->buf = kcalloc(ba_num, sizeof(*eq->buf), GFP_KERNEL);
4792        if (!eq->buf)
4793                goto err_kcalloc_buf;
4794
4795        if (mhop_num == 2) {
4796                eq->l1_dma = kcalloc(bt_num, sizeof(*eq->l1_dma), GFP_KERNEL);
4797                if (!eq->l1_dma)
4798                        goto err_kcalloc_l1_dma;
4799
4800                eq->bt_l1 = kcalloc(bt_num, sizeof(*eq->bt_l1), GFP_KERNEL);
4801                if (!eq->bt_l1)
4802                        goto err_kcalloc_bt_l1;
4803        }
4804
4805        /* alloc L0 BT */
4806        eq->bt_l0 = dma_alloc_coherent(dev, bt_chk_sz, &eq->l0_dma, GFP_KERNEL);
4807        if (!eq->bt_l0)
4808                goto err_dma_alloc_l0;
4809
4810        if (mhop_num == 1) {
4811                if (ba_num > (bt_chk_sz / 8))
4812                        dev_err(dev, "ba_num %d is too large for 1 hop\n",
4813                                ba_num);
4814
4815                /* alloc buf */
4816                for (i = 0; i < bt_chk_sz / 8; i++) {
4817                        if (eq_buf_cnt + 1 < ba_num) {
4818                                size = buf_chk_sz;
4819                        } else {
4820                                eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
4821                                size = (eq->entries - eqe_alloc) * eq->eqe_size;
4822                        }
4823                        eq->buf[i] = dma_alloc_coherent(dev, size,
4824                                                        &(eq->buf_dma[i]),
4825                                                        GFP_KERNEL);
4826                        if (!eq->buf[i])
4827                                goto err_dma_alloc_buf;
4828
4829                        memset(eq->buf[i], 0, size);
4830                        *(eq->bt_l0 + i) = eq->buf_dma[i];
4831
4832                        eq_buf_cnt++;
4833                        if (eq_buf_cnt >= ba_num)
4834                                break;
4835                }
4836                eq->cur_eqe_ba = eq->buf_dma[0];
4837                eq->nxt_eqe_ba = eq->buf_dma[1];
4838
4839        } else if (mhop_num == 2) {
4840                /* alloc L1 BT and buf */
4841                for (i = 0; i < bt_chk_sz / 8; i++) {
4842                        eq->bt_l1[i] = dma_alloc_coherent(dev, bt_chk_sz,
4843                                                          &(eq->l1_dma[i]),
4844                                                          GFP_KERNEL);
4845                        if (!eq->bt_l1[i])
4846                                goto err_dma_alloc_l1;
4847                        *(eq->bt_l0 + i) = eq->l1_dma[i];
4848
4849                        for (j = 0; j < bt_chk_sz / 8; j++) {
4850                                idx = i * bt_chk_sz / 8 + j;
4851                                if (eq_buf_cnt + 1 < ba_num) {
4852                                        size = buf_chk_sz;
4853                                } else {
4854                                        eqe_alloc = (buf_chk_sz / eq->eqe_size)
4855                                                    * idx;
4856                                        size = (eq->entries - eqe_alloc)
4857                                                * eq->eqe_size;
4858                                }
4859                                eq->buf[idx] = dma_alloc_coherent(dev, size,
4860                                                            &(eq->buf_dma[idx]),
4861                                                            GFP_KERNEL);
4862                                if (!eq->buf[idx])
4863                                        goto err_dma_alloc_buf;
4864
4865                                memset(eq->buf[idx], 0, size);
4866                                *(eq->bt_l1[i] + j) = eq->buf_dma[idx];
4867
4868                                eq_buf_cnt++;
4869                                if (eq_buf_cnt >= ba_num) {
4870                                        eq_alloc_done = 1;
4871                                        break;
4872                                }
4873                        }
4874
4875                        if (eq_alloc_done)
4876                                break;
4877                }
4878                eq->cur_eqe_ba = eq->buf_dma[0];
4879                eq->nxt_eqe_ba = eq->buf_dma[1];
4880        }
4881
4882        eq->l0_last_num = i + 1;
4883        if (mhop_num == 2)
4884                eq->l1_last_num = j + 1;
4885
4886        return 0;
4887
4888err_dma_alloc_l1:
4889        dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
4890        eq->bt_l0 = NULL;
4891        eq->l0_dma = 0;
4892        for (i -= 1; i >= 0; i--) {
4893                dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
4894                                  eq->l1_dma[i]);
4895
4896                for (j = 0; j < bt_chk_sz / 8; j++) {
4897                        idx = i * bt_chk_sz / 8 + j;
4898                        dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
4899                                          eq->buf_dma[idx]);
4900                }
4901        }
4902        goto err_dma_alloc_l0;
4903
4904err_dma_alloc_buf:
4905        dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
4906        eq->bt_l0 = NULL;
4907        eq->l0_dma = 0;
4908
4909        if (mhop_num == 1) {
4910                for (i -= 1; i >= 0; i--)
4911                        dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
4912                                          eq->buf_dma[i]);
4913        } else if (mhop_num == 2) {
4914                record_i = i;
4915                record_j = j;
4916                for (; i >= 0; i--) {
4917                        dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
4918                                          eq->l1_dma[i]);
4919
4920                        for (j = 0; j < bt_chk_sz / 8; j++) {
4921                                if (i == record_i && j >= record_j)
4922                                        break;
4923
4924                                idx = i * bt_chk_sz / 8 + j;
4925                                dma_free_coherent(dev, buf_chk_sz,
4926                                                  eq->buf[idx],
4927                                                  eq->buf_dma[idx]);
4928                        }
4929                }
4930        }
4931
4932err_dma_alloc_l0:
4933        kfree(eq->bt_l1);
4934        eq->bt_l1 = NULL;
4935
4936err_kcalloc_bt_l1:
4937        kfree(eq->l1_dma);
4938        eq->l1_dma = NULL;
4939
4940err_kcalloc_l1_dma:
4941        kfree(eq->buf);
4942        eq->buf = NULL;
4943
4944err_kcalloc_buf:
4945        kfree(eq->buf_dma);
4946        eq->buf_dma = NULL;
4947
4948        return -ENOMEM;
4949}
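
/*
 * Illustrative sketch, not driver code: the 2-hop path above hangs
 * ba_num EQE buffers off L1 tables that each hold bt_chk_sz / 8
 * eight-byte DMA addresses, so buffer idx sits in L1 table
 * idx / (bt_chk_sz / 8) at slot idx % (bt_chk_sz / 8). The helper
 * below (hypothetical name) mirrors the tail-size arithmetic: every
 * buffer is a full chunk except the last, which is trimmed to the
 * EQEs that remain.
 */
static inline u32 eq_mhop_buf_size_example(u32 buf_chk_sz, u32 eqe_size,
                                           u32 entries, u32 idx)
{
        u32 eqe_alloc = (buf_chk_sz / eqe_size) * idx;

        /* full chunk for every buffer except the final, partial one */
        return min(buf_chk_sz, (entries - eqe_alloc) * eqe_size);
}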
4950
4951static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
4952                                 struct hns_roce_eq *eq,
4953                                 unsigned int eq_cmd)
4954{
4955        struct device *dev = hr_dev->dev;
4956        struct hns_roce_cmd_mailbox *mailbox;
4957        u32 buf_chk_sz = 0;
4958        int ret;
4959
4960        /* Allocate mailbox memory */
4961        mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
4962        if (IS_ERR(mailbox))
4963                return PTR_ERR(mailbox);
4964
4965        if (!hr_dev->caps.eqe_hop_num) {
4966                buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
4967
4968                eq->buf_list = kzalloc(sizeof(struct hns_roce_buf_list),
4969                                       GFP_KERNEL);
4970                if (!eq->buf_list) {
4971                        ret = -ENOMEM;
4972                        goto free_cmd_mbox;
4973                }
4974
4975                eq->buf_list->buf = dma_alloc_coherent(dev, buf_chk_sz,
4976                                                       &(eq->buf_list->map),
4977                                                       GFP_KERNEL);
4978                if (!eq->buf_list->buf) {
4979                        ret = -ENOMEM;
4980                        goto err_alloc_buf;
4981                }
4982
4983                memset(eq->buf_list->buf, 0, buf_chk_sz);
4984        } else {
4985                ret = hns_roce_mhop_alloc_eq(hr_dev, eq);
4986                if (ret) {
4987                        ret = -ENOMEM;
4988                        goto free_cmd_mbox;
4989                }
4990        }
4991
4992        hns_roce_config_eqc(hr_dev, eq, mailbox->buf);
4993
4994        ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
4995                                eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
4996        if (ret) {
4997                dev_err(dev, "[mailbox cmd] create eqc failed.\n");
4998                goto err_cmd_mbox;
4999        }
5000
5001        hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5002
5003        return 0;
5004
5005err_cmd_mbox:
5006        if (!hr_dev->caps.eqe_hop_num) {
5007                dma_free_coherent(dev, buf_chk_sz, eq->buf_list->buf,
5008                                  eq->buf_list->map);
5009        } else {
5010                hns_roce_mhop_free_eq(hr_dev, eq);
5011                goto free_cmd_mbox;
5012        }
5013
5014err_alloc_buf:
5015        kfree(eq->buf_list);
5016
5017free_cmd_mbox:
5018        hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5019
5020        return ret;
5021}
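
/*
 * Worked example, illustrative values only: with eqe_buf_pg_sz == 0 and
 * PAGE_SHIFT == 12, the 0-hop branch above allocates one coherent chunk
 * of 1 << (0 + 12) = 4096 bytes; at the 16-byte CEQE size that is 256
 * entries, beyond which eqe_hop_num must be nonzero and the multi-hop
 * allocator takes over. The helper name below is hypothetical.
 */
static inline u32 eq_0hop_entries_example(u32 eqe_buf_pg_sz, u32 eqe_size)
{
        /* e.g. (1 << (0 + 12)) / 16 == 256 entries in a single buffer */
        return (1U << (eqe_buf_pg_sz + PAGE_SHIFT)) / eqe_size;
}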
5022
5023static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
5024{
5025        struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
5026        struct device *dev = hr_dev->dev;
5027        struct hns_roce_eq *eq;
5028        unsigned int eq_cmd;
5029        int irq_num;
5030        int eq_num;
5031        int other_num;
5032        int comp_num;
5033        int aeq_num;
5034        int i, j, k;
5035        int ret;
5036
5037        other_num = hr_dev->caps.num_other_vectors;
5038        comp_num = hr_dev->caps.num_comp_vectors;
5039        aeq_num = hr_dev->caps.num_aeq_vectors;
5040
5041        eq_num = comp_num + aeq_num;
5042        irq_num = eq_num + other_num;
5043
5044        eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
5045        if (!eq_table->eq)
5046                return -ENOMEM;
5047
5048        for (i = 0; i < irq_num; i++) {
5049                hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
5050                                               GFP_KERNEL);
5051                if (!hr_dev->irq_names[i]) {
5052                        ret = -ENOMEM;
5053                        goto err_failed_kzalloc;
5054                }
5055        }
5056
5057        /* create eq */
5058        for (j = 0; j < eq_num; j++) {
5059                eq = &eq_table->eq[j];
5060                eq->hr_dev = hr_dev;
5061                eq->eqn = j;
5062                if (j < comp_num) {
5063                        /* CEQ */
5064                        eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
5065                        eq->type_flag = HNS_ROCE_CEQ;
5066                        eq->entries = hr_dev->caps.ceqe_depth;
5067                        eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
5068                        eq->irq = hr_dev->irq[j + other_num + aeq_num];
5069                        eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
5070                        eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
5071                } else {
5072                        /* AEQ */
5073                        eq_cmd = HNS_ROCE_CMD_CREATE_AEQC;
5074                        eq->type_flag = HNS_ROCE_AEQ;
5075                        eq->entries = hr_dev->caps.aeqe_depth;
5076                        eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
5077                        eq->irq = hr_dev->irq[j - comp_num + other_num];
5078                        eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
5079                        eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
5080                }
5081
5082                ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd);
5083                if (ret) {
5084                        dev_err(dev, "eq create failed.\n");
5085                        goto err_create_eq_fail;
5086                }
5087        }
5088
5089        /* enable irq */
5090        hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);
5091
5092        /* irq vectors: abnormal + AEQ + CEQ */
5093        for (k = 0; k < irq_num; k++)
5094                if (k < other_num)
5095                        snprintf((char *)hr_dev->irq_names[k],
5096                                 HNS_ROCE_INT_NAME_LEN, "hns-abn-%d", k);
5097                else if (k < (other_num + aeq_num))
5098                        snprintf((char *)hr_dev->irq_names[k],
5099                                 HNS_ROCE_INT_NAME_LEN, "hns-aeq-%d",
5100                                 k - other_num);
5101                else
5102                        snprintf((char *)hr_dev->irq_names[k],
5103                                 HNS_ROCE_INT_NAME_LEN, "hns-ceq-%d",
5104                                 k - other_num - aeq_num);
5105
5106        for (k = 0; k < irq_num; k++) {
5107                if (k < other_num)
5108                        ret = request_irq(hr_dev->irq[k],
5109                                          hns_roce_v2_msix_interrupt_abn,
5110                                          0, hr_dev->irq_names[k], hr_dev);
5111
5112                else if (k < (other_num + comp_num))
5113                        ret = request_irq(eq_table->eq[k - other_num].irq,
5114                                          hns_roce_v2_msix_interrupt_eq,
5115                                          0, hr_dev->irq_names[k + aeq_num],
5116                                          &eq_table->eq[k - other_num]);
5117                else
5118                        ret = request_irq(eq_table->eq[k - other_num].irq,
5119                                          hns_roce_v2_msix_interrupt_eq,
5120                                          0, hr_dev->irq_names[k - comp_num],
5121                                          &eq_table->eq[k - other_num]);
5122                if (ret) {
5123                        dev_err(dev, "Request irq error!\n");
5124                        goto err_request_irq_fail;
5125                }
5126        }
5127
5128        hr_dev->irq_workq =
5129                create_singlethread_workqueue("hns_roce_irq_workqueue");
5130        if (!hr_dev->irq_workq) {
5131                dev_err(dev, "Create irq workqueue failed!\n");
5132                ret = -ENOMEM;
5133                goto err_request_irq_fail;
5134        }
5135
5136        return 0;
5137
5138err_request_irq_fail:
5139        for (k -= 1; k >= 0; k--)
5140                if (k < other_num)
5141                        free_irq(hr_dev->irq[k], hr_dev);
5142                else
5143                        free_irq(eq_table->eq[k - other_num].irq,
5144                                 &eq_table->eq[k - other_num]);
5145
5146err_create_eq_fail:
5147        for (j -= 1; j >= 0; j--)
5148                hns_roce_v2_free_eq(hr_dev, &eq_table->eq[j]);
5149
5150err_failed_kzalloc:
5151        for (i -= 1; i >= 0; i--)
5152                kfree(hr_dev->irq_names[i]);
5153        kfree(eq_table->eq);
5154
5155        return ret;
5156}
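
/*
 * Index-map sketch for the two loops above, hypothetical counts: with
 * other_num = 1, aeq_num = 1, comp_num = 2, irq_names[] becomes
 * { "hns-abn-0", "hns-aeq-0", "hns-ceq-0", "hns-ceq-1" }, while
 * eq_table->eq[] holds the CEQs first and then the AEQs. The helper
 * below (not driver code) mirrors the name selection in the
 * request_irq() loop: CEQ slots skip past the AEQ names (+aeq_num) and
 * AEQ slots reach back over the CEQ range (-comp_num).
 */
static inline int eq_irq_name_idx_example(int k, int other_num,
                                          int aeq_num, int comp_num)
{
        if (k < other_num)
                return k;                       /* abnormal vector */
        if (k < other_num + comp_num)
                return k + aeq_num;             /* completion EQ */
        return k - comp_num;                    /* async EQ */
}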
5157
5158static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
5159{
5160        struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
5161        int irq_num;
5162        int eq_num;
5163        int i;
5164
5165        eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
5166        irq_num = eq_num + hr_dev->caps.num_other_vectors;
5167
5168        /* Disable irq */
5169        hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
5170
5171        for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
5172                free_irq(hr_dev->irq[i], hr_dev);
5173
5174        for (i = 0; i < eq_num; i++) {
5175                hns_roce_v2_destroy_eqc(hr_dev, i);
5176
5177                free_irq(eq_table->eq[i].irq, &eq_table->eq[i]);
5178
5179                hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]);
5180        }
5181
5182        for (i = 0; i < irq_num; i++)
5183                kfree(hr_dev->irq_names[i]);
5184
5185        kfree(eq_table->eq);
5186
5187        flush_workqueue(hr_dev->irq_workq);
5188        destroy_workqueue(hr_dev->irq_workq);
5189}
5190
5191static const struct ib_device_ops hns_roce_v2_dev_ops = {
5192        .destroy_qp = hns_roce_v2_destroy_qp,
5193        .modify_cq = hns_roce_v2_modify_cq,
5194        .poll_cq = hns_roce_v2_poll_cq,
5195        .post_recv = hns_roce_v2_post_recv,
5196        .post_send = hns_roce_v2_post_send,
5197        .query_qp = hns_roce_v2_query_qp,
5198        .req_notify_cq = hns_roce_v2_req_notify_cq,
5199};
5200
5201static const struct hns_roce_hw hns_roce_hw_v2 = {
5202        .cmq_init = hns_roce_v2_cmq_init,
5203        .cmq_exit = hns_roce_v2_cmq_exit,
5204        .hw_profile = hns_roce_v2_profile,
5205        .hw_init = hns_roce_v2_init,
5206        .hw_exit = hns_roce_v2_exit,
5207        .post_mbox = hns_roce_v2_post_mbox,
5208        .chk_mbox = hns_roce_v2_chk_mbox,
5209        .set_gid = hns_roce_v2_set_gid,
5210        .set_mac = hns_roce_v2_set_mac,
5211        .write_mtpt = hns_roce_v2_write_mtpt,
5212        .rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
5213        .write_cqc = hns_roce_v2_write_cqc,
5214        .set_hem = hns_roce_v2_set_hem,
5215        .clear_hem = hns_roce_v2_clear_hem,
5216        .modify_qp = hns_roce_v2_modify_qp,
5217        .query_qp = hns_roce_v2_query_qp,
5218        .destroy_qp = hns_roce_v2_destroy_qp,
5219        .modify_cq = hns_roce_v2_modify_cq,
5220        .post_send = hns_roce_v2_post_send,
5221        .post_recv = hns_roce_v2_post_recv,
5222        .req_notify_cq = hns_roce_v2_req_notify_cq,
5223        .poll_cq = hns_roce_v2_poll_cq,
5224        .init_eq = hns_roce_v2_init_eq_table,
5225        .cleanup_eq = hns_roce_v2_cleanup_eq_table,
5226        .hns_roce_dev_ops = &hns_roce_v2_dev_ops,
5227};
5228
5229static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
5230        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
5231        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
5232        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
5233        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
5234        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
5235        /* required last entry */
5236        {0, }
5237};
5238
5239MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);
5240
5241static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
5242                                  struct hnae3_handle *handle)
5243{
5244        const struct pci_device_id *id;
5245        int i;
5246
5247        id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev);
5248        if (!id) {
5249                dev_err(hr_dev->dev, "device is not compatible!\n");
5250                return -ENXIO;
5251        }
5252
5253        hr_dev->hw = &hns_roce_hw_v2;
5254        hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
5255        hr_dev->odb_offset = hr_dev->sdb_offset;
5256
5257        /* Get info from NIC driver. */
5258        hr_dev->reg_base = handle->rinfo.roce_io_base;
5259        hr_dev->caps.num_ports = 1;
5260        hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
5261        hr_dev->iboe.phy_port[0] = 0;
5262
5263        addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
5264                            hr_dev->iboe.netdevs[0]->dev_addr);
5265
5266        for (i = 0; i < HNS_ROCE_V2_MAX_IRQ_NUM; i++)
5267                hr_dev->irq[i] = pci_irq_vector(handle->pdev,
5268                                                i + handle->rinfo.base_vector);
5269
5270        /* cmd issue mode: 0 is poll, 1 is event */
5271        hr_dev->cmd_mod = 1;
5272        hr_dev->loop_idc = 0;
5273
5274        return 0;
5275}
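
/*
 * Illustrative sketch: the RoCE function borrows MSI-X vectors from the
 * hns3 NIC, so RoCE vector n is PCI vector base_vector + n of the shared
 * PCI function. The wrapper name is hypothetical; pci_irq_vector() is
 * the real API used in the loop above.
 */
static inline int roce_vector_to_irq_example(struct hnae3_handle *handle,
                                             int n)
{
        return pci_irq_vector(handle->pdev,
                              handle->rinfo.base_vector + n);
}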
5276
5277static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
5278{
5279        struct hns_roce_dev *hr_dev;
5280        int ret;
5281
5282        hr_dev = ib_alloc_device(hns_roce_dev, ib_dev);
5283        if (!hr_dev)
5284                return -ENOMEM;
5285
5286        hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL);
5287        if (!hr_dev->priv) {
5288                ret = -ENOMEM;
5289                goto error_failed_kzalloc;
5290        }
5291
5292        hr_dev->pci_dev = handle->pdev;
5293        hr_dev->dev = &handle->pdev->dev;
5294        handle->priv = hr_dev;
5295
5296        ret = hns_roce_hw_v2_get_cfg(hr_dev, handle);
5297        if (ret) {
5298                dev_err(hr_dev->dev, "Get configuration failed!\n");
5299                goto error_failed_get_cfg;
5300        }
5301
5302        ret = hns_roce_init(hr_dev);
5303        if (ret) {
5304                dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
5305                goto error_failed_get_cfg;
5306        }
5307
5308        return 0;
5309
5310error_failed_get_cfg:
5311        kfree(hr_dev->priv);
5312
5313error_failed_kzalloc:
5314        ib_dealloc_device(&hr_dev->ib_dev);
5315
5316        return ret;
5317}
5318
5319static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
5320                                           bool reset)
5321{
5322        struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
5323
5324        if (!hr_dev)
5325                return;
5326
5327        hns_roce_exit(hr_dev);
5328        kfree(hr_dev->priv);
5329        ib_dealloc_device(&hr_dev->ib_dev);
5330}
5331
5332static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
5333{
5334        struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
5335        struct ib_event event;
5336
5337        if (!hr_dev) {
5338                dev_err(&handle->pdev->dev,
5339                        "Input parameter handle->priv is NULL!\n");
5340                return -EINVAL;
5341        }
5342
5343        hr_dev->active = false;
5344        hr_dev->is_reset = true;
5345
5346        event.event = IB_EVENT_DEVICE_FATAL;
5347        event.device = &hr_dev->ib_dev;
5348        event.element.port_num = 1;
5349        ib_dispatch_event(&event);
5350
5351        return 0;
5352}
5353
5354static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
5355{
5356        int ret;
5357
5358        ret = hns_roce_hw_v2_init_instance(handle);
5359        if (ret) {
5360                /* When the reset notify type is HNAE3_INIT_CLIENT, the RoCE
5361                 * engine is reinitialized in this callback. If the reinit
5362                 * fails, inform the NIC driver.
5363                 */
5364                handle->priv = NULL;
5365                dev_err(&handle->pdev->dev,
5366                        "RoCE reinit failed during reset, ret = %d.\n", ret);
5367        }
5368
5369        return ret;
5370}
5371
5372static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
5373{
5374        msleep(100);    /* let pending hardware access settle before uninit */
5375        hns_roce_hw_v2_uninit_instance(handle, false);
5376        return 0;
5377}
5378
5379static int hns_roce_hw_v2_reset_notify(struct hnae3_handle *handle,
5380                                       enum hnae3_reset_notify_type type)
5381{
5382        int ret = 0;
5383
5384        switch (type) {
5385        case HNAE3_DOWN_CLIENT:
5386                ret = hns_roce_hw_v2_reset_notify_down(handle);
5387                break;
5388        case HNAE3_INIT_CLIENT:
5389                ret = hns_roce_hw_v2_reset_notify_init(handle);
5390                break;
5391        case HNAE3_UNINIT_CLIENT:
5392                ret = hns_roce_hw_v2_reset_notify_uninit(handle);
5393                break;
5394        default:
5395                break;
5396        }
5397
5398        return ret;
5399}
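
/*
 * Reset sequencing sketch (assumed hnae3 ordering, matching the handlers
 * above): the NIC core first sends HNAE3_DOWN_CLIENT so this driver can
 * fence new work and raise IB_EVENT_DEVICE_FATAL, then
 * HNAE3_UNINIT_CLIENT to tear the instance down while the hardware
 * resets, and finally HNAE3_INIT_CLIENT to rebuild it; an error from the
 * init stage tells the NIC driver that RoCE did not come back.
 */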
5400
5401static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
5402        .init_instance = hns_roce_hw_v2_init_instance,
5403        .uninit_instance = hns_roce_hw_v2_uninit_instance,
5404        .reset_notify = hns_roce_hw_v2_reset_notify,
5405};
5406
5407static struct hnae3_client hns_roce_hw_v2_client = {
5408        .name = "hns_roce_hw_v2",
5409        .type = HNAE3_CLIENT_ROCE,
5410        .ops = &hns_roce_hw_v2_ops,
5411};
5412
5413static int __init hns_roce_hw_v2_init(void)
5414{
5415        return hnae3_register_client(&hns_roce_hw_v2_client);
5416}
5417
5418static void __exit hns_roce_hw_v2_exit(void)
5419{
5420        hnae3_unregister_client(&hns_roce_hw_v2_client);
5421}
5422
5423module_init(hns_roce_hw_v2_init);
5424module_exit(hns_roce_hw_v2_exit);
5425
5426MODULE_LICENSE("Dual BSD/GPL");
5427MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
5428MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
5429MODULE_AUTHOR("Shaobo Xu <xushaobo2@huawei.com>");
5430MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver");
5431