linux/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <rdma/ib_umem.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v1.h"

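/*
 * Helpers that fill hardware WQE segments. All fields are stored
 * little-endian, as the hip06 hardware expects them.
 */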
static void set_data_seg(struct hns_roce_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->lkey = cpu_to_le32(sg->lkey);
	dseg->addr = cpu_to_le64(sg->addr);
	dseg->len  = cpu_to_le32(sg->length);
}

static void set_raddr_seg(struct hns_roce_wqe_raddr_seg *rseg, u64 remote_addr,
			  u32 rkey)
{
	rseg->raddr = cpu_to_le64(remote_addr);
	rseg->rkey  = cpu_to_le32(rkey);
	rseg->len   = 0;
}

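/*
 * Post a chain of send work requests to the send queue. hip06 only
 * supports this path for GSI (UD) and RC QPs: each WQE is built in
 * place in the SQ buffer, and the new head pointer is published to
 * hardware through the SQ doorbell once the whole chain is written.
 */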
static int hns_roce_v1_post_send(struct ib_qp *ibqp,
				 const struct ib_send_wr *wr,
				 const struct ib_send_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
	struct hns_roce_ud_send_wqe *ud_sq_wqe = NULL;
	struct hns_roce_wqe_ctrl_seg *ctrl = NULL;
	struct hns_roce_wqe_data_seg *dseg = NULL;
	struct hns_roce_qp *qp = to_hr_qp(ibqp);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_sq_db sq_db;
	int ps_opcode = 0, i = 0;
	unsigned long flags = 0;
	void *wqe = NULL;
	__le32 doorbell[2];
	int nreq = 0;
	u32 ind = 0;
	int ret = 0;
	u8 *smac;
	int loopback;

	if (unlikely(ibqp->qp_type != IB_QPT_GSI &&
		ibqp->qp_type != IB_QPT_RC)) {
		dev_err(dev, "unsupported QP type\n");
		*bad_wr = NULL;
		return -EOPNOTSUPP;
	}

	spin_lock_irqsave(&qp->sq.lock, flags);
	ind = qp->sq_next_wqe;
	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
			dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
				wr->num_sge, qp->sq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
		qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
								      wr->wr_id;

		/* Build the WQE: the UD path for the GSI QP, otherwise RC */
		if (ibqp->qp_type == IB_QPT_GSI) {
			ud_sq_wqe = wqe;
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_0_M,
				       UD_SEND_WQE_U32_4_DMAC_0_S,
				       ah->av.mac[0]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_1_M,
				       UD_SEND_WQE_U32_4_DMAC_1_S,
				       ah->av.mac[1]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_2_M,
				       UD_SEND_WQE_U32_4_DMAC_2_S,
				       ah->av.mac[2]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_3_M,
				       UD_SEND_WQE_U32_4_DMAC_3_S,
				       ah->av.mac[3]);

			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_DMAC_4_M,
				       UD_SEND_WQE_U32_8_DMAC_4_S,
				       ah->av.mac[4]);
			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_DMAC_5_M,
				       UD_SEND_WQE_U32_8_DMAC_5_S,
				       ah->av.mac[5]);

			smac = (u8 *)hr_dev->dev_addr[qp->port];
			loopback = ether_addr_equal_unaligned(ah->av.mac,
							      smac) ? 1 : 0;
			roce_set_bit(ud_sq_wqe->u32_8,
				     UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S,
				     loopback);

			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_OPERATION_TYPE_M,
				       UD_SEND_WQE_U32_8_OPERATION_TYPE_S,
				       HNS_ROCE_WQE_OPCODE_SEND);
			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_M,
				       UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S,
				       2);
			roce_set_bit(ud_sq_wqe->u32_8,
				UD_SEND_WQE_U32_8_SEND_GL_ROUTING_HDR_FLAG_S,
				1);

			ud_sq_wqe->u32_8 |= (wr->send_flags & IB_SEND_SIGNALED ?
				cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
				(wr->send_flags & IB_SEND_SOLICITED ?
				cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
				((wr->opcode == IB_WR_SEND_WITH_IMM) ?
				cpu_to_le32(HNS_ROCE_WQE_IMM) : 0);

			roce_set_field(ud_sq_wqe->u32_16,
				       UD_SEND_WQE_U32_16_DEST_QP_M,
				       UD_SEND_WQE_U32_16_DEST_QP_S,
				       ud_wr(wr)->remote_qpn);
			roce_set_field(ud_sq_wqe->u32_16,
				       UD_SEND_WQE_U32_16_MAX_STATIC_RATE_M,
				       UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S,
				       ah->av.stat_rate);

			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_FLOW_LABEL_M,
				       UD_SEND_WQE_U32_36_FLOW_LABEL_S,
				       ah->av.flowlabel);
			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_PRIORITY_M,
				       UD_SEND_WQE_U32_36_PRIORITY_S,
				       ah->av.sl);
			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_SGID_INDEX_M,
				       UD_SEND_WQE_U32_36_SGID_INDEX_S,
				       hns_get_gid_index(hr_dev, qp->phy_port,
							 ah->av.gid_index));

			roce_set_field(ud_sq_wqe->u32_40,
				       UD_SEND_WQE_U32_40_HOP_LIMIT_M,
				       UD_SEND_WQE_U32_40_HOP_LIMIT_S,
				       ah->av.hop_limit);
			roce_set_field(ud_sq_wqe->u32_40,
				       UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M,
				       UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S,
				       ah->av.tclass);

			memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN);

			ud_sq_wqe->va0_l =
				       cpu_to_le32((u32)wr->sg_list[0].addr);
			ud_sq_wqe->va0_h =
				       cpu_to_le32((wr->sg_list[0].addr) >> 32);
			ud_sq_wqe->l_key0 =
				       cpu_to_le32(wr->sg_list[0].lkey);

			ud_sq_wqe->va1_l =
				       cpu_to_le32((u32)wr->sg_list[1].addr);
			ud_sq_wqe->va1_h =
				       cpu_to_le32((wr->sg_list[1].addr) >> 32);
			ud_sq_wqe->l_key1 =
				       cpu_to_le32(wr->sg_list[1].lkey);
			ind++;
		} else if (ibqp->qp_type == IB_QPT_RC) {
			u32 tmp_len = 0;

			ctrl = wqe;
			memset(ctrl, 0, sizeof(struct hns_roce_wqe_ctrl_seg));
			for (i = 0; i < wr->num_sge; i++)
				tmp_len += wr->sg_list[i].length;

			ctrl->msg_length =
			  cpu_to_le32(le32_to_cpu(ctrl->msg_length) + tmp_len);

			ctrl->sgl_pa_h = 0;
			ctrl->flag = 0;

			switch (wr->opcode) {
			case IB_WR_SEND_WITH_IMM:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				ctrl->imm_data = wr->ex.imm_data;
				break;
			case IB_WR_SEND_WITH_INV:
				ctrl->inv_key =
					cpu_to_le32(wr->ex.invalidate_rkey);
				break;
			default:
				ctrl->imm_data = 0;
				break;
			}

			/* Ctrl field: set signal, solicited, imm and fence */
			/* The SO (strong order) bit is left unset for now */
			ctrl->flag |= (wr->send_flags & IB_SEND_SIGNALED ?
				      cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
				      (wr->send_flags & IB_SEND_SOLICITED ?
				      cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
				      ((wr->opcode == IB_WR_SEND_WITH_IMM ||
				      wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) ?
				      cpu_to_le32(HNS_ROCE_WQE_IMM) : 0) |
				      (wr->send_flags & IB_SEND_FENCE ?
				      (cpu_to_le32(HNS_ROCE_WQE_FENCE)) : 0);

			wqe += sizeof(struct hns_roce_wqe_ctrl_seg);

			switch (wr->opcode) {
			case IB_WR_RDMA_READ:
				ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_READ;
				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				break;
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_WRITE;
				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				break;
			case IB_WR_SEND:
			case IB_WR_SEND_WITH_INV:
			case IB_WR_SEND_WITH_IMM:
				ps_opcode = HNS_ROCE_WQE_OPCODE_SEND;
				break;
			case IB_WR_LOCAL_INV:
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
			case IB_WR_LSO:
			default:
				ps_opcode = HNS_ROCE_WQE_OPCODE_MASK;
				break;
			}
			ctrl->flag |= cpu_to_le32(ps_opcode);
			wqe += sizeof(struct hns_roce_wqe_raddr_seg);

			dseg = wqe;
			if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
				if (le32_to_cpu(ctrl->msg_length) >
				    hr_dev->caps.max_sq_inline) {
					ret = -EINVAL;
					*bad_wr = wr;
					dev_err(dev, "inline len(1-%d)=%d, illegal",
						hr_dev->caps.max_sq_inline,
						le32_to_cpu(ctrl->msg_length));
					goto out;
				}
				for (i = 0; i < wr->num_sge; i++) {
					memcpy(wqe, ((void *) (uintptr_t)
					       wr->sg_list[i].addr),
					       wr->sg_list[i].length);
					wqe += wr->sg_list[i].length;
				}
				ctrl->flag |= cpu_to_le32(HNS_ROCE_WQE_INLINE);
			} else {
				/* One data segment per SGE */
				for (i = 0; i < wr->num_sge; i++)
					set_data_seg(dseg + i, wr->sg_list + i);

				ctrl->flag |= cpu_to_le32(wr->num_sge <<
					      HNS_ROCE_WQE_SGE_NUM_BIT);
			}
			ind++;
		}
	}

out:
	/* Ring the SQ doorbell if any WQEs were posted */
	if (likely(nreq)) {
		qp->sq.head += nreq;
		/* Memory barrier */
		wmb();

		sq_db.u32_4 = 0;
		sq_db.u32_8 = 0;
		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SQ_HEAD_M,
			       SQ_DOORBELL_U32_4_SQ_HEAD_S,
			      (qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1)));
		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SL_M,
			       SQ_DOORBELL_U32_4_SL_S, qp->sl);
		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_PORT_M,
			       SQ_DOORBELL_U32_4_PORT_S, qp->phy_port);
		roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M,
			       SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn);
		roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1);

		doorbell[0] = sq_db.u32_4;
		doorbell[1] = sq_db.u32_8;

		hns_roce_write64_k(doorbell, qp->sq.db_reg_l);
		qp->sq_next_wqe = ind;
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return ret;
}

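/*
 * Post receive work requests. For the GSI QP the hardware consumes the
 * RQ head from a QP1C configuration register instead of a doorbell, so
 * the two cases are published differently once the WQEs are written.
 */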
static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
				 const struct ib_recv_wr *wr,
				 const struct ib_recv_wr **bad_wr)
{
	int ret = 0;
	int nreq = 0;
	int ind = 0;
	int i = 0;
	u32 reg_val;
	unsigned long flags = 0;
	struct hns_roce_rq_wqe_ctrl *ctrl = NULL;
	struct hns_roce_wqe_data_seg *scat = NULL;
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_rq_db rq_db;
	__le32 doorbell[2] = {0};

	spin_lock_irqsave(&hr_qp->rq.lock, flags);
	ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
			hr_qp->ibqp.recv_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
			dev_err(dev, "rq:num_sge=%d > hr_qp->rq.max_gs=%d\n",
				wr->num_sge, hr_qp->rq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		ctrl = get_recv_wqe(hr_qp, ind);

		roce_set_field(ctrl->rwqe_byte_12,
			       RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M,
			       RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S,
			       wr->num_sge);

		scat = (struct hns_roce_wqe_data_seg *)(ctrl + 1);

		for (i = 0; i < wr->num_sge; i++)
			set_data_seg(scat + i, wr->sg_list + i);

		hr_qp->rq.wrid[ind] = wr->wr_id;

		ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
	}

out:
	if (likely(nreq)) {
		hr_qp->rq.head += nreq;
		/* Memory barrier */
		wmb();

		if (ibqp->qp_type == IB_QPT_GSI) {
			__le32 tmp;

			/* SW update GSI rq header */
			reg_val = roce_read(to_hr_dev(ibqp->device),
					    ROCEE_QP1C_CFG3_0_REG +
					    QP1C_CFGN_OFFSET * hr_qp->phy_port);
			tmp = cpu_to_le32(reg_val);
			roce_set_field(tmp,
				       ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M,
				       ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S,
				       hr_qp->rq.head);
			reg_val = le32_to_cpu(tmp);
			roce_write(to_hr_dev(ibqp->device),
				   ROCEE_QP1C_CFG3_0_REG +
				   QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val);
		} else {
			rq_db.u32_4 = 0;
			rq_db.u32_8 = 0;

			roce_set_field(rq_db.u32_4, RQ_DOORBELL_U32_4_RQ_HEAD_M,
				       RQ_DOORBELL_U32_4_RQ_HEAD_S,
				       hr_qp->rq.head);
			roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_QPN_M,
				       RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
			roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_CMD_M,
				       RQ_DOORBELL_U32_8_CMD_S, 1);
			roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S,
				     1);

			doorbell[0] = rq_db.u32_4;
			doorbell[1] = rq_db.u32_8;

			hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
		}
	}
	spin_unlock_irqrestore(&hr_qp->rq.lock, flags);

	return ret;
}

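/*
 * The helpers below program global doorbell behaviour via ROCEE_GLB_CFG
 * and the watermark registers: event vs. poll mode, normal vs. extended
 * mode, and the per-queue thresholds (the *_alept/*_alful parameters,
 * presumably "almost empty" and "almost full") for the SQ and "others"
 * doorbells.
 */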
static void hns_roce_set_db_event_mode(struct hns_roce_dev *hr_dev,
				       int sdb_mode, int odb_mode)
{
	__le32 tmp;
	u32 val;

	val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
	tmp = cpu_to_le32(val);
	roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S, sdb_mode);
	roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S, odb_mode);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
}

static void hns_roce_set_db_ext_mode(struct hns_roce_dev *hr_dev, u32 sdb_mode,
				     u32 odb_mode)
{
	__le32 tmp;
	u32 val;

	/* Configure SDB/ODB extend mode */
	val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
	tmp = cpu_to_le32(val);
	roce_set_bit(tmp, ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S, sdb_mode);
	roce_set_bit(tmp, ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S, odb_mode);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
}

static void hns_roce_set_sdb(struct hns_roce_dev *hr_dev, u32 sdb_alept,
			     u32 sdb_alful)
{
	__le32 tmp;
	u32 val;

	/* Configure SDB */
	val = roce_read(hr_dev, ROCEE_DB_SQ_WL_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M,
		       ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S, sdb_alful);
	roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M,
		       ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S, sdb_alept);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_DB_SQ_WL_REG, val);
}

static void hns_roce_set_odb(struct hns_roce_dev *hr_dev, u32 odb_alept,
			     u32 odb_alful)
{
	__le32 tmp;
	u32 val;

	/* Configure ODB */
	val = roce_read(hr_dev, ROCEE_DB_OTHERS_WL_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M,
		       ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S, odb_alful);
	roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M,
		       ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S, odb_alept);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_DB_OTHERS_WL_REG, val);
}

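/*
 * Extended doorbell mode backs the SQ doorbell with a DMA buffer
 * allocated by hns_roce_db_ext_init() below; here the thresholds,
 * 4K-aligned base address and depth of that buffer are written to
 * the hardware.
 */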
static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
				 u32 ext_sdb_alful)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;
	dma_addr_t sdb_dma_addr;
	__le32 tmp;
	u32 val;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	/* Configure extend SDB threshold */
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_EMPTY_REG, ext_sdb_alept);
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_REG, ext_sdb_alful);

	/* Configure extend SDB base addr */
	sdb_dma_addr = db->ext_db->sdb_buf_list->map;
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_REG, (u32)(sdb_dma_addr >> 12));

	/* Configure extend SDB depth */
	val = roce_read(hr_dev, ROCEE_EXT_DB_SQ_H_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M,
		       ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S,
		       db->ext_db->esdb_dep);
	/*
	 * 44 = 32 + 12. The base address is programmed shifted right by 12
	 * bits (4K pages); this field holds the upper 32 bits of that
	 * shifted value, hence the additional shift by 32.
	 */
	roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M,
		       ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S, sdb_dma_addr >> 44);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_H_REG, val);

	dev_dbg(dev, "ext SDB depth: 0x%x\n", db->ext_db->esdb_dep);
	dev_dbg(dev, "ext SDB threshold: empty: 0x%x, full: 0x%x\n",
		ext_sdb_alept, ext_sdb_alful);
}

static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept,
				 u32 ext_odb_alful)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;
	dma_addr_t odb_dma_addr;
	__le32 tmp;
	u32 val;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	/* Configure extend ODB threshold */
	roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_EMPTY_REG, ext_odb_alept);
	roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_REG, ext_odb_alful);

	/* Configure extend ODB base addr */
	odb_dma_addr = db->ext_db->odb_buf_list->map;
	roce_write(hr_dev, ROCEE_EXT_DB_OTH_REG, (u32)(odb_dma_addr >> 12));

	/* Configure extend ODB depth */
	val = roce_read(hr_dev, ROCEE_EXT_DB_OTH_H_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M,
		       ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S,
		       db->ext_db->eodb_dep);
	/* High bits of the 4K-aligned base address, as for the SDB above */
	roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M,
		       ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S,
		       odb_dma_addr >> 44);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_EXT_DB_OTH_H_REG, val);

	dev_dbg(dev, "ext ODB depth: 0x%x\n", db->ext_db->eodb_dep);
	dev_dbg(dev, "ext ODB threshold: empty: 0x%x, full: 0x%x\n",
		ext_odb_alept, ext_odb_alful);
}

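/*
 * Allocate the coherent DMA buffers that back the extended SQ and
 * "others" doorbells when extend mode is selected, program their
 * thresholds, and fall back to the plain watermark registers otherwise.
 */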
static int hns_roce_db_ext_init(struct hns_roce_dev *hr_dev, u32 sdb_ext_mod,
				u32 odb_ext_mod)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;
	dma_addr_t sdb_dma_addr;
	dma_addr_t odb_dma_addr;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	db->ext_db = kmalloc(sizeof(*db->ext_db), GFP_KERNEL);
	if (!db->ext_db)
		return -ENOMEM;

	if (sdb_ext_mod) {
		db->ext_db->sdb_buf_list = kmalloc(
				sizeof(*db->ext_db->sdb_buf_list), GFP_KERNEL);
		if (!db->ext_db->sdb_buf_list) {
			ret = -ENOMEM;
			goto ext_sdb_buf_fail_out;
		}

		db->ext_db->sdb_buf_list->buf = dma_alloc_coherent(dev,
						     HNS_ROCE_V1_EXT_SDB_SIZE,
						     &sdb_dma_addr, GFP_KERNEL);
		if (!db->ext_db->sdb_buf_list->buf) {
			ret = -ENOMEM;
			goto alloc_sq_db_buf_fail;
		}
		db->ext_db->sdb_buf_list->map = sdb_dma_addr;

		db->ext_db->esdb_dep = ilog2(HNS_ROCE_V1_EXT_SDB_DEPTH);
		hns_roce_set_sdb_ext(hr_dev, HNS_ROCE_V1_EXT_SDB_ALEPT,
				     HNS_ROCE_V1_EXT_SDB_ALFUL);
	} else {
		hns_roce_set_sdb(hr_dev, HNS_ROCE_V1_SDB_ALEPT,
				 HNS_ROCE_V1_SDB_ALFUL);
	}

	if (odb_ext_mod) {
		db->ext_db->odb_buf_list = kmalloc(
				sizeof(*db->ext_db->odb_buf_list), GFP_KERNEL);
		if (!db->ext_db->odb_buf_list) {
			ret = -ENOMEM;
			goto ext_odb_buf_fail_out;
		}

		db->ext_db->odb_buf_list->buf = dma_alloc_coherent(dev,
						     HNS_ROCE_V1_EXT_ODB_SIZE,
						     &odb_dma_addr, GFP_KERNEL);
		if (!db->ext_db->odb_buf_list->buf) {
			ret = -ENOMEM;
			goto alloc_otr_db_buf_fail;
		}
		db->ext_db->odb_buf_list->map = odb_dma_addr;

		db->ext_db->eodb_dep = ilog2(HNS_ROCE_V1_EXT_ODB_DEPTH);
		hns_roce_set_odb_ext(hr_dev, HNS_ROCE_V1_EXT_ODB_ALEPT,
				     HNS_ROCE_V1_EXT_ODB_ALFUL);
	} else {
		hns_roce_set_odb(hr_dev, HNS_ROCE_V1_ODB_ALEPT,
				 HNS_ROCE_V1_ODB_ALFUL);
	}

	hns_roce_set_db_ext_mode(hr_dev, sdb_ext_mod, odb_ext_mod);

	return 0;

alloc_otr_db_buf_fail:
	kfree(db->ext_db->odb_buf_list);

ext_odb_buf_fail_out:
	if (sdb_ext_mod) {
		dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
				  db->ext_db->sdb_buf_list->buf,
				  db->ext_db->sdb_buf_list->map);
	}

alloc_sq_db_buf_fail:
	if (sdb_ext_mod)
		kfree(db->ext_db->sdb_buf_list);

ext_sdb_buf_fail_out:
	kfree(db->ext_db);
	return ret;
}

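/*
 * hip06 cannot free an MR while operations may still be in flight, so
 * the driver keeps a set of reserved loopback RC QPs and flushes them
 * with dummy work requests before an MR is released. This creates one
 * such loop QP.
 */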
static struct hns_roce_qp *hns_roce_v1_create_lp_qp(struct hns_roce_dev *hr_dev,
						    struct ib_pd *pd)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct ib_qp_init_attr init_attr;
	struct ib_qp *qp;

	memset(&init_attr, 0, sizeof(struct ib_qp_init_attr));
	init_attr.qp_type		= IB_QPT_RC;
	init_attr.sq_sig_type		= IB_SIGNAL_ALL_WR;
	init_attr.cap.max_recv_wr	= HNS_ROCE_MIN_WQE_NUM;
	init_attr.cap.max_send_wr	= HNS_ROCE_MIN_WQE_NUM;

	qp = hns_roce_create_qp(pd, &init_attr, NULL);
	if (IS_ERR(qp)) {
		dev_err(dev, "Create loop qp for mr free failed!\n");
		return NULL;
	}

	return to_hr_qp(qp);
}

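/*
 * Reserve the CQ, PD and per-port loopback QPs used by the MR-free
 * workaround, and drive each QP RESET -> INIT -> RTR -> RTS with a
 * hand-built link-local destination GID so it loops back to its own
 * port.
 */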
static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_caps *caps = &hr_dev->caps;
	struct device *dev = &hr_dev->pdev->dev;
	struct ib_cq_init_attr cq_init_attr;
	struct hns_roce_free_mr *free_mr;
	struct ib_qp_attr attr = { 0 };
	struct hns_roce_v1_priv *priv;
	struct hns_roce_qp *hr_qp;
	struct ib_device *ibdev;
	struct ib_cq *cq;
	struct ib_pd *pd;
	union ib_gid dgid;
	__be64 subnet_prefix;
	int attr_mask = 0;
	int ret;
	int i, j;
	u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 };
	u8 phy_port;
	u8 port = 0;
	u8 sl;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	/* Reserved cq for loop qp */
	cq_init_attr.cqe		= HNS_ROCE_MIN_WQE_NUM * 2;
	cq_init_attr.comp_vector	= 0;

	ibdev = &hr_dev->ib_dev;
	cq = rdma_zalloc_drv_obj(ibdev, ib_cq);
	if (!cq)
		return -ENOMEM;

	ret = hns_roce_create_cq(cq, &cq_init_attr, NULL);
	if (ret) {
		dev_err(dev, "Create cq for reserved loop qp failed!\n");
		goto alloc_cq_failed;
	}
	free_mr->mr_free_cq = to_hr_cq(cq);
	free_mr->mr_free_cq->ib_cq.device		= &hr_dev->ib_dev;
	free_mr->mr_free_cq->ib_cq.uobject		= NULL;
	free_mr->mr_free_cq->ib_cq.comp_handler		= NULL;
	free_mr->mr_free_cq->ib_cq.event_handler	= NULL;
	free_mr->mr_free_cq->ib_cq.cq_context		= NULL;
	atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0);

	pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
	if (!pd) {
		ret = -ENOMEM;
		goto alloc_mem_failed;
	}

	pd->device  = ibdev;
	ret = hns_roce_alloc_pd(pd, NULL);
	if (ret)
		goto alloc_pd_failed;

	free_mr->mr_free_pd = to_hr_pd(pd);
	free_mr->mr_free_pd->ibpd.device  = &hr_dev->ib_dev;
	free_mr->mr_free_pd->ibpd.uobject = NULL;
	free_mr->mr_free_pd->ibpd.__internal_mr = NULL;
	atomic_set(&free_mr->mr_free_pd->ibpd.usecnt, 0);

	attr.qp_access_flags	= IB_ACCESS_REMOTE_WRITE;
	attr.pkey_index		= 0;
	attr.min_rnr_timer	= 0;
	/* Disable read ability */
	attr.max_dest_rd_atomic = 0;
	attr.max_rd_atomic	= 0;
	/* Use arbitrary values as rq_psn and sq_psn */
	attr.rq_psn		= 0x0808;
	attr.sq_psn		= 0x0808;
	attr.retry_cnt		= 7;
	attr.rnr_retry		= 7;
	attr.timeout		= 0x12;
	attr.path_mtu		= IB_MTU_256;
	attr.ah_attr.type	= RDMA_AH_ATTR_TYPE_ROCE;
	rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0);
	rdma_ah_set_static_rate(&attr.ah_attr, 3);

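	/*
	 * Build a link-local (fe80::/64) destination GID from the port MAC
	 * in modified EUI-64 form (ff:fe inserted, universal/local bit
	 * flipped), so every loop QP targets its own port.
	 */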
	subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
		phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
				(i % HNS_ROCE_MAX_PORTS);
		sl = i / HNS_ROCE_MAX_PORTS;

		for (j = 0; j < caps->num_ports; j++) {
			if (hr_dev->iboe.phy_port[j] == phy_port) {
				queue_en[i] = 1;
				port = j;
				break;
			}
		}

		if (!queue_en[i])
			continue;

		free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
		if (!free_mr->mr_free_qp[i]) {
			dev_err(dev, "Create loop qp failed!\n");
			ret = -ENOMEM;
			goto create_lp_qp_failed;
		}
		hr_qp = free_mr->mr_free_qp[i];

		hr_qp->port		= port;
		hr_qp->phy_port		= phy_port;
		hr_qp->ibqp.qp_type	= IB_QPT_RC;
		hr_qp->ibqp.device	= &hr_dev->ib_dev;
		hr_qp->ibqp.uobject	= NULL;
		atomic_set(&hr_qp->ibqp.usecnt, 0);
		hr_qp->ibqp.pd		= pd;
		hr_qp->ibqp.recv_cq	= cq;
		hr_qp->ibqp.send_cq	= cq;

		rdma_ah_set_port_num(&attr.ah_attr, port + 1);
		rdma_ah_set_sl(&attr.ah_attr, sl);
		attr.port_num		= port + 1;

		attr.dest_qp_num	= hr_qp->qpn;
		memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr),
		       hr_dev->dev_addr[port],
		       ETH_ALEN);

		memcpy(&dgid.raw, &subnet_prefix, sizeof(u64));
		memcpy(&dgid.raw[8], hr_dev->dev_addr[port], 3);
		memcpy(&dgid.raw[13], hr_dev->dev_addr[port] + 3, 3);
		dgid.raw[11] = 0xff;
		dgid.raw[12] = 0xfe;
		dgid.raw[8] ^= 2;
		rdma_ah_set_dgid_raw(&attr.ah_attr, dgid.raw);

		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
					    IB_QPS_RESET, IB_QPS_INIT);
		if (ret) {
			dev_err(dev, "modify qp failed(%d)!\n", ret);
			goto create_lp_qp_failed;
		}

		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, IB_QP_DEST_QPN,
					    IB_QPS_INIT, IB_QPS_RTR);
		if (ret) {
			dev_err(dev, "modify qp failed(%d)!\n", ret);
			goto create_lp_qp_failed;
		}

		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
					    IB_QPS_RTR, IB_QPS_RTS);
		if (ret) {
			dev_err(dev, "modify qp failed(%d)!\n", ret);
			goto create_lp_qp_failed;
		}
	}

	return 0;

create_lp_qp_failed:
	for (i -= 1; i >= 0; i--) {
		hr_qp = free_mr->mr_free_qp[i];
		if (hns_roce_v1_destroy_qp(&hr_qp->ibqp, NULL))
			dev_err(dev, "Destroy qp %d for mr free failed!\n", i);
	}

	hns_roce_dealloc_pd(pd, NULL);

alloc_pd_failed:
	kfree(pd);

alloc_mem_failed:
	hns_roce_destroy_cq(cq, NULL);
alloc_cq_failed:
	kfree(cq);
	return ret;
}

static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_qp *hr_qp;
	int ret;
	int i;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
		hr_qp = free_mr->mr_free_qp[i];
		if (!hr_qp)
			continue;

		ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp, NULL);
		if (ret)
			dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n",
				i, ret);
	}

	hns_roce_destroy_cq(&free_mr->mr_free_cq->ib_cq, NULL);
	kfree(&free_mr->mr_free_cq->ib_cq);
	hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd, NULL);
	kfree(&free_mr->mr_free_pd->ibpd);
}

static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;
	u32 sdb_ext_mod;
	u32 odb_ext_mod;
	u32 sdb_evt_mod;
	u32 odb_evt_mod;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	memset(db, 0, sizeof(*db));

	/* Default DB mode */
	sdb_ext_mod = HNS_ROCE_SDB_EXTEND_MODE;
	odb_ext_mod = HNS_ROCE_ODB_EXTEND_MODE;
	sdb_evt_mod = HNS_ROCE_SDB_NORMAL_MODE;
	odb_evt_mod = HNS_ROCE_ODB_POLL_MODE;

	db->sdb_ext_mod = sdb_ext_mod;
	db->odb_ext_mod = odb_ext_mod;

	/* Init extend DB */
	ret = hns_roce_db_ext_init(hr_dev, sdb_ext_mod, odb_ext_mod);
	if (ret) {
		dev_err(dev, "Failed in extend DB configuration.\n");
		return ret;
	}

	hns_roce_set_db_event_mode(hr_dev, sdb_evt_mod, odb_evt_mod);

	return 0;
}

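/*
 * Work item that tears down and recreates the reserved loopback QPs.
 * The completion is only signalled while the requester is still
 * waiting for it (comp_flag set).
 */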
static void hns_roce_v1_recreate_lp_qp_work_fn(struct work_struct *work)
{
	struct hns_roce_recreate_lp_qp_work *lp_qp_work;
	struct hns_roce_dev *hr_dev;

	lp_qp_work = container_of(work, struct hns_roce_recreate_lp_qp_work,
				  work);
	hr_dev = to_hr_dev(lp_qp_work->ib_dev);

	hns_roce_v1_release_lp_qp(hr_dev);

	if (hns_roce_v1_rsv_lp_qp(hr_dev))
		dev_err(&hr_dev->pdev->dev, "create reserved qp failed\n");

	if (lp_qp_work->comp_flag)
		complete(lp_qp_work->comp);

	kfree(lp_qp_work);
}

static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_recreate_lp_qp_work *lp_qp_work;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	struct completion comp;
	long end = HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	lp_qp_work = kzalloc(sizeof(struct hns_roce_recreate_lp_qp_work),
			     GFP_KERNEL);
	if (!lp_qp_work)
		return -ENOMEM;

	INIT_WORK(&(lp_qp_work->work), hns_roce_v1_recreate_lp_qp_work_fn);

	lp_qp_work->ib_dev = &(hr_dev->ib_dev);
	lp_qp_work->comp = &comp;
	lp_qp_work->comp_flag = 1;

	init_completion(lp_qp_work->comp);

	queue_work(free_mr->free_mr_wq, &(lp_qp_work->work));

	while (end > 0) {
		if (try_wait_for_completion(&comp))
			return 0;
		msleep(HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE);
		end -= HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE;
	}

	lp_qp_work->comp_flag = 0;
	if (try_wait_for_completion(&comp))
		return 0;

	dev_warn(dev, "recreate lp qp failed: 20s timeout!\n");
	return -ETIMEDOUT;
}

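/*
 * Post a zero-length RDMA WRITE on a reserved loopback QP; its
 * completion is used purely as a flush marker by the MR-free path.
 */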
static int hns_roce_v1_send_lp_wqe(struct hns_roce_qp *hr_qp)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
	struct device *dev = &hr_dev->pdev->dev;
	struct ib_send_wr send_wr;
	const struct ib_send_wr *bad_wr;
	int ret;

	memset(&send_wr, 0, sizeof(send_wr));
	send_wr.next	= NULL;
	send_wr.num_sge	= 0;
	send_wr.send_flags = 0;
	send_wr.sg_list	= NULL;
	send_wr.wr_id	= (unsigned long long)&send_wr;
	send_wr.opcode	= IB_WR_RDMA_WRITE;

	ret = hns_roce_v1_post_send(&hr_qp->ibqp, &send_wr, &bad_wr);
	if (ret) {
		dev_err(dev, "Post write wqe for mr free failed(%d)!\n", ret);
		return ret;
	}

	return 0;
}

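/*
 * Work item that performs the actual flush for an MR being freed: post
 * a loopback WQE on every active reserved QP, then poll the reserved CQ
 * until all of them complete or the timeout expires.
 */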
static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
{
	struct hns_roce_mr_free_work *mr_work;
	struct ib_wc wc[HNS_ROCE_V1_RESV_QP];
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_cq *mr_free_cq;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_dev *hr_dev;
	struct hns_roce_mr *hr_mr;
	struct hns_roce_qp *hr_qp;
	struct device *dev;
	unsigned long end =
		msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
	int i;
	int ret;
	int ne = 0;

	mr_work = container_of(work, struct hns_roce_mr_free_work, work);
	hr_mr = (struct hns_roce_mr *)mr_work->mr;
	hr_dev = to_hr_dev(mr_work->ib_dev);
	dev = &hr_dev->pdev->dev;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;
	mr_free_cq = free_mr->mr_free_cq;

	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
		hr_qp = free_mr->mr_free_qp[i];
		if (!hr_qp)
			continue;
		ne++;

		ret = hns_roce_v1_send_lp_wqe(hr_qp);
		if (ret) {
			dev_err(dev,
			     "Send wqe (qp:0x%lx) for mr free failed(%d)!\n",
			     hr_qp->qpn, ret);
			goto free_work;
		}
	}

	if (!ne) {
		dev_err(dev, "Reserved loop qp is absent!\n");
		goto free_work;
	}

	do {
		ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
		if (ret < 0 && hr_qp) {
			dev_err(dev,
			   "(qp:0x%lx) starts, Poll cqe failed(%d) for mr 0x%x free! Remain %d cqe\n",
			   hr_qp->qpn, ret, hr_mr->key, ne);
			goto free_work;
		}
		ne -= ret;
		usleep_range(HNS_ROCE_V1_FREE_MR_WAIT_VALUE * 1000,
			     (1 + HNS_ROCE_V1_FREE_MR_WAIT_VALUE) * 1000);
	} while (ne && time_before_eq(jiffies, end));

	if (ne != 0)
		dev_err(dev,
			"Poll cqe for mr 0x%x free timeout! Remain %d cqe\n",
			hr_mr->key, ne);

free_work:
	if (mr_work->comp_flag)
		complete(mr_work->comp);
	kfree(mr_work);
}

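/*
 * Destroy the MPT entry for an MR, queue the flush work above and wait
 * for it (up to 50s) before releasing the MR's resources.
 */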
static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev,
				struct hns_roce_mr *mr, struct ib_udata *udata)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_mr_free_work *mr_work;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	struct completion comp;
	long end = HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS;
	unsigned long start = jiffies;
	int npages;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	if (mr->enabled) {
		if (hns_roce_hw_destroy_mpt(hr_dev, NULL,
					    key_to_hw_index(mr->key) &
					    (hr_dev->caps.num_mtpts - 1)))
			dev_warn(dev, "DESTROY_MPT failed!\n");
	}

	mr_work = kzalloc(sizeof(*mr_work), GFP_KERNEL);
	if (!mr_work) {
		ret = -ENOMEM;
		goto free_mr;
	}

	INIT_WORK(&(mr_work->work), hns_roce_v1_mr_free_work_fn);

	mr_work->ib_dev = &(hr_dev->ib_dev);
	mr_work->comp = &comp;
	mr_work->comp_flag = 1;
	mr_work->mr = (void *)mr;
	init_completion(mr_work->comp);

	queue_work(free_mr->free_mr_wq, &(mr_work->work));

	while (end > 0) {
		if (try_wait_for_completion(&comp))
			goto free_mr;
		msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
		end -= HNS_ROCE_V1_FREE_MR_WAIT_VALUE;
	}

	mr_work->comp_flag = 0;
	if (try_wait_for_completion(&comp))
		goto free_mr;

	dev_warn(dev, "Free mr work 0x%x timed out after 50s!\n", mr->key);
	ret = -ETIMEDOUT;

free_mr:
	dev_dbg(dev, "Free mr 0x%x use 0x%x us.\n",
		mr->key, jiffies_to_usecs(jiffies) - jiffies_to_usecs(start));

	if (mr->size != ~0ULL) {
		npages = ib_umem_page_count(mr->umem);
		dma_free_coherent(dev, npages * 8, mr->pbl_buf,
				  mr->pbl_dma_addr);
	}

	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
			     key_to_hw_index(mr->key), 0);

	ib_umem_release(mr->umem);

	kfree(mr);

	return ret;
}

static void hns_roce_db_free(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	if (db->sdb_ext_mod) {
		dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
				  db->ext_db->sdb_buf_list->buf,
				  db->ext_db->sdb_buf_list->map);
		kfree(db->ext_db->sdb_buf_list);
	}

	if (db->odb_ext_mod) {
		dma_free_coherent(dev, HNS_ROCE_V1_EXT_ODB_SIZE,
				  db->ext_db->odb_buf_list->buf,
				  db->ext_db->odb_buf_list->map);
		kfree(db->ext_db->odb_buf_list);
	}

	kfree(db->ext_db);
}

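/*
 * Initialise the RAQ: allocate its DMA buffer and program the base
 * address, shift (depth) and watermark, then enable extended RAQ mode
 * and RAQ drop.
 */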
static int hns_roce_raq_init(struct hns_roce_dev *hr_dev)
{
	int ret;
	u32 val;
	__le32 tmp;
	int raq_shift = 0;
	dma_addr_t addr;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_raq_table *raq;
	struct device *dev = &hr_dev->pdev->dev;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	raq = &priv->raq_table;

	raq->e_raq_buf = kzalloc(sizeof(*(raq->e_raq_buf)), GFP_KERNEL);
	if (!raq->e_raq_buf)
		return -ENOMEM;

	raq->e_raq_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_RAQ_SIZE,
						 &addr, GFP_KERNEL);
	if (!raq->e_raq_buf->buf) {
		ret = -ENOMEM;
		goto err_dma_alloc_raq;
	}
	raq->e_raq_buf->map = addr;

	/* Configure raq extended address: 48 bits, 4K aligned */
	roce_write(hr_dev, ROCEE_EXT_RAQ_REG, raq->e_raq_buf->map >> 12);

	/* Configure raq_shift */
	raq_shift = ilog2(HNS_ROCE_V1_RAQ_SIZE / HNS_ROCE_V1_RAQ_ENTRY);
	val = roce_read(hr_dev, ROCEE_EXT_RAQ_H_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M,
		       ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S, raq_shift);
	/*
	 * 44 = 32 + 12. The address is programmed shifted right by 12 bits
	 * (4K pages); this field holds the upper 32 bits of that shifted
	 * value, hence the additional shift by 32.
	 */
	roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M,
		       ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S,
		       raq->e_raq_buf->map >> 44);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_EXT_RAQ_H_REG, val);
	dev_dbg(dev, "Configure raq_shift 0x%x.\n", val);

	/* Configure raq threshold */
	val = roce_read(hr_dev, ROCEE_RAQ_WL_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_RAQ_WL_ROCEE_RAQ_WL_M,
		       ROCEE_RAQ_WL_ROCEE_RAQ_WL_S,
		       HNS_ROCE_V1_EXT_RAQ_WF);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_RAQ_WL_REG, val);
	dev_dbg(dev, "Configure raq_wl 0x%x.\n", val);

	/* Enable extend raq */
	val = roce_read(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S,
		       POL_TIME_INTERVAL_VAL);
	roce_set_bit(tmp, ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE, 1);
	roce_set_field(tmp,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S,
		       2);
	roce_set_bit(tmp,
		     ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S, 1);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG, val);
	dev_dbg(dev, "Configure WrmsPolTimeInterval 0x%x.\n", val);

	/* Enable raq drop */
	val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
	tmp = cpu_to_le32(val);
	roce_set_bit(tmp, ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S, 1);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
	dev_dbg(dev, "Configure GlbCfg = 0x%x.\n", val);

	return 0;

err_dma_alloc_raq:
	kfree(raq->e_raq_buf);
	return ret;
}

static void hns_roce_raq_free(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_raq_table *raq;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	raq = &priv->raq_table;

	dma_free_coherent(dev, HNS_ROCE_V1_RAQ_SIZE, raq->e_raq_buf->buf,
			  raq->e_raq_buf->map);
	kfree(raq->e_raq_buf);
}

static void hns_roce_port_enable(struct hns_roce_dev *hr_dev, int enable_flag)
{
	__le32 tmp;
	u32 val;

	val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
	tmp = cpu_to_le32(val);
	/* Open all ports when enabling, close them otherwise */
	roce_set_field(tmp, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
		       ROCEE_GLB_CFG_ROCEE_PORT_ST_S,
		       enable_flag ? ALL_PORT_VAL_OPEN : 0x0);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
}

1328static int hns_roce_bt_init(struct hns_roce_dev *hr_dev)
1329{
1330        struct device *dev = &hr_dev->pdev->dev;
1331        struct hns_roce_v1_priv *priv;
1332        int ret;
1333
1334        priv = (struct hns_roce_v1_priv *)hr_dev->priv;
1335
1336        priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev,
1337                HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map,
1338                GFP_KERNEL);
1339        if (!priv->bt_table.qpc_buf.buf)
1340                return -ENOMEM;
1341
1342        priv->bt_table.mtpt_buf.buf = dma_alloc_coherent(dev,
1343                HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.mtpt_buf.map,
1344                GFP_KERNEL);
1345        if (!priv->bt_table.mtpt_buf.buf) {
1346                ret = -ENOMEM;
1347                goto err_failed_alloc_mtpt_buf;
1348        }
1349
1350        priv->bt_table.cqc_buf.buf = dma_alloc_coherent(dev,
1351                HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.cqc_buf.map,
1352                GFP_KERNEL);
1353        if (!priv->bt_table.cqc_buf.buf) {
1354                ret = -ENOMEM;
1355                goto err_failed_alloc_cqc_buf;
1356        }
1357
1358        return 0;
1359
1360err_failed_alloc_cqc_buf:
1361        dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
1362                priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);
1363
1364err_failed_alloc_mtpt_buf:
1365        dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
1366                priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
1367
1368        return ret;
1369}
1370
1371static void hns_roce_bt_free(struct hns_roce_dev *hr_dev)
1372{
1373        struct device *dev = &hr_dev->pdev->dev;
1374        struct hns_roce_v1_priv *priv;
1375
1376        priv = (struct hns_roce_v1_priv *)hr_dev->priv;
1377
1378        dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
1379                priv->bt_table.cqc_buf.buf, priv->bt_table.cqc_buf.map);
1380
1381        dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
1382                priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);
1383
1384        dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
1385                priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
1386}
1387
1388static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev)
1389{
1390        struct device *dev = &hr_dev->pdev->dev;
1391        struct hns_roce_buf_list *tptr_buf;
1392        struct hns_roce_v1_priv *priv;
1393
1394        priv = (struct hns_roce_v1_priv *)hr_dev->priv;
1395        tptr_buf = &priv->tptr_table.tptr_buf;
1396
1397        /*
1398         * This buffer holds each CQ's tptr (tail pointer), also called
1399         * the CI (consumer index). On hip06 every CQ uses 2 bytes here
1400         * to store its CQE CI; hardware reads this area to fetch the
1401         * new CI when the queue is almost full.
1402         */
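            /*
             * For example, with 2-byte entries, CQ number n keeps its CI
             * at byte offset 2 * n of this buffer;
             * hns_roce_v1_write_cqc() below computes exactly that offset.
             */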
1403        tptr_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
1404                                           &tptr_buf->map, GFP_KERNEL);
1405        if (!tptr_buf->buf)
1406                return -ENOMEM;
1407
1408        hr_dev->tptr_dma_addr = tptr_buf->map;
1409        hr_dev->tptr_size = HNS_ROCE_V1_TPTR_BUF_SIZE;
1410
1411        return 0;
1412}
1413
1414static void hns_roce_tptr_free(struct hns_roce_dev *hr_dev)
1415{
1416        struct device *dev = &hr_dev->pdev->dev;
1417        struct hns_roce_buf_list *tptr_buf;
1418        struct hns_roce_v1_priv *priv;
1419
1420        priv = (struct hns_roce_v1_priv *)hr_dev->priv;
1421        tptr_buf = &priv->tptr_table.tptr_buf;
1422
1423        dma_free_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
1424                          tptr_buf->buf, tptr_buf->map);
1425}
1426
1427static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev)
1428{
1429        struct device *dev = &hr_dev->pdev->dev;
1430        struct hns_roce_free_mr *free_mr;
1431        struct hns_roce_v1_priv *priv;
1432        int ret = 0;
1433
1434        priv = (struct hns_roce_v1_priv *)hr_dev->priv;
1435        free_mr = &priv->free_mr;
1436
1437        free_mr->free_mr_wq = create_singlethread_workqueue("hns_roce_free_mr");
1438        if (!free_mr->free_mr_wq) {
1439                dev_err(dev, "Create free mr workqueue failed!\n");
1440                return -ENOMEM;
1441        }
1442
1443        ret = hns_roce_v1_rsv_lp_qp(hr_dev);
1444        if (ret) {
1445                dev_err(dev, "Reserved loop qp failed(%d)!\n", ret);
1446                flush_workqueue(free_mr->free_mr_wq);
1447                destroy_workqueue(free_mr->free_mr_wq);
1448        }
1449
1450        return ret;
1451}
1452
1453static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev)
1454{
1455        struct hns_roce_free_mr *free_mr;
1456        struct hns_roce_v1_priv *priv;
1457
1458        priv = (struct hns_roce_v1_priv *)hr_dev->priv;
1459        free_mr = &priv->free_mr;
1460
1461        flush_workqueue(free_mr->free_mr_wq);
1462        destroy_workqueue(free_mr->free_mr_wq);
1463
1464        hns_roce_v1_release_lp_qp(hr_dev);
1465}
1466
1467/**
1468 * hns_roce_v1_reset - reset RoCE
1469 * @hr_dev: RoCE device struct pointer
1470 * @dereset: true -- drop reset, false -- reset
1471 * Return: 0 on success, negative errno on failure
1472 */
1473static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
1474{
1475        struct device_node *dsaf_node;
1476        struct device *dev = &hr_dev->pdev->dev;
1477        struct device_node *np = dev->of_node;
1478        struct fwnode_handle *fwnode;
1479        int ret;
1480
1481        /* check if this is DT/ACPI case */
1482        if (dev_of_node(dev)) {
1483                dsaf_node = of_parse_phandle(np, "dsaf-handle", 0);
1484                if (!dsaf_node) {
1485                        dev_err(dev, "could not find dsaf-handle\n");
1486                        return -EINVAL;
1487                }
1488                fwnode = &dsaf_node->fwnode;
1489        } else if (is_acpi_device_node(dev->fwnode)) {
1490                struct fwnode_reference_args args;
1491
1492                ret = acpi_node_get_property_reference(dev->fwnode,
1493                                                       "dsaf-handle", 0, &args);
1494                if (ret) {
1495                        dev_err(dev, "could not find dsaf-handle\n");
1496                        return ret;
1497                }
1498                fwnode = args.fwnode;
1499        } else {
1500                dev_err(dev, "cannot read data from DT or ACPI\n");
1501                return -ENXIO;
1502        }
1503
1504        ret = hns_dsaf_roce_reset(fwnode, false);
1505        if (ret)
1506                return ret;
1507
1508        if (dereset) {
1509                msleep(SLEEP_TIME_INTERVAL);
1510                ret = hns_dsaf_roce_reset(fwnode, true);
1511        }
1512
1513        return ret;
1514}
1515
1516static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
1517{
1518        int i = 0;
1519        struct hns_roce_caps *caps = &hr_dev->caps;
1520
1521        hr_dev->vendor_id = roce_read(hr_dev, ROCEE_VENDOR_ID_REG);
1522        hr_dev->vendor_part_id = roce_read(hr_dev, ROCEE_VENDOR_PART_ID_REG);
1523        hr_dev->sys_image_guid = roce_read(hr_dev, ROCEE_SYS_IMAGE_GUID_L_REG) |
1524                                ((u64)roce_read(hr_dev,
1525                                            ROCEE_SYS_IMAGE_GUID_H_REG) << 32);
1526        hr_dev->hw_rev          = HNS_ROCE_HW_VER1;
1527
1528        caps->num_qps           = HNS_ROCE_V1_MAX_QP_NUM;
1529        caps->max_wqes          = HNS_ROCE_V1_MAX_WQE_NUM;
1530        caps->min_wqes          = HNS_ROCE_MIN_WQE_NUM;
1531        caps->num_cqs           = HNS_ROCE_V1_MAX_CQ_NUM;
1532        caps->min_cqes          = HNS_ROCE_MIN_CQE_NUM;
1533        caps->max_cqes          = HNS_ROCE_V1_MAX_CQE_NUM;
1534        caps->max_sq_sg         = HNS_ROCE_V1_SG_NUM;
1535        caps->max_rq_sg         = HNS_ROCE_V1_SG_NUM;
1536        caps->max_sq_inline     = HNS_ROCE_V1_INLINE_SIZE;
1537        caps->num_uars          = HNS_ROCE_V1_UAR_NUM;
1538        caps->phy_num_uars      = HNS_ROCE_V1_PHY_UAR_NUM;
1539        caps->num_aeq_vectors   = HNS_ROCE_V1_AEQE_VEC_NUM;
1540        caps->num_comp_vectors  = HNS_ROCE_V1_COMP_VEC_NUM;
1541        caps->num_other_vectors = HNS_ROCE_V1_ABNORMAL_VEC_NUM;
1542        caps->num_mtpts         = HNS_ROCE_V1_MAX_MTPT_NUM;
1543        caps->num_mtt_segs      = HNS_ROCE_V1_MAX_MTT_SEGS;
1544        caps->num_pds           = HNS_ROCE_V1_MAX_PD_NUM;
1545        caps->max_qp_init_rdma  = HNS_ROCE_V1_MAX_QP_INIT_RDMA;
1546        caps->max_qp_dest_rdma  = HNS_ROCE_V1_MAX_QP_DEST_RDMA;
1547        caps->max_sq_desc_sz    = HNS_ROCE_V1_MAX_SQ_DESC_SZ;
1548        caps->max_rq_desc_sz    = HNS_ROCE_V1_MAX_RQ_DESC_SZ;
1549        caps->qpc_entry_sz      = HNS_ROCE_V1_QPC_ENTRY_SIZE;
1550        caps->irrl_entry_sz     = HNS_ROCE_V1_IRRL_ENTRY_SIZE;
1551        caps->cqc_entry_sz      = HNS_ROCE_V1_CQC_ENTRY_SIZE;
1552        caps->mtpt_entry_sz     = HNS_ROCE_V1_MTPT_ENTRY_SIZE;
1553        caps->mtt_entry_sz      = HNS_ROCE_V1_MTT_ENTRY_SIZE;
1554        caps->cq_entry_sz       = HNS_ROCE_V1_CQE_ENTRY_SIZE;
1555        caps->page_size_cap     = HNS_ROCE_V1_PAGE_SIZE_SUPPORT;
1556        caps->reserved_lkey     = 0;
1557        caps->reserved_pds      = 0;
1558        caps->reserved_mrws     = 1;
1559        caps->reserved_uars     = 0;
1560        caps->reserved_cqs      = 0;
1561        caps->reserved_qps      = 12; /* 2 SQPs per port, 6 ports: 12 in total */
1562        caps->chunk_sz          = HNS_ROCE_V1_TABLE_CHUNK_SIZE;
1563
1564        for (i = 0; i < caps->num_ports; i++)
1565                caps->pkey_table_len[i] = 1;
1566
1567        for (i = 0; i < caps->num_ports; i++) {
1568                /* Six ports share 16 GIDs in the v1 engine */
1569                if (i >= (HNS_ROCE_V1_GID_NUM % caps->num_ports))
1570                        caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
1571                                                 caps->num_ports;
1572                else
1573                        caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
1574                                                 caps->num_ports + 1;
1575        }
1576
1577        caps->ceqe_depth = HNS_ROCE_V1_COMP_EQE_NUM;
1578        caps->aeqe_depth = HNS_ROCE_V1_ASYNC_EQE_NUM;
1579        caps->local_ca_ack_delay = roce_read(hr_dev, ROCEE_ACK_DELAY_REG);
1580        caps->max_mtu = IB_MTU_2048;
1581
1582        return 0;
1583}
1584
1585static int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
1586{
1587        int ret;
1588        u32 val;
1589        __le32 tmp;
1590        struct device *dev = &hr_dev->pdev->dev;
1591
1592        /* DMAE user config */
1593        val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG1_REG);
1594        tmp = cpu_to_le32(val);
1595        roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M,
1596                       ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S, 0xf);
1597        roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M,
1598                       ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S,
1599                       1 << PAGES_SHIFT_16);
1600        val = le32_to_cpu(tmp);
1601        roce_write(hr_dev, ROCEE_DMAE_USER_CFG1_REG, val);
1602
1603        val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG2_REG);
1604        tmp = cpu_to_le32(val);
1605        roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M,
1606                       ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S, 0xf);
1607        roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M,
1608                       ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S,
1609                       1 << PAGES_SHIFT_16);
            val = le32_to_cpu(tmp);
            roce_write(hr_dev, ROCEE_DMAE_USER_CFG2_REG, val);
1610
1611        ret = hns_roce_db_init(hr_dev);
1612        if (ret) {
1613                dev_err(dev, "doorbell init failed!\n");
1614                return ret;
1615        }
1616
1617        ret = hns_roce_raq_init(hr_dev);
1618        if (ret) {
1619                dev_err(dev, "raq init failed!\n");
1620                goto error_failed_raq_init;
1621        }
1622
1623        ret = hns_roce_bt_init(hr_dev);
1624        if (ret) {
1625                dev_err(dev, "bt init failed!\n");
1626                goto error_failed_bt_init;
1627        }
1628
1629        ret = hns_roce_tptr_init(hr_dev);
1630        if (ret) {
1631                dev_err(dev, "tptr init failed!\n");
1632                goto error_failed_tptr_init;
1633        }
1634
1635        ret = hns_roce_free_mr_init(hr_dev);
1636        if (ret) {
1637                dev_err(dev, "free mr init failed!\n");
1638                goto error_failed_free_mr_init;
1639        }
1640
1641        hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP);
1642
1643        return 0;
1644
1645error_failed_free_mr_init:
1646        hns_roce_tptr_free(hr_dev);
1647
1648error_failed_tptr_init:
1649        hns_roce_bt_free(hr_dev);
1650
1651error_failed_bt_init:
1652        hns_roce_raq_free(hr_dev);
1653
1654error_failed_raq_init:
1655        hns_roce_db_free(hr_dev);
1656        return ret;
1657}
1658
1659static void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
1660{
1661        hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
1662        hns_roce_free_mr_free(hr_dev);
1663        hns_roce_tptr_free(hr_dev);
1664        hns_roce_bt_free(hr_dev);
1665        hns_roce_raq_free(hr_dev);
1666        hns_roce_db_free(hr_dev);
1667}
1668
1669static int hns_roce_v1_cmd_pending(struct hns_roce_dev *hr_dev)
1670{
1671        u32 status = readl(hr_dev->reg_base + ROCEE_MB6_REG);
1672
1673        return (!!(status & (1 << HCR_GO_BIT)));
1674}
1675
1676static int hns_roce_v1_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
1677                                 u64 out_param, u32 in_modifier, u8 op_modifier,
1678                                 u16 op, u16 token, int event)
1679{
1680        u32 __iomem *hcr = (u32 __iomem *)(hr_dev->reg_base + ROCEE_MB1_REG);
1681        unsigned long end;
1682        u32 val = 0;
1683        __le32 tmp;
1684
1685        end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies;
1686        while (hns_roce_v1_cmd_pending(hr_dev)) {
1687                if (time_after(jiffies, end)) {
1688                        dev_err(hr_dev->dev, "jiffies=%d end=%d\n",
1689                                (int)jiffies, (int)end);
1690                        return -EAGAIN;
1691                }
1692                cond_resched();
1693        }
1694
1695        tmp = cpu_to_le32(val);
1696        roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S,
1697                       op);
1698        roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_MDF_M,
1699                       ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier);
1700        roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_EVENT_S, event);
1701        roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1);
1702        roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_TOKEN_M,
1703                       ROCEE_MB6_ROCEE_MB_TOKEN_S, token);
1704
1705        val = le32_to_cpu(tmp);
1706        writeq(in_param, hcr + 0);
1707        writeq(out_param, hcr + 2);
1708        writel(in_modifier, hcr + 4);
1709        /* Memory barrier */
1710        wmb();
1711
1712        writel(val, hcr + 5);
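            /*
             * The six 32-bit words at hcr are: [0..1] in_param,
             * [2..3] out_param, [4] in_modifier, and [5] the cmd/token
             * word carrying the HW_RUN (go) bit; the wmb() above orders
             * the parameters before the go bit becomes visible to the
             * device.
             */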
1713
1714        return 0;
1715}
1716
1717static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev,
1718                                unsigned long timeout)
1719{
1720        u8 __iomem *hcr = hr_dev->reg_base + ROCEE_MB1_REG;
1721        unsigned long end = 0;
1722        u32 status = 0;
1723
1724        end = msecs_to_jiffies(timeout) + jiffies;
1725        while (hns_roce_v1_cmd_pending(hr_dev) && time_before(jiffies, end))
1726                cond_resched();
1727
1728        if (hns_roce_v1_cmd_pending(hr_dev)) {
1729                dev_err(hr_dev->dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
1730                return -ETIMEDOUT;
1731        }
1732
1733        status = le32_to_cpu((__force __le32)
1734                              __raw_readl(hcr + HCR_STATUS_OFFSET));
1735        if ((status & STATUS_MASK) != 0x1) {
1736                dev_err(hr_dev->dev, "mailbox status 0x%x!\n", status);
1737                return -EBUSY;
1738        }
1739
1740        return 0;
1741}
1742
1743static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port,
1744                               int gid_index, const union ib_gid *gid,
1745                               const struct ib_gid_attr *attr)
1746{
1747        unsigned long flags;
1748        u32 *p = NULL;
1749        u8 gid_idx = 0;
1750
1751        gid_idx = hns_get_gid_index(hr_dev, port, gid_index);
1752
1753        spin_lock_irqsave(&hr_dev->iboe.lock, flags);
1754
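            /*
             * The 128-bit GID is written as four 32-bit words into the
             * L/ML/MH/H register banks, each offset by gid_idx.
             */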
1755        p = (u32 *)&gid->raw[0];
1756        roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_L_0_REG +
1757                       (HNS_ROCE_V1_GID_NUM * gid_idx));
1758
1759        p = (u32 *)&gid->raw[4];
1760        roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_ML_0_REG +
1761                       (HNS_ROCE_V1_GID_NUM * gid_idx));
1762
1763        p = (u32 *)&gid->raw[8];
1764        roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_MH_0_REG +
1765                       (HNS_ROCE_V1_GID_NUM * gid_idx));
1766
1767        p = (u32 *)&gid->raw[0xc];
1768        roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_H_0_REG +
1769                       (HNS_ROCE_V1_GID_NUM * gid_idx));
1770
1771        spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
1772
1773        return 0;
1774}
1775
1776static int hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
1777                               u8 *addr)
1778{
1779        u32 reg_smac_l;
1780        u16 reg_smac_h;
1781        __le32 tmp;
1782        u16 *p_h;
1783        u32 *p;
1784        u32 val;
1785
1786        /*
1787         * When the MAC address changes, loopback may fail because the
1788         * SMAC no longer equals the DMAC, so the reserved loopback QP
1789         * must be released and re-created.
1790         */
1791        if (hr_dev->hw->dereg_mr) {
1792                int ret;
1793
1794                ret = hns_roce_v1_recreate_lp_qp(hr_dev);
1795                if (ret && ret != -ETIMEDOUT)
1796                        return ret;
1797        }
1798
1799        p = (u32 *)(&addr[0]);
1800        reg_smac_l = *p;
1801        roce_raw_write(reg_smac_l, hr_dev->reg_base + ROCEE_SMAC_L_0_REG +
1802                       PHY_PORT_OFFSET * phy_port);
1803
1804        val = roce_read(hr_dev,
1805                        ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
1806        tmp = cpu_to_le32(val);
1807        p_h = (u16 *)(&addr[4]);
1808        reg_smac_h  = *p_h;
1809        roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_SMAC_H_M,
1810                       ROCEE_SMAC_H_ROCEE_SMAC_H_S, reg_smac_h);
1811        val = le32_to_cpu(tmp);
1812        roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
1813                   val);
1814
1815        return 0;
1816}
1817
1818static void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
1819                                enum ib_mtu mtu)
1820{
1821        __le32 tmp;
1822        u32 val;
1823
1824        val = roce_read(hr_dev,
1825                        ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
1826        tmp = cpu_to_le32(val);
1827        roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_PORT_MTU_M,
1828                       ROCEE_SMAC_H_ROCEE_PORT_MTU_S, mtu);
1829        val = le32_to_cpu(tmp);
1830        roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
1831                   val);
1832}
1833
1834static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
1835                                  unsigned long mtpt_idx)
1836{
1837        struct hns_roce_v1_mpt_entry *mpt_entry;
1838        struct sg_dma_page_iter sg_iter;
1839        u64 *pages;
1840        int i;
1841
1842        /* MPT filled into mailbox buf */
1843        mpt_entry = (struct hns_roce_v1_mpt_entry *)mb_buf;
1844        memset(mpt_entry, 0, sizeof(*mpt_entry));
1845
1846        roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_STATE_M,
1847                       MPT_BYTE_4_KEY_STATE_S, KEY_VALID);
1848        roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_M,
1849                       MPT_BYTE_4_KEY_S, mr->key);
1850        roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_PAGE_SIZE_M,
1851                       MPT_BYTE_4_PAGE_SIZE_S, MR_SIZE_4K);
1852        roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_TYPE_S, 0);
1853        roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_BIND_ENABLE_S,
1854                     (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
1855        roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_OWN_S, 0);
1856        roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_MEMORY_LOCATION_TYPE_M,
1857                       MPT_BYTE_4_MEMORY_LOCATION_TYPE_S, mr->type);
1858        roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_ATOMIC_S, 0);
1859        roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_LOCAL_WRITE_S,
1860                     (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
1861        roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_WRITE_S,
1862                     (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
1863        roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_READ_S,
1864                     (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
1865        roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_INVAL_ENABLE_S,
1866                     0);
1867        roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_ADDRESS_TYPE_S, 0);
1868
1869        roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
1870                       MPT_BYTE_12_PBL_ADDR_H_S, 0);
1871        roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_MW_BIND_COUNTER_M,
1872                       MPT_BYTE_12_MW_BIND_COUNTER_S, 0);
1873
1874        mpt_entry->virt_addr_l = cpu_to_le32((u32)mr->iova);
1875        mpt_entry->virt_addr_h = cpu_to_le32((u32)(mr->iova >> 32));
1876        mpt_entry->length = cpu_to_le32((u32)mr->size);
1877
1878        roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_PD_M,
1879                       MPT_BYTE_28_PD_S, mr->pd);
1880        roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_L_KEY_IDX_L_M,
1881                       MPT_BYTE_28_L_KEY_IDX_L_S, mtpt_idx);
1882        roce_set_field(mpt_entry->mpt_byte_64, MPT_BYTE_64_L_KEY_IDX_H_M,
1883                       MPT_BYTE_64_L_KEY_IDX_H_S, mtpt_idx >> MTPT_IDX_SHIFT);
1884
1885        /* DMA memory register */
1886        if (mr->type == MR_TYPE_DMA)
1887                return 0;
1888
1889        pages = (u64 *) __get_free_page(GFP_KERNEL);
1890        if (!pages)
1891                return -ENOMEM;
1892
1893        i = 0;
1894        for_each_sg_dma_page(mr->umem->sg_head.sgl, &sg_iter, mr->umem->nmap, 0) {
1895                pages[i] = ((u64)sg_page_iter_dma_address(&sg_iter)) >> 12;
1896
1897                /* Record only the first 7 page addresses directly in the MPT entry */
1898                if (i >= HNS_ROCE_MAX_INNER_MTPT_NUM)
1899                        break;
1900                i++;
1901        }
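            /*
             * Only the first HNS_ROCE_MAX_INNER_MTPT_NUM page addresses
             * fit in the MPT entry itself (filled in below); the rest of
             * the region is reached through the PBL whose base address
             * is programmed at the end of this function.
             */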
1902
1903        /* Register user mr */
1904        for (i = 0; i < HNS_ROCE_MAX_INNER_MTPT_NUM; i++) {
1905                switch (i) {
1906                case 0:
1907                        mpt_entry->pa0_l = cpu_to_le32((u32)(pages[i]));
1908                        roce_set_field(mpt_entry->mpt_byte_36,
1909                                MPT_BYTE_36_PA0_H_M,
1910                                MPT_BYTE_36_PA0_H_S,
1911                                (u32)(pages[i] >> PAGES_SHIFT_32));
1912                        break;
1913                case 1:
1914                        roce_set_field(mpt_entry->mpt_byte_36,
1915                                       MPT_BYTE_36_PA1_L_M,
1916                                       MPT_BYTE_36_PA1_L_S, (u32)(pages[i]));
1917                        roce_set_field(mpt_entry->mpt_byte_40,
1918                                MPT_BYTE_40_PA1_H_M,
1919                                MPT_BYTE_40_PA1_H_S,
1920                                (u32)(pages[i] >> PAGES_SHIFT_24));
1921                        break;
1922                case 2:
1923                        roce_set_field(mpt_entry->mpt_byte_40,
1924                                       MPT_BYTE_40_PA2_L_M,
1925                                       MPT_BYTE_40_PA2_L_S, (u32)(pages[i]));
1926                        roce_set_field(mpt_entry->mpt_byte_44,
1927                                MPT_BYTE_44_PA2_H_M,
1928                                MPT_BYTE_44_PA2_H_S,
1929                                (u32)(pages[i] >> PAGES_SHIFT_16));
1930                        break;
1931                case 3:
1932                        roce_set_field(mpt_entry->mpt_byte_44,
1933                                       MPT_BYTE_44_PA3_L_M,
1934                                       MPT_BYTE_44_PA3_L_S, (u32)(pages[i]));
1935                        roce_set_field(mpt_entry->mpt_byte_48,
1936                                MPT_BYTE_48_PA3_H_M,
1937                                MPT_BYTE_48_PA3_H_S,
1938                                (u32)(pages[i] >> PAGES_SHIFT_8));
1939                        break;
1940                case 4:
1941                        mpt_entry->pa4_l = cpu_to_le32((u32)(pages[i]));
1942                        roce_set_field(mpt_entry->mpt_byte_56,
1943                                MPT_BYTE_56_PA4_H_M,
1944                                MPT_BYTE_56_PA4_H_S,
1945                                (u32)(pages[i] >> PAGES_SHIFT_32));
1946                        break;
1947                case 5:
1948                        roce_set_field(mpt_entry->mpt_byte_56,
1949                                       MPT_BYTE_56_PA5_L_M,
1950                                       MPT_BYTE_56_PA5_L_S, (u32)(pages[i]));
1951                        roce_set_field(mpt_entry->mpt_byte_60,
1952                                MPT_BYTE_60_PA5_H_M,
1953                                MPT_BYTE_60_PA5_H_S,
1954                                (u32)(pages[i] >> PAGES_SHIFT_24));
1955                        break;
1956                case 6:
1957                        roce_set_field(mpt_entry->mpt_byte_60,
1958                                       MPT_BYTE_60_PA6_L_M,
1959                                       MPT_BYTE_60_PA6_L_S, (u32)(pages[i]));
1960                        roce_set_field(mpt_entry->mpt_byte_64,
1961                                MPT_BYTE_64_PA6_H_M,
1962                                MPT_BYTE_64_PA6_H_S,
1963                                (u32)(pages[i] >> PAGES_SHIFT_16));
1964                        break;
1965                default:
1966                        break;
1967                }
1968        }
1969
1970        free_page((unsigned long) pages);
1971
1972        mpt_entry->pbl_addr_l = cpu_to_le32((u32)(mr->pbl_dma_addr));
1973
1974        roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
1975                       MPT_BYTE_12_PBL_ADDR_H_S,
1976                       ((u32)(mr->pbl_dma_addr >> 32)));
1977
1978        return 0;
1979}
1980
1981static void *get_cqe(struct hns_roce_cq *hr_cq, int n)
1982{
1983        return hns_roce_buf_offset(&hr_cq->buf, n * HNS_ROCE_V1_CQE_ENTRY_SIZE);
1984}
1985
1986static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
1987{
1988        struct hns_roce_cqe *hr_cqe = get_cqe(hr_cq, n & hr_cq->ib_cq.cqe);
1989
1990        /* The CQE is SW-owned when its owner bit is the inverse of the cons_index wrap bit */
1991        return (roce_get_bit(hr_cqe->cqe_byte_4, CQE_BYTE_4_OWNER_S) ^
1992                !!(n & hr_cq->cq_depth)) ? hr_cqe : NULL;
1993}
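
    /*
     * Example of the ownership test in get_sw_cqe(), assuming cq_depth
     * is a power of two: with cq_depth == 64, on the first pass
     * (cons_index 0..63) n & cq_depth is 0, so a CQE whose owner bit is
     * 1 is ready; after the ring wraps (cons_index 64..127) the parity
     * flips and an owner bit of 0 means ready.
     */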
1994
1995static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq)
1996{
1997        return get_sw_cqe(hr_cq, hr_cq->cons_index);
1998}
1999
2000static void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
2001{
2002        __le32 doorbell[2];
2003
2004        doorbell[0] = cpu_to_le32(cons_index & ((hr_cq->cq_depth << 1) - 1));
2005        doorbell[1] = 0;
2006        roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
2007        roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
2008                       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
2009        roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
2010                       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 0);
2011        roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
2012                       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S, hr_cq->cqn);
2013
2014        hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
2015}
2016
2017static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
2018                                   struct hns_roce_srq *srq)
2019{
2020        struct hns_roce_cqe *cqe, *dest;
2021        u32 prod_index;
2022        int nfreed = 0;
2023        u8 owner_bit;
2024
2025        for (prod_index = hr_cq->cons_index; get_sw_cqe(hr_cq, prod_index);
2026             ++prod_index) {
2027                if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe)
2028                        break;
2029        }
2030
2031        /*
2032         * Now backwards through the CQ, removing CQ entries
2033         * that match our QP by overwriting them with next entries.
2034         */
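            /*
             * For example, if the entries at indices 5 and 7 belong to
             * the QP being cleaned, the backward walk copies entry 6
             * into slot 7, entry 4 into slot 6, and so on, preserving
             * each destination's owner bit; cons_index then advances
             * by 2.
             */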
2035        while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
2036                cqe = get_cqe(hr_cq, prod_index & hr_cq->ib_cq.cqe);
2037                if ((roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
2038                                     CQE_BYTE_16_LOCAL_QPN_S) &
2039                                     HNS_ROCE_CQE_QPN_MASK) == qpn) {
2040                        /* The v1 engine does not support SRQ */
2041                        ++nfreed;
2042                } else if (nfreed) {
2043                        dest = get_cqe(hr_cq, (prod_index + nfreed) &
2044                                       hr_cq->ib_cq.cqe);
2045                        owner_bit = roce_get_bit(dest->cqe_byte_4,
2046                                                 CQE_BYTE_4_OWNER_S);
2047                        memcpy(dest, cqe, sizeof(*cqe));
2048                        roce_set_bit(dest->cqe_byte_4, CQE_BYTE_4_OWNER_S,
2049                                     owner_bit);
2050                }
2051        }
2052
2053        if (nfreed) {
2054                hr_cq->cons_index += nfreed;
2055                /*
2056                 * Make sure update of buffer contents is done before
2057                 * updating consumer index.
2058                 */
2059                wmb();
2060
2061                hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
2062        }
2063}
2064
2065static void hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
2066                                 struct hns_roce_srq *srq)
2067{
2068        spin_lock_irq(&hr_cq->lock);
2069        __hns_roce_v1_cq_clean(hr_cq, qpn, srq);
2070        spin_unlock_irq(&hr_cq->lock);
2071}
2072
2073static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
2074                                  struct hns_roce_cq *hr_cq, void *mb_buf,
2075                                  u64 *mtts, dma_addr_t dma_handle)
2076{
2077        struct hns_roce_cq_context *cq_context = NULL;
2078        struct hns_roce_buf_list *tptr_buf;
2079        struct hns_roce_v1_priv *priv;
2080        dma_addr_t tptr_dma_addr;
2081        int offset;
2082
2083        priv = (struct hns_roce_v1_priv *)hr_dev->priv;
2084        tptr_buf = &priv->tptr_table.tptr_buf;
2085
2086        cq_context = mb_buf;
2087        memset(cq_context, 0, sizeof(*cq_context));
2088
2089        /* Get the tptr for this CQ. */
2090        offset = hr_cq->cqn * HNS_ROCE_V1_TPTR_ENTRY_SIZE;
2091        tptr_dma_addr = tptr_buf->map + offset;
2092        hr_cq->tptr_addr = (u16 *)(tptr_buf->buf + offset);
2093
2094        /* Register cq_context members */
2095        roce_set_field(cq_context->cqc_byte_4,
2096                       CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_M,
2097                       CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S, CQ_STATE_VALID);
2098        roce_set_field(cq_context->cqc_byte_4, CQ_CONTEXT_CQC_BYTE_4_CQN_M,
2099                       CQ_CONTEXT_CQC_BYTE_4_CQN_S, hr_cq->cqn);
2100
2101        cq_context->cq_bt_l = cpu_to_le32((u32)dma_handle);
2102
2103        roce_set_field(cq_context->cqc_byte_12,
2104                       CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_M,
2105                       CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S,
2106                       ((u64)dma_handle >> 32));
2107        roce_set_field(cq_context->cqc_byte_12,
2108                       CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M,
2109                       CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S,
2110                       ilog2(hr_cq->cq_depth));
2111        roce_set_field(cq_context->cqc_byte_12, CQ_CONTEXT_CQC_BYTE_12_CEQN_M,
2112                       CQ_CONTEXT_CQC_BYTE_12_CEQN_S, hr_cq->vector);
2113
2114        cq_context->cur_cqe_ba0_l = cpu_to_le32((u32)(mtts[0]));
2115
2116        roce_set_field(cq_context->cqc_byte_20,
2117                       CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_M,
2118                       CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S, (mtts[0]) >> 32);
2119        /* This index is maintained by dedicated hardware; simply initialize it to 0 */
2120        roce_set_field(cq_context->cqc_byte_20,
2121                       CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_M,
2122                       CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S, 0);
2123        /*
2124         * 44 = 32 + 12: the address is shifted by 12 because hardware
2125         * takes it in units of 4K pages, and by a further 32 because
2126         * only the high 32 bits of that page address are written here.
2127         */
2128        roce_set_field(cq_context->cqc_byte_20,
2129                       CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M,
2130                       CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S,
2131                       tptr_dma_addr >> 44);
2132
2133        cq_context->cqe_tptr_addr_l = cpu_to_le32((u32)(tptr_dma_addr >> 12));
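            /*
             * Illustrative values (made-up address): tptr_dma_addr ==
             * 0x0123456789000000 yields 0x1234 for the high field above
             * and 0x56789000 for cqe_tptr_addr_l (the low 32 bits of the
             * page number).
             */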
2134
2135        roce_set_field(cq_context->cqc_byte_32,
2136                       CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M,
2137                       CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S, 0);
2138        roce_set_bit(cq_context->cqc_byte_32,
2139                     CQ_CONTEXT_CQC_BYTE_32_SE_FLAG_S, 0);
2140        roce_set_bit(cq_context->cqc_byte_32,
2141                     CQ_CONTEXT_CQC_BYTE_32_CE_FLAG_S, 0);
2142        roce_set_bit(cq_context->cqc_byte_32,
2143                     CQ_CONTEXT_CQC_BYTE_32_NOTIFICATION_FLAG_S, 0);
2144        roce_set_bit(cq_context->cqc_byte_32,
2145                     CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S,
2146                     0);
2147        /* The initial value of cq's ci is 0 */
2148        roce_set_field(cq_context->cqc_byte_32,
2149                       CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M,
2150                       CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S, 0);
2151}
2152
2153static int hns_roce_v1_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
2154{
2155        return -EOPNOTSUPP;
2156}
2157
2158static int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq,
2159                                     enum ib_cq_notify_flags flags)
2160{
2161        struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2162        u32 notification_flag;
2163        __le32 doorbell[2] = {};
2164
2165        notification_flag = (flags & IB_CQ_SOLICITED_MASK) ==
2166                            IB_CQ_SOLICITED ? CQ_DB_REQ_NOT : CQ_DB_REQ_NOT_SOL;
2167        /*
2168         * flags = 0: notification flag = 1, next completion
2169         * flags = 1: notification flag = 0, solicited completion only
2170         */
2171        doorbell[0] =
2172                cpu_to_le32(hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
2173        roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
2174        roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
2175                       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
2176        roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
2177                       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 1);
2178        roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
2179                       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S,
2180                       hr_cq->cqn | notification_flag);
2181
2182        hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
2183
2184        return 0;
2185}
2186
2187static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
2188                                struct hns_roce_qp **cur_qp, struct ib_wc *wc)
2189{
2190        int qpn;
2191        int is_send;
2192        u16 wqe_ctr;
2193        u32 status;
2194        u32 opcode;
2195        struct hns_roce_cqe *cqe;
2196        struct hns_roce_qp *hr_qp;
2197        struct hns_roce_wq *wq;
2198        struct hns_roce_wqe_ctrl_seg *sq_wqe;
2199        struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
2200        struct device *dev = &hr_dev->pdev->dev;
2201
2202        /* Find the CQE at the current consumer index */
2203        cqe = next_cqe_sw(hr_cq);
2204        if (!cqe)
2205                return -EAGAIN;
2206
2207        ++hr_cq->cons_index;
2208        /* Memory barrier */
2209        rmb();
2210        /* 0->SQ, 1->RQ */
2211        is_send  = !(roce_get_bit(cqe->cqe_byte_4, CQE_BYTE_4_SQ_RQ_FLAG_S));
2212
2213        /* local_qpn in a UD CQE is at most 1, so the real QPN must be recomputed */
2214        if (roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
2215                           CQE_BYTE_16_LOCAL_QPN_S) <= 1) {
2216                qpn = roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_PORT_NUM_M,
2217                                     CQE_BYTE_20_PORT_NUM_S) +
2218                      roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
2219                                     CQE_BYTE_16_LOCAL_QPN_S) *
2220                                     HNS_ROCE_MAX_PORTS;
2221        } else {
2222                qpn = roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
2223                                     CQE_BYTE_16_LOCAL_QPN_S);
2224        }
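            /*
             * e.g. with six ports, a UD CQE reporting local_qpn 1 on
             * port 2 resolves to qpn = 2 + 1 * 6 = 8; folding in the
             * port number recovers the real QP number.
             */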
2225
2226        if (!*cur_qp || (qpn & HNS_ROCE_CQE_QPN_MASK) != (*cur_qp)->qpn) {
2227                hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
2228                if (unlikely(!hr_qp)) {
2229                        dev_err(dev, "CQ %06lx with entry for unknown QPN %06x\n",
2230                                hr_cq->cqn, (qpn & HNS_ROCE_CQE_QPN_MASK));
2231                        return -EINVAL;
2232                }
2233
2234                *cur_qp = hr_qp;
2235        }
2236
2237        wc->qp = &(*cur_qp)->ibqp;
2238        wc->vendor_err = 0;
2239
2240        status = roce_get_field(cqe->cqe_byte_4,
2241                                CQE_BYTE_4_STATUS_OF_THE_OPERATION_M,
2242                                CQE_BYTE_4_STATUS_OF_THE_OPERATION_S) &
2243                                HNS_ROCE_CQE_STATUS_MASK;
2244        switch (status) {
2245        case HNS_ROCE_CQE_SUCCESS:
2246                wc->status = IB_WC_SUCCESS;
2247                break;
2248        case HNS_ROCE_CQE_SYNDROME_LOCAL_LENGTH_ERR:
2249                wc->status = IB_WC_LOC_LEN_ERR;
2250                break;
2251        case HNS_ROCE_CQE_SYNDROME_LOCAL_QP_OP_ERR:
2252                wc->status = IB_WC_LOC_QP_OP_ERR;
2253                break;
2254        case HNS_ROCE_CQE_SYNDROME_LOCAL_PROT_ERR:
2255                wc->status = IB_WC_LOC_PROT_ERR;
2256                break;
2257        case HNS_ROCE_CQE_SYNDROME_WR_FLUSH_ERR:
2258                wc->status = IB_WC_WR_FLUSH_ERR;
2259                break;
2260        case HNS_ROCE_CQE_SYNDROME_MEM_MANAGE_OPERATE_ERR:
2261                wc->status = IB_WC_MW_BIND_ERR;
2262                break;
2263        case HNS_ROCE_CQE_SYNDROME_BAD_RESP_ERR:
2264                wc->status = IB_WC_BAD_RESP_ERR;
2265                break;
2266        case HNS_ROCE_CQE_SYNDROME_LOCAL_ACCESS_ERR:
2267                wc->status = IB_WC_LOC_ACCESS_ERR;
2268                break;
2269        case HNS_ROCE_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
2270                wc->status = IB_WC_REM_INV_REQ_ERR;
2271                break;
2272        case HNS_ROCE_CQE_SYNDROME_REMOTE_ACCESS_ERR:
2273                wc->status = IB_WC_REM_ACCESS_ERR;
2274                break;
2275        case HNS_ROCE_CQE_SYNDROME_REMOTE_OP_ERR:
2276                wc->status = IB_WC_REM_OP_ERR;
2277                break;
2278        case HNS_ROCE_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
2279                wc->status = IB_WC_RETRY_EXC_ERR;
2280                break;
2281        case HNS_ROCE_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
2282                wc->status = IB_WC_RNR_RETRY_EXC_ERR;
2283                break;
2284        default:
2285                wc->status = IB_WC_GENERAL_ERR;
2286                break;
2287        }
2288
2289        /* CQE status error, directly return */
2290        if (wc->status != IB_WC_SUCCESS)
2291                return 0;
2292
2293        if (is_send) {
2294                /* The CQE corresponds to an SQ WQE */
2295                sq_wqe = get_send_wqe(*cur_qp, roce_get_field(cqe->cqe_byte_4,
2296                                                CQE_BYTE_4_WQE_INDEX_M,
2297                                                CQE_BYTE_4_WQE_INDEX_S) &
2298                                                ((*cur_qp)->sq.wqe_cnt - 1));
2299                switch (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_OPCODE_MASK) {
2300                case HNS_ROCE_WQE_OPCODE_SEND:
2301                        wc->opcode = IB_WC_SEND;
2302                        break;
2303                case HNS_ROCE_WQE_OPCODE_RDMA_READ:
2304                        wc->opcode = IB_WC_RDMA_READ;
2305                        wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2306                        break;
2307                case HNS_ROCE_WQE_OPCODE_RDMA_WRITE:
2308                        wc->opcode = IB_WC_RDMA_WRITE;
2309                        break;
2310                case HNS_ROCE_WQE_OPCODE_LOCAL_INV:
2311                        wc->opcode = IB_WC_LOCAL_INV;
2312                        break;
2313                case HNS_ROCE_WQE_OPCODE_UD_SEND:
2314                        wc->opcode = IB_WC_SEND;
2315                        break;
2316                default:
2317                        wc->status = IB_WC_GENERAL_ERR;
2318                        break;
2319                }
2320                wc->wc_flags = (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_IMM ?
2321                                IB_WC_WITH_IMM : 0);
2322
2323                wq = &(*cur_qp)->sq;
2324                if ((*cur_qp)->sq_signal_bits) {
2325                        /*
2326                         * If sq_signal_bits is 1, first advance the
2327                         * tail pointer to the WQE that this CQE
2328                         * corresponds to.
2329                         */
2330                        wqe_ctr = (u16)roce_get_field(cqe->cqe_byte_4,
2331                                                      CQE_BYTE_4_WQE_INDEX_M,
2332                                                      CQE_BYTE_4_WQE_INDEX_S);
2333                        wq->tail += (wqe_ctr - (u16)wq->tail) &
2334                                    (wq->wqe_cnt - 1);
2335                }
2336                wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2337                ++wq->tail;
2338        } else {
2339                /* The CQE corresponds to an RQ WQE */
2340                wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2341                opcode = roce_get_field(cqe->cqe_byte_4,
2342                                        CQE_BYTE_4_OPERATION_TYPE_M,
2343                                        CQE_BYTE_4_OPERATION_TYPE_S) &
2344                                        HNS_ROCE_CQE_OPCODE_MASK;
2345                switch (opcode) {
2346                case HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE:
2347                        wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2348                        wc->wc_flags = IB_WC_WITH_IMM;
2349                        wc->ex.imm_data =
2350                                cpu_to_be32(le32_to_cpu(cqe->immediate_data));
2351                        break;
2352                case HNS_ROCE_OPCODE_SEND_DATA_RECEIVE:
2353                        if (roce_get_bit(cqe->cqe_byte_4,
2354                                         CQE_BYTE_4_IMM_INDICATOR_S)) {
2355                                wc->opcode = IB_WC_RECV;
2356                                wc->wc_flags = IB_WC_WITH_IMM;
2357                                wc->ex.imm_data = cpu_to_be32(
2358                                        le32_to_cpu(cqe->immediate_data));
2359                        } else {
2360                                wc->opcode = IB_WC_RECV;
2361                                wc->wc_flags = 0;
2362                        }
2363                        break;
2364                default:
2365                        wc->status = IB_WC_GENERAL_ERR;
2366                        break;
2367                }
2368
2369                /* Update tail pointer, record wr_id */
2370                wq = &(*cur_qp)->rq;
2371                wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2372                ++wq->tail;
2373                wc->sl = (u8)roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_SL_M,
2374                                            CQE_BYTE_20_SL_S);
2375                wc->src_qp = (u8)roce_get_field(cqe->cqe_byte_20,
2376                                                CQE_BYTE_20_REMOTE_QPN_M,
2377                                                CQE_BYTE_20_REMOTE_QPN_S);
2378                wc->wc_flags |= (roce_get_bit(cqe->cqe_byte_20,
2379                                              CQE_BYTE_20_GRH_PRESENT_S) ?
2380                                              IB_WC_GRH : 0);
2381                wc->pkey_index = (u16)roce_get_field(cqe->cqe_byte_28,
2382                                                     CQE_BYTE_28_P_KEY_IDX_M,
2383                                                     CQE_BYTE_28_P_KEY_IDX_S);
2384        }
2385
2386        return 0;
2387}
2388
2389int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
2390{
2391        struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2392        struct hns_roce_qp *cur_qp = NULL;
2393        unsigned long flags;
2394        int npolled;
2395        int ret = 0;
2396
2397        spin_lock_irqsave(&hr_cq->lock, flags);
2398
2399        for (npolled = 0; npolled < num_entries; ++npolled) {
2400                ret = hns_roce_v1_poll_one(hr_cq, &cur_qp, wc + npolled);
2401                if (ret)
2402                        break;
2403        }
2404
2405        if (npolled) {
2406                *hr_cq->tptr_addr = hr_cq->cons_index &
2407                        ((hr_cq->cq_depth << 1) - 1);
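                    /*
                     * The CI is reported modulo twice the CQ depth: the
                     * extra wrap bit is what lets the hardware tell a
                     * full ring from an empty one.
                     */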
2408
2409                /* Memory barrier */
2410                wmb();
2411                hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
2412        }
2413
2414        spin_unlock_irqrestore(&hr_cq->lock, flags);
2415
2416        if (ret == 0 || ret == -EAGAIN)
2417                return npolled;
2418        else
2419                return ret;
2420}
2421
2422static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
2423                                 struct hns_roce_hem_table *table, int obj,
2424                                 int step_idx)
2425{
2426        struct device *dev = &hr_dev->pdev->dev;
2427        struct hns_roce_v1_priv *priv;
2428        unsigned long flags = 0;
2429        long end = HW_SYNC_TIMEOUT_MSECS;
2430        __le32 bt_cmd_val[2] = {0};
2431        void __iomem *bt_cmd;
2432        u64 bt_ba = 0;
2433
2434        priv = (struct hns_roce_v1_priv *)hr_dev->priv;
2435
2436        switch (table->type) {
2437        case HEM_TYPE_QPC:
2438                bt_ba = priv->bt_table.qpc_buf.map >> 12;
2439                break;
2440        case HEM_TYPE_MTPT:
2441                bt_ba = priv->bt_table.mtpt_buf.map >> 12;
2442                break;
2443        case HEM_TYPE_CQC:
2444                bt_ba = priv->bt_table.cqc_buf.map >> 12;
2445                break;
2446        case HEM_TYPE_SRQC:
2447                dev_dbg(dev, "HEM_TYPE_SRQC is not supported.\n");
2448                return -EINVAL;
2449        default:
2450                return 0;
2451        }
2452        roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
2453                        ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, table->type);
2454        roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
2455                ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
2456        roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
2457        roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
2458
2459        spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags);
2460
2461        bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
2462
2463        while (1) {
2464                if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
2465                        if (!end) {
2466                                dev_err(dev, "Write bt_cmd error, hw_sync is not zero.\n");
2467                                spin_unlock_irqrestore(&hr_dev->bt_cmd_lock,
2468                                        flags);
2469                                return -EBUSY;
2470                        }
2471                } else {
2472                        break;
2473                }
2474                mdelay(HW_SYNC_SLEEP_TIME_INTERVAL);
2475                end -= HW_SYNC_SLEEP_TIME_INTERVAL;
2476        }
2477
2478        bt_cmd_val[0] = cpu_to_le32(bt_ba);
2479        roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
2480                ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32);
2481        hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
2482
2483        spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags);
2484
2485        return 0;
2486}
2487
2488static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
2489                                 struct hns_roce_mtt *mtt,
2490                                 enum hns_roce_qp_state cur_state,
2491                                 enum hns_roce_qp_state new_state,
2492                                 struct hns_roce_qp_context *context,
2493                                 struct hns_roce_qp *hr_qp)
2494{
2495        static const u16
2496        op[HNS_ROCE_QP_NUM_STATE][HNS_ROCE_QP_NUM_STATE] = {
2497                [HNS_ROCE_QP_STATE_RST] = {
2498                [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2499                [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2500                [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
2501                },
2502                [HNS_ROCE_QP_STATE_INIT] = {
2503                [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2504                [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2505                /* Note: the v1 engine HW doesn't support INIT2INIT,
2506                 * so the RST2INIT cmd is used instead.
2507                 */
2508                [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
2509                [HNS_ROCE_QP_STATE_RTR] = HNS_ROCE_CMD_INIT2RTR_QP,
2510                },
2511                [HNS_ROCE_QP_STATE_RTR] = {
2512                [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2513                [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2514                [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTR2RTS_QP,
2515                },
2516                [HNS_ROCE_QP_STATE_RTS] = {
2517                [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2518                [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2519                [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTS2RTS_QP,
2520                [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_RTS2SQD_QP,
2521                },
2522                [HNS_ROCE_QP_STATE_SQD] = {
2523                [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2524                [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2525                [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_SQD2RTS_QP,
2526                [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_SQD2SQD_QP,
2527                },
2528                [HNS_ROCE_QP_STATE_ERR] = {
2529                [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2530                [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2531                }
2532        };
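            /*
             * Any state may fall back to RST or ERR; every other
             * non-zero entry above names the mailbox command for that
             * specific transition, e.g. op[RTR][RTS] ==
             * HNS_ROCE_CMD_RTR2RTS_QP.  A zero entry means the
             * transition is rejected below.
             */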
2533
2534        struct hns_roce_cmd_mailbox *mailbox;
2535        struct device *dev = &hr_dev->pdev->dev;
2536        int ret = 0;
2537
2538        if (cur_state >= HNS_ROCE_QP_NUM_STATE ||
2539            new_state >= HNS_ROCE_QP_NUM_STATE ||
2540            !op[cur_state][new_state]) {
2541                dev_err(dev, "[modify_qp] unsupported transition from state %d to %d\n",
2542                        cur_state, new_state);
2543                return -EINVAL;
2544        }
2545
2546        if (op[cur_state][new_state] == HNS_ROCE_CMD_2RST_QP)
2547                return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
2548                                         HNS_ROCE_CMD_2RST_QP,
2549                                         HNS_ROCE_CMD_TIMEOUT_MSECS);
2550
2551        if (op[cur_state][new_state] == HNS_ROCE_CMD_2ERR_QP)
2552                return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
2553                                         HNS_ROCE_CMD_2ERR_QP,
2554                                         HNS_ROCE_CMD_TIMEOUT_MSECS);
2555
2556        mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
2557        if (IS_ERR(mailbox))
2558                return PTR_ERR(mailbox);
2559
2560        memcpy(mailbox->buf, context, sizeof(*context));
2561
2562        ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
2563                                op[cur_state][new_state],
2564                                HNS_ROCE_CMD_TIMEOUT_MSECS);
2565
2566        hns_roce_free_cmd_mailbox(hr_dev, mailbox);
2567        return ret;
2568}
2569
2570static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
2571                             int attr_mask, enum ib_qp_state cur_state,
2572                             enum ib_qp_state new_state)
2573{
2574        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
2575        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
2576        struct hns_roce_sqp_context *context;
2577        struct device *dev = &hr_dev->pdev->dev;
2578        dma_addr_t dma_handle = 0;
2579        u32 __iomem *addr;
2580        int rq_pa_start;
2581        __le32 tmp;
2582        u32 reg_val;
2583        u64 *mtts;
2584
2585        context = kzalloc(sizeof(*context), GFP_KERNEL);
2586        if (!context)
2587                return -ENOMEM;
2588
2589        /* Search QP buf's MTTs */
2590        mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
2591                                   hr_qp->mtt.first_seg, &dma_handle);
2592        if (!mtts) {
2593                dev_err(dev, "failed to find PA of qp buf\n");
2594                goto out;
2595        }
2596
2597        if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
2598                roce_set_field(context->qp1c_bytes_4,
2599                               QP1C_BYTES_4_SQ_WQE_SHIFT_M,
2600                               QP1C_BYTES_4_SQ_WQE_SHIFT_S,
2601                               ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2602                roce_set_field(context->qp1c_bytes_4,
2603                               QP1C_BYTES_4_RQ_WQE_SHIFT_M,
2604                               QP1C_BYTES_4_RQ_WQE_SHIFT_S,
2605                               ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2606                roce_set_field(context->qp1c_bytes_4, QP1C_BYTES_4_PD_M,
2607                               QP1C_BYTES_4_PD_S, to_hr_pd(ibqp->pd)->pdn);
2608
2609                context->sq_rq_bt_l = cpu_to_le32((u32)(dma_handle));
2610                roce_set_field(context->qp1c_bytes_12,
2611                               QP1C_BYTES_12_SQ_RQ_BT_H_M,
2612                               QP1C_BYTES_12_SQ_RQ_BT_H_S,
2613                               ((u32)(dma_handle >> 32)));
2614
2615                roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_HEAD_M,
2616                               QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head);
2617                roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_PORT_NUM_M,
2618                               QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port);
2619                roce_set_bit(context->qp1c_bytes_16,
2620                             QP1C_BYTES_16_SIGNALING_TYPE_S,
2621                             hr_qp->sq_signal_bits);
2622                roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S,
2623                             1);
2624                roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S,
2625                             1);
2626                roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_QP1_ERR_S,
2627                             0);
2628
2629                roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_SQ_HEAD_M,
2630                               QP1C_BYTES_20_SQ_HEAD_S, hr_qp->sq.head);
2631                roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_PKEY_IDX_M,
2632                               QP1C_BYTES_20_PKEY_IDX_S, attr->pkey_index);
2633
2634                rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
2635                context->cur_rq_wqe_ba_l =
2636                                cpu_to_le32((u32)(mtts[rq_pa_start]));
2637
2638                roce_set_field(context->qp1c_bytes_28,
2639                               QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M,
2640                               QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S,
2641                               (mtts[rq_pa_start]) >> 32);
2642                roce_set_field(context->qp1c_bytes_28,
2643                               QP1C_BYTES_28_RQ_CUR_IDX_M,
2644                               QP1C_BYTES_28_RQ_CUR_IDX_S, 0);
2645
2646                roce_set_field(context->qp1c_bytes_32,
2647                               QP1C_BYTES_32_RX_CQ_NUM_M,
2648                               QP1C_BYTES_32_RX_CQ_NUM_S,
2649                               to_hr_cq(ibqp->recv_cq)->cqn);
2650                roce_set_field(context->qp1c_bytes_32,
2651                               QP1C_BYTES_32_TX_CQ_NUM_M,
2652                               QP1C_BYTES_32_TX_CQ_NUM_S,
2653                               to_hr_cq(ibqp->send_cq)->cqn);
2654
2655                context->cur_sq_wqe_ba_l  = cpu_to_le32((u32)mtts[0]);
2656
2657                roce_set_field(context->qp1c_bytes_40,
2658                               QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M,
2659                               QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S,
2660                               (mtts[0]) >> 32);
2661                roce_set_field(context->qp1c_bytes_40,
2662                               QP1C_BYTES_40_SQ_CUR_IDX_M,
2663                               QP1C_BYTES_40_SQ_CUR_IDX_S, 0);
2664
2665                /* Copy context to QP1C register */
2666                addr = (u32 __iomem *)(hr_dev->reg_base +
2667                                       ROCEE_QP1C_CFG0_0_REG +
2668                                       hr_qp->phy_port * sizeof(*context));
2669
2670                writel(le32_to_cpu(context->qp1c_bytes_4), addr);
2671                writel(le32_to_cpu(context->sq_rq_bt_l), addr + 1);
2672                writel(le32_to_cpu(context->qp1c_bytes_12), addr + 2);
2673                writel(le32_to_cpu(context->qp1c_bytes_16), addr + 3);
2674                writel(le32_to_cpu(context->qp1c_bytes_20), addr + 4);
2675                writel(le32_to_cpu(context->cur_rq_wqe_ba_l), addr + 5);
2676                writel(le32_to_cpu(context->qp1c_bytes_28), addr + 6);
2677                writel(le32_to_cpu(context->qp1c_bytes_32), addr + 7);
2678                writel(le32_to_cpu(context->cur_sq_wqe_ba_l), addr + 8);
2679                writel(le32_to_cpu(context->qp1c_bytes_40), addr + 9);
2680        }
2681
2682        /* Modify QP1C status */
2683        reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG0_0_REG +
2684                            hr_qp->phy_port * sizeof(*context));
2685        tmp = cpu_to_le32(reg_val);
2686        roce_set_field(tmp, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M,
2687                       ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S, new_state);
2688        reg_val = le32_to_cpu(tmp);
2689        roce_write(hr_dev, ROCEE_QP1C_CFG0_0_REG +
2690                    hr_qp->phy_port * sizeof(*context), reg_val);
2691
2692        hr_qp->state = new_state;
2693        if (new_state == IB_QPS_RESET) {
2694                hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
2695                                     ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
2696                if (ibqp->send_cq != ibqp->recv_cq)
2697                        hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
2698                                             hr_qp->qpn, NULL);
2699
2700                hr_qp->rq.head = 0;
2701                hr_qp->rq.tail = 0;
2702                hr_qp->sq.head = 0;
2703                hr_qp->sq.tail = 0;
2704                hr_qp->sq_next_wqe = 0;
2705        }
2706
2707        kfree(context);
2708        return 0;
2709
2710out:
2711        kfree(context);
2712        return -EINVAL;
2713}
2714
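/*
 * hns_roce_v1_m_qp() - modify a regular QP by rebuilding its context.
 *
 * A full hns_roce_qp_context is built for the requested transition
 * (RESET->INIT, INIT->INIT, INIT->RTR or RTR->RTS; transitions to
 * RESET/ERR need no additional context fields) and handed to hardware
 * via hns_roce_v1_qp_modify(). The QP buffer's and IRRL's MTT entries
 * are looked up first so the context can carry their base addresses.
 */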
2715static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
2716                            int attr_mask, enum ib_qp_state cur_state,
2717                            enum ib_qp_state new_state)
2718{
2719        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
2720        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
2721        struct device *dev = &hr_dev->pdev->dev;
2722        struct hns_roce_qp_context *context;
2723        const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
2724        dma_addr_t dma_handle_2 = 0;
2725        dma_addr_t dma_handle = 0;
2726        __le32 doorbell[2] = {0};
2727        int rq_pa_start = 0;
2728        u64 *mtts_2 = NULL;
2729        int ret = -EINVAL;
2730        u64 *mtts = NULL;
2731        int port;
2732        u8 port_num;
2733        u8 *dmac;
2734        u8 *smac;
2735
2736        context = kzalloc(sizeof(*context), GFP_KERNEL);
2737        if (!context)
2738                return -ENOMEM;
2739
2740        /* Search QP buf's MTTs */
2741        mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
2742                                   hr_qp->mtt.first_seg, &dma_handle);
2743        if (!mtts) {
2744                dev_err(dev, "failed to find PA for qp buf\n");
2745                goto out;
2746        }
2747
2748        /* Search IRRL's MTTs */
2749        mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
2750                                     hr_qp->qpn, &dma_handle_2);
2751        if (!mtts_2) {
2752                dev_err(dev, "failed to find the QP's irrl_table entry\n");
2753                goto out;
2754        }
2755
2756        /*
2757         * Reset to init
2758         *      Mandatory param:
2759         *      IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS
2760         *      Optional param: NA
2761         */
2762        if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
2763                roce_set_field(context->qpc_bytes_4,
2764                               QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
2765                               QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S,
2766                               to_hr_qp_type(hr_qp->ibqp.qp_type));
2767
2768                roce_set_bit(context->qpc_bytes_4,
2769                             QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0);
2770                roce_set_bit(context->qpc_bytes_4,
2771                             QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
2772                             !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
2773                roce_set_bit(context->qpc_bytes_4,
2774                             QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
2775                             !!(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
2776                             );
2777                roce_set_bit(context->qpc_bytes_4,
2778                             QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S,
2779                             !!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)
2780                             );
2781                roce_set_bit(context->qpc_bytes_4,
2782                             QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1);
2783                roce_set_field(context->qpc_bytes_4,
2784                               QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M,
2785                               QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S,
2786                               ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2787                roce_set_field(context->qpc_bytes_4,
2788                               QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M,
2789                               QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S,
2790                               ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2791                roce_set_field(context->qpc_bytes_4,
2792                               QP_CONTEXT_QPC_BYTES_4_PD_M,
2793                               QP_CONTEXT_QPC_BYTES_4_PD_S,
2794                               to_hr_pd(ibqp->pd)->pdn);
2795                hr_qp->access_flags = attr->qp_access_flags;
2796                roce_set_field(context->qpc_bytes_8,
2797                               QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M,
2798                               QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S,
2799                               to_hr_cq(ibqp->send_cq)->cqn);
2800                roce_set_field(context->qpc_bytes_8,
2801                               QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M,
2802                               QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S,
2803                               to_hr_cq(ibqp->recv_cq)->cqn);
2804
2805                if (ibqp->srq)
2806                        roce_set_field(context->qpc_bytes_12,
2807                                       QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M,
2808                                       QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S,
2809                                       to_hr_srq(ibqp->srq)->srqn);
2810
2811                roce_set_field(context->qpc_bytes_12,
2812                               QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
2813                               QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
2814                               attr->pkey_index);
2815                hr_qp->pkey_index = attr->pkey_index;
2816                roce_set_field(context->qpc_bytes_16,
2817                               QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
2818                               QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
2819
2820        } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
2821                roce_set_field(context->qpc_bytes_4,
2822                               QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
2823                               QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S,
2824                               to_hr_qp_type(hr_qp->ibqp.qp_type));
2825                roce_set_bit(context->qpc_bytes_4,
2826                             QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0);
2827                if (attr_mask & IB_QP_ACCESS_FLAGS) {
2828                        roce_set_bit(context->qpc_bytes_4,
2829                                     QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
2830                                     !!(attr->qp_access_flags &
2831                                     IB_ACCESS_REMOTE_READ));
2832                        roce_set_bit(context->qpc_bytes_4,
2833                                     QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
2834                                     !!(attr->qp_access_flags &
2835                                     IB_ACCESS_REMOTE_WRITE));
2836                } else {
2837                        roce_set_bit(context->qpc_bytes_4,
2838                                     QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
2839                                     !!(hr_qp->access_flags &
2840                                     IB_ACCESS_REMOTE_READ));
2841                        roce_set_bit(context->qpc_bytes_4,
2842                                     QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
2843                                     !!(hr_qp->access_flags &
2844                                     IB_ACCESS_REMOTE_WRITE));
2845                }
2846
2847                roce_set_bit(context->qpc_bytes_4,
2848                             QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1);
2849                roce_set_field(context->qpc_bytes_4,
2850                               QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M,
2851                               QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S,
2852                               ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2853                roce_set_field(context->qpc_bytes_4,
2854                               QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M,
2855                               QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S,
2856                               ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2857                roce_set_field(context->qpc_bytes_4,
2858                               QP_CONTEXT_QPC_BYTES_4_PD_M,
2859                               QP_CONTEXT_QPC_BYTES_4_PD_S,
2860                               to_hr_pd(ibqp->pd)->pdn);
2861
2862                roce_set_field(context->qpc_bytes_8,
2863                               QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M,
2864                               QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S,
2865                               to_hr_cq(ibqp->send_cq)->cqn);
2866                roce_set_field(context->qpc_bytes_8,
2867                               QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M,
2868                               QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S,
2869                               to_hr_cq(ibqp->recv_cq)->cqn);
2870
2871                if (ibqp->srq)
2872                        roce_set_field(context->qpc_bytes_12,
2873                                       QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M,
2874                                       QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S,
2875                                       to_hr_srq(ibqp->srq)->srqn);
2876                if (attr_mask & IB_QP_PKEY_INDEX)
2877                        roce_set_field(context->qpc_bytes_12,
2878                                       QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
2879                                       QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
2880                                       attr->pkey_index);
2881                else
2882                        roce_set_field(context->qpc_bytes_12,
2883                                       QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
2884                                       QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
2885                                       hr_qp->pkey_index);
2886
2887                roce_set_field(context->qpc_bytes_16,
2888                               QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
2889                               QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
2890        } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
2891                if ((attr_mask & IB_QP_ALT_PATH) ||
2892                    (attr_mask & IB_QP_ACCESS_FLAGS) ||
2893                    (attr_mask & IB_QP_PKEY_INDEX) ||
2894                    (attr_mask & IB_QP_QKEY)) {
2895                        dev_err(dev, "INIT2RTR attr_mask error\n");
2896                        goto out;
2897                }
2898
2899                dmac = (u8 *)attr->ah_attr.roce.dmac;
2900
2901                context->sq_rq_bt_l = cpu_to_le32((u32)(dma_handle));
2902                roce_set_field(context->qpc_bytes_24,
2903                               QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M,
2904                               QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S,
2905                               ((u32)(dma_handle >> 32)));
2906                roce_set_bit(context->qpc_bytes_24,
2907                             QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S,
2908                             1);
2909                roce_set_field(context->qpc_bytes_24,
2910                               QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
2911                               QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S,
2912                               attr->min_rnr_timer);
2913                context->irrl_ba_l = cpu_to_le32((u32)(dma_handle_2));
2914                roce_set_field(context->qpc_bytes_32,
2915                               QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M,
2916                               QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S,
2917                               ((u32)(dma_handle_2 >> 32)) &
2918                                QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M);
2919                roce_set_field(context->qpc_bytes_32,
2920                               QP_CONTEXT_QPC_BYTES_32_MIG_STATE_M,
2921                               QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S, 0);
2922                roce_set_bit(context->qpc_bytes_32,
2923                             QP_CONTEXT_QPC_BYTE_32_LOCAL_ENABLE_E2E_CREDITS_S,
2924                             1);
2925                roce_set_bit(context->qpc_bytes_32,
2926                             QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S,
2927                             hr_qp->sq_signal_bits);
2928
2929                port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) :
2930                        hr_qp->port;
2931                smac = (u8 *)hr_dev->dev_addr[port];
2932                /* When dmac equals smac or loop_idc is 1, packets loop back */
2933                if (ether_addr_equal_unaligned(dmac, smac) ||
2934                    hr_dev->loop_idc == 0x1)
2935                        roce_set_bit(context->qpc_bytes_32,
2936                              QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S, 1);
2937
2938                roce_set_bit(context->qpc_bytes_32,
2939                             QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S,
2940                             rdma_ah_get_ah_flags(&attr->ah_attr));
2941                roce_set_field(context->qpc_bytes_32,
2942                               QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
2943                               QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S,
2944                               ilog2((unsigned int)attr->max_dest_rd_atomic));
2945
2946                if (attr_mask & IB_QP_DEST_QPN)
2947                        roce_set_field(context->qpc_bytes_36,
2948                                       QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
2949                                       QP_CONTEXT_QPC_BYTES_36_DEST_QP_S,
2950                                       attr->dest_qp_num);
2951
2952                /* Configure GID index */
2953                port_num = rdma_ah_get_port_num(&attr->ah_attr);
2954                roce_set_field(context->qpc_bytes_36,
2955                               QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
2956                               QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S,
2957                                hns_get_gid_index(hr_dev,
2958                                                  port_num - 1,
2959                                                  grh->sgid_index));
2960
2961                memcpy(&(context->dmac_l), dmac, 4);
2962
2963                roce_set_field(context->qpc_bytes_44,
2964                               QP_CONTEXT_QPC_BYTES_44_DMAC_H_M,
2965                               QP_CONTEXT_QPC_BYTES_44_DMAC_H_S,
2966                               *((u16 *)(&dmac[4])));
2967                roce_set_field(context->qpc_bytes_44,
2968                               QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_M,
2969                               QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S,
2970                               rdma_ah_get_static_rate(&attr->ah_attr));
2971                roce_set_field(context->qpc_bytes_44,
2972                               QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
2973                               QP_CONTEXT_QPC_BYTES_44_HOPLMT_S,
2974                               grh->hop_limit);
2975
2976                roce_set_field(context->qpc_bytes_48,
2977                               QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
2978                               QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S,
2979                               grh->flow_label);
2980                roce_set_field(context->qpc_bytes_48,
2981                               QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
2982                               QP_CONTEXT_QPC_BYTES_48_TCLASS_S,
2983                               grh->traffic_class);
2984                roce_set_field(context->qpc_bytes_48,
2985                               QP_CONTEXT_QPC_BYTES_48_MTU_M,
2986                               QP_CONTEXT_QPC_BYTES_48_MTU_S, attr->path_mtu);
2987
2988                memcpy(context->dgid, grh->dgid.raw,
2989                       sizeof(grh->dgid.raw));
2990
2991                dev_dbg(dev, "dmac_l:%x dmac_h:%lx\n", context->dmac_l,
2992                        roce_get_field(context->qpc_bytes_44,
2993                                       QP_CONTEXT_QPC_BYTES_44_DMAC_H_M,
2994                                       QP_CONTEXT_QPC_BYTES_44_DMAC_H_S));
2995
2996                roce_set_field(context->qpc_bytes_68,
2997                               QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M,
2998                               QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S,
2999                               hr_qp->rq.head);
3000                roce_set_field(context->qpc_bytes_68,
3001                               QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M,
3002                               QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0);
3003
3004                rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
3005                context->cur_rq_wqe_ba_l =
3006                                cpu_to_le32((u32)(mtts[rq_pa_start]));
3007
3008                roce_set_field(context->qpc_bytes_76,
3009                        QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M,
3010                        QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S,
3011                        mtts[rq_pa_start] >> 32);
3012                roce_set_field(context->qpc_bytes_76,
3013                               QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M,
3014                               QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S, 0);
3015
3016                context->rx_rnr_time = 0;
3017
3018                roce_set_field(context->qpc_bytes_84,
3019                               QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_M,
3020                               QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S,
3021                               attr->rq_psn - 1);
3022                roce_set_field(context->qpc_bytes_84,
3023                               QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_M,
3024                               QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S, 0);
3025
3026                roce_set_field(context->qpc_bytes_88,
3027                               QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M,
3028                               QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S,
3029                               attr->rq_psn);
3030                roce_set_bit(context->qpc_bytes_88,
3031                             QP_CONTEXT_QPC_BYTES_88_RX_REQ_PSN_ERR_FLAG_S, 0);
3032                roce_set_bit(context->qpc_bytes_88,
3033                             QP_CONTEXT_QPC_BYTES_88_RX_LAST_OPCODE_FLG_S, 0);
3034                roce_set_field(context->qpc_bytes_88,
3035                        QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_M,
3036                        QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S,
3037                        0);
3038                roce_set_field(context->qpc_bytes_88,
3039                               QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_M,
3040                               QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S,
3041                               0);
3042
3043                context->dma_length = 0;
3044                context->r_key = 0;
3045                context->va_l = 0;
3046                context->va_h = 0;
3047
3048                roce_set_field(context->qpc_bytes_108,
3049                               QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_M,
3050                               QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S, 0);
3051                roce_set_bit(context->qpc_bytes_108,
3052                             QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_FLG_S, 0);
3053                roce_set_bit(context->qpc_bytes_108,
3054                             QP_CONTEXT_QPC_BYTES_108_TRRL_TDB_PSN_FLG_S, 0);
3055
3056                roce_set_field(context->qpc_bytes_112,
3057                               QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_M,
3058                               QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S, 0);
3059                roce_set_field(context->qpc_bytes_112,
3060                               QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_M,
3061                               QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S, 0);
3062
3063                /* Port and SL used by the chip for response/ack packets */
3064                roce_set_field(context->qpc_bytes_156,
3065                               QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
3066                               QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
3067                               hr_qp->phy_port);
3068                roce_set_field(context->qpc_bytes_156,
3069                               QP_CONTEXT_QPC_BYTES_156_SL_M,
3070                               QP_CONTEXT_QPC_BYTES_156_SL_S,
3071                               rdma_ah_get_sl(&attr->ah_attr));
3072                hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
3073        } else if (cur_state == IB_QPS_RTR &&
3074                new_state == IB_QPS_RTS) {
3075                /* If any optional attribute is set, return an error */
3076                if ((attr_mask & IB_QP_ALT_PATH) ||
3077                    (attr_mask & IB_QP_ACCESS_FLAGS) ||
3078                    (attr_mask & IB_QP_QKEY) ||
3079                    (attr_mask & IB_QP_PATH_MIG_STATE) ||
3080                    (attr_mask & IB_QP_CUR_STATE) ||
3081                    (attr_mask & IB_QP_MIN_RNR_TIMER)) {
3082                        dev_err(dev, "RTR2RTS attr_mask error\n");
3083                        goto out;
3084                }
3085
3086                context->rx_cur_sq_wqe_ba_l = cpu_to_le32((u32)(mtts[0]));
3087
3088                roce_set_field(context->qpc_bytes_120,
3089                               QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M,
3090                               QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S,
3091                               (mtts[0]) >> 32);
3092
3093                roce_set_field(context->qpc_bytes_124,
3094                               QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M,
3095                               QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S, 0);
3096                roce_set_field(context->qpc_bytes_124,
3097                               QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_M,
3098                               QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S, 0);
3099
3100                roce_set_field(context->qpc_bytes_128,
3101                               QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_M,
3102                               QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S,
3103                               attr->sq_psn);
3104                roce_set_bit(context->qpc_bytes_128,
3105                             QP_CONTEXT_QPC_BYTES_128_RX_ACK_PSN_ERR_FLG_S, 0);
3106                roce_set_field(context->qpc_bytes_128,
3107                             QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_M,
3108                             QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S,
3109                             0);
3110                roce_set_bit(context->qpc_bytes_128,
3111                             QP_CONTEXT_QPC_BYTES_128_IRRL_PSN_VLD_FLG_S, 0);
3112
3113                roce_set_field(context->qpc_bytes_132,
3114                               QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_M,
3115                               QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S, 0);
3116                roce_set_field(context->qpc_bytes_132,
3117                               QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_M,
3118                               QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S, 0);
3119
3120                roce_set_field(context->qpc_bytes_136,
3121                               QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_M,
3122                               QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S,
3123                               attr->sq_psn);
3124                roce_set_field(context->qpc_bytes_136,
3125                               QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_M,
3126                               QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S,
3127                               attr->sq_psn);
3128
3129                roce_set_field(context->qpc_bytes_140,
3130                               QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_M,
3131                               QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S,
3132                               (attr->sq_psn >> SQ_PSN_SHIFT));
3133                roce_set_field(context->qpc_bytes_140,
3134                               QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_M,
3135                               QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S, 0);
3136                roce_set_bit(context->qpc_bytes_140,
3137                             QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S, 0);
3138
3139                roce_set_field(context->qpc_bytes_148,
3140                               QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M,
3141                               QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S, 0);
3142                roce_set_field(context->qpc_bytes_148,
3143                               QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
3144                               QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S,
3145                               attr->retry_cnt);
3146                roce_set_field(context->qpc_bytes_148,
3147                               QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M,
3148                               QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S,
3149                               attr->rnr_retry);
3150                roce_set_field(context->qpc_bytes_148,
3151                               QP_CONTEXT_QPC_BYTES_148_LSN_M,
3152                               QP_CONTEXT_QPC_BYTES_148_LSN_S, 0x100);
3153
3154                context->rnr_retry = 0;
3155
3156                roce_set_field(context->qpc_bytes_156,
3157                               QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M,
3158                               QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S,
3159                               attr->retry_cnt);
3160                if (attr->timeout < 0x12) {
3161                        dev_info(dev, "ack timeout value (0x%x) must be at least 0x12, using 0x12\n",
3162                                 attr->timeout);
3163                        roce_set_field(context->qpc_bytes_156,
3164                                       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
3165                                       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
3166                                       0x12);
3167                } else {
3168                        roce_set_field(context->qpc_bytes_156,
3169                                       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
3170                                       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
3171                                       attr->timeout);
3172                }
3173                roce_set_field(context->qpc_bytes_156,
3174                               QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M,
3175                               QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S,
3176                               attr->rnr_retry);
3177                roce_set_field(context->qpc_bytes_156,
3178                               QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
3179                               QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
3180                               hr_qp->phy_port);
3181                roce_set_field(context->qpc_bytes_156,
3182                               QP_CONTEXT_QPC_BYTES_156_SL_M,
3183                               QP_CONTEXT_QPC_BYTES_156_SL_S,
3184                               rdma_ah_get_sl(&attr->ah_attr));
3185                hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
3186                roce_set_field(context->qpc_bytes_156,
3187                               QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
3188                               QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S,
3189                               ilog2((unsigned int)attr->max_rd_atomic));
3190                roce_set_field(context->qpc_bytes_156,
3191                               QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_M,
3192                               QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S, 0);
3193                context->pkt_use_len = 0;
3194
3195                roce_set_field(context->qpc_bytes_164,
3196                               QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M,
3197                               QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S, attr->sq_psn);
3198                roce_set_field(context->qpc_bytes_164,
3199                               QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_M,
3200                               QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S, 0);
3201
3202                roce_set_field(context->qpc_bytes_168,
3203                               QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_M,
3204                               QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S,
3205                               attr->sq_psn);
3206                roce_set_field(context->qpc_bytes_168,
3207                               QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_M,
3208                               QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S, 0);
3209                roce_set_field(context->qpc_bytes_168,
3210                               QP_CONTEXT_QPC_BYTES_168_DB_TYPE_M,
3211                               QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S, 0);
3212                roce_set_bit(context->qpc_bytes_168,
3213                             QP_CONTEXT_QPC_BYTES_168_MSG_LP_IND_S, 0);
3214                roce_set_bit(context->qpc_bytes_168,
3215                             QP_CONTEXT_QPC_BYTES_168_CSDB_LP_IND_S, 0);
3216                roce_set_bit(context->qpc_bytes_168,
3217                             QP_CONTEXT_QPC_BYTES_168_QP_ERR_FLG_S, 0);
3218                context->sge_use_len = 0;
3219
3220                roce_set_field(context->qpc_bytes_176,
3221                               QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_M,
3222                               QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S, 0);
3223                roce_set_field(context->qpc_bytes_176,
3224                               QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_M,
3225                               QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S,
3226                               0);
3227                roce_set_field(context->qpc_bytes_180,
3228                               QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_M,
3229                               QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S, 0);
3230                roce_set_field(context->qpc_bytes_180,
3231                               QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M,
3232                               QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S, 0);
3233
3234                context->tx_cur_sq_wqe_ba_l = cpu_to_le32((u32)(mtts[0]));
3235
3236                roce_set_field(context->qpc_bytes_188,
3237                               QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M,
3238                               QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S,
3239                               (mtts[0]) >> 32);
3240                roce_set_bit(context->qpc_bytes_188,
3241                             QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S, 0);
3242                roce_set_field(context->qpc_bytes_188,
3243                               QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M,
3244                               QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S,
3245                               0);
3246        } else if (!((cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
3247                   (cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) ||
3248                   (cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) ||
3249                   (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
3250                   (cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) ||
3251                   (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
3252                   (cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) ||
3253                   (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR))) {
3254                dev_err(dev, "unsupported QP state transition\n");
3255                goto out;
3256        }
3257
3258        /* Every state transition must update the QP state field */
3259        roce_set_field(context->qpc_bytes_144,
3260                       QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
3261                       QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, new_state);
3262
3263        /* SW pass context to HW */
3264        ret = hns_roce_v1_qp_modify(hr_dev, &hr_qp->mtt,
3265                                    to_hns_roce_state(cur_state),
3266                                    to_hns_roce_state(new_state), context,
3267                                    hr_qp);
3268        if (ret) {
3269                dev_err(dev, "hns_roce_qp_modify failed\n");
3270                goto out;
3271        }
3272
3273        /*
3274         * The driver uses rst2init in place of init2init, so the
3275         * hardware must be made to refresh the RQ head via the doorbell.
3276         */
3277        if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
3278                /* Memory barrier */
3279                wmb();
3280
3281                roce_set_field(doorbell[0], RQ_DOORBELL_U32_4_RQ_HEAD_M,
3282                               RQ_DOORBELL_U32_4_RQ_HEAD_S, hr_qp->rq.head);
3283                roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_QPN_M,
3284                               RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
3285                roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_CMD_M,
3286                               RQ_DOORBELL_U32_8_CMD_S, 1);
3287                roce_set_bit(doorbell[1], RQ_DOORBELL_U32_8_HW_SYNC_S, 1);
3288
3289                if (ibqp->uobject) {
3290                        hr_qp->rq.db_reg_l = hr_dev->reg_base +
3291                                     hr_dev->odb_offset +
3292                                     DB_REG_OFFSET * hr_dev->priv_uar.index;
3293                }
3294
3295                hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
3296        }
3297
3298        hr_qp->state = new_state;
3299
3300        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
3301                hr_qp->resp_depth = attr->max_dest_rd_atomic;
3302        if (attr_mask & IB_QP_PORT) {
3303                hr_qp->port = attr->port_num - 1;
3304                hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
3305        }
3306
3307        if (new_state == IB_QPS_RESET && !ibqp->uobject) {
3308                hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
3309                                     ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
3310                if (ibqp->send_cq != ibqp->recv_cq)
3311                        hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
3312                                             hr_qp->qpn, NULL);
3313
3314                hr_qp->rq.head = 0;
3315                hr_qp->rq.tail = 0;
3316                hr_qp->sq.head = 0;
3317                hr_qp->sq.tail = 0;
3318                hr_qp->sq_next_wqe = 0;
3319        }
3320out:
3321        kfree(context);
3322        return ret;
3323}
3324
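/*
 * hns_roce_v1_modify_qp() - hw_v1 hook behind ib_modify_qp().
 *
 * Special QPs (GSI/SMI) take the register-based QP1C path; all other
 * QPs take the mailbox-based QPC path. A minimal caller sketch, as
 * seen from the IB core (illustrative only):
 *
 *	struct ib_qp_attr attr = { .qp_state = IB_QPS_INIT };
 *
 *	ib_modify_qp(ibqp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
 *		     IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */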
3325static int hns_roce_v1_modify_qp(struct ib_qp *ibqp,
3326                                 const struct ib_qp_attr *attr, int attr_mask,
3327                                 enum ib_qp_state cur_state,
3328                                 enum ib_qp_state new_state)
3329{
3331        if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
3332                return hns_roce_v1_m_sqp(ibqp, attr, attr_mask, cur_state,
3333                                         new_state);
3334        else
3335                return hns_roce_v1_m_qp(ibqp, attr, attr_mask, cur_state,
3336                                        new_state);
3337}
3338
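/*
 * Map a hardware QP state to its IB verbs equivalent. Unknown hardware
 * states map to IB_QPS_ERR, so this never returns a negative value
 * (the -1 check in hns_roce_v1_q_qp() is defensive only).
 */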
3339static enum ib_qp_state to_ib_qp_state(enum hns_roce_qp_state state)
3340{
3341        switch (state) {
3342        case HNS_ROCE_QP_STATE_RST:
3343                return IB_QPS_RESET;
3344        case HNS_ROCE_QP_STATE_INIT:
3345                return IB_QPS_INIT;
3346        case HNS_ROCE_QP_STATE_RTR:
3347                return IB_QPS_RTR;
3348        case HNS_ROCE_QP_STATE_RTS:
3349                return IB_QPS_RTS;
3350        case HNS_ROCE_QP_STATE_SQD:
3351                return IB_QPS_SQD;
3352        case HNS_ROCE_QP_STATE_ERR:
3353                return IB_QPS_ERR;
3354        default:
3355                return IB_QPS_ERR;
3356        }
3357}
3358
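/*
 * hns_roce_v1_query_qpc() - read a QP context back from hardware.
 *
 * Issues HNS_ROCE_CMD_QUERY_QP through the mailbox; on success the
 * context is copied out of the mailbox buffer into @hr_context.
 */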
3359static int hns_roce_v1_query_qpc(struct hns_roce_dev *hr_dev,
3360                                 struct hns_roce_qp *hr_qp,
3361                                 struct hns_roce_qp_context *hr_context)
3362{
3363        struct hns_roce_cmd_mailbox *mailbox;
3364        int ret;
3365
3366        mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3367        if (IS_ERR(mailbox))
3368                return PTR_ERR(mailbox);
3369
3370        ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
3371                                HNS_ROCE_CMD_QUERY_QP,
3372                                HNS_ROCE_CMD_TIMEOUT_MSECS);
3373        if (!ret)
3374                memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
3375        else
3376                dev_err(&hr_dev->pdev->dev, "QUERY QP cmd process error\n");
3377
3378        hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3379
3380        return ret;
3381}
3382
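/*
 * hns_roce_v1_q_sqp() - query attributes of the special QP (QP1).
 *
 * The QP1C context lives in per-port registers rather than in host
 * memory, so the ten 32-bit context words are read back individually;
 * roce_read() takes byte offsets, hence the 4-byte stride. Most of the
 * reported attributes are fixed for QP1 (e.g. 256-byte path MTU and
 * the well-known QKEY_VAL).
 */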
3383static int hns_roce_v1_q_sqp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3384                             int qp_attr_mask,
3385                             struct ib_qp_init_attr *qp_init_attr)
3386{
3387        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3388        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3389        struct hns_roce_sqp_context context;
3390        u32 addr;
3391
3392        mutex_lock(&hr_qp->mutex);
3393
3394        if (hr_qp->state == IB_QPS_RESET) {
3395                qp_attr->qp_state = IB_QPS_RESET;
3396                goto done;
3397        }
3398
3399        addr = ROCEE_QP1C_CFG0_0_REG +
3400                hr_qp->port * sizeof(struct hns_roce_sqp_context);
3401        context.qp1c_bytes_4 = cpu_to_le32(roce_read(hr_dev, addr));
3402        context.sq_rq_bt_l = cpu_to_le32(roce_read(hr_dev, addr + 4));
3403        context.qp1c_bytes_12 = cpu_to_le32(roce_read(hr_dev, addr + 8));
3404        context.qp1c_bytes_16 = cpu_to_le32(roce_read(hr_dev, addr + 12));
3405        context.qp1c_bytes_20 = cpu_to_le32(roce_read(hr_dev, addr + 16));
3406        context.cur_rq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 20));
3407        context.qp1c_bytes_28 = cpu_to_le32(roce_read(hr_dev, addr + 24));
3408        context.qp1c_bytes_32 = cpu_to_le32(roce_read(hr_dev, addr + 28));
3409        context.cur_sq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 32));
3410        context.qp1c_bytes_40 = cpu_to_le32(roce_read(hr_dev, addr + 36));
3411
3412        hr_qp->state = roce_get_field(context.qp1c_bytes_4,
3413                                      QP1C_BYTES_4_QP_STATE_M,
3414                                      QP1C_BYTES_4_QP_STATE_S);
3415        qp_attr->qp_state       = hr_qp->state;
3416        qp_attr->path_mtu       = IB_MTU_256;
3417        qp_attr->path_mig_state = IB_MIG_ARMED;
3418        qp_attr->qkey           = QKEY_VAL;
3419        qp_attr->ah_attr.type   = RDMA_AH_ATTR_TYPE_ROCE;
3420        qp_attr->rq_psn         = 0;
3421        qp_attr->sq_psn         = 0;
3422        qp_attr->dest_qp_num    = 1;
3423        qp_attr->qp_access_flags = 6;
3424
3425        qp_attr->pkey_index = roce_get_field(context.qp1c_bytes_20,
3426                                             QP1C_BYTES_20_PKEY_IDX_M,
3427                                             QP1C_BYTES_20_PKEY_IDX_S);
3428        qp_attr->port_num = hr_qp->port + 1;
3429        qp_attr->sq_draining = 0;
3430        qp_attr->max_rd_atomic = 0;
3431        qp_attr->max_dest_rd_atomic = 0;
3432        qp_attr->min_rnr_timer = 0;
3433        qp_attr->timeout = 0;
3434        qp_attr->retry_cnt = 0;
3435        qp_attr->rnr_retry = 0;
3436        qp_attr->alt_timeout = 0;
3437
3438done:
3439        qp_attr->cur_qp_state = qp_attr->qp_state;
3440        qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
3441        qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
3442        qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
3443        qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
3444        qp_attr->cap.max_inline_data = 0;
3445        qp_init_attr->cap = qp_attr->cap;
3446        qp_init_attr->create_flags = 0;
3447
3448        mutex_unlock(&hr_qp->mutex);
3449
3450        return 0;
3451}
3452
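/*
 * hns_roce_v1_q_qp() - query a regular QP by decoding its QPC.
 *
 * The context is fetched with hns_roce_v1_query_qpc() and translated
 * field by field into @qp_attr. Send-queue capabilities are reported
 * only for kernel QPs; for user QPs they are returned as zero.
 */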
3453static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3454                            int qp_attr_mask,
3455                            struct ib_qp_init_attr *qp_init_attr)
3456{
3457        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3458        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3459        struct device *dev = &hr_dev->pdev->dev;
3460        struct hns_roce_qp_context *context;
3461        int tmp_qp_state = 0;
3462        int ret = 0;
3463        int state;
3464
3465        context = kzalloc(sizeof(*context), GFP_KERNEL);
3466        if (!context)
3467                return -ENOMEM;
3468
3469        memset(qp_attr, 0, sizeof(*qp_attr));
3470        memset(qp_init_attr, 0, sizeof(*qp_init_attr));
3471
3472        mutex_lock(&hr_qp->mutex);
3473
3474        if (hr_qp->state == IB_QPS_RESET) {
3475                qp_attr->qp_state = IB_QPS_RESET;
3476                goto done;
3477        }
3478
3479        ret = hns_roce_v1_query_qpc(hr_dev, hr_qp, context);
3480        if (ret) {
3481                dev_err(dev, "query qpc error\n");
3482                ret = -EINVAL;
3483                goto out;
3484        }
3485
3486        state = roce_get_field(context->qpc_bytes_144,
3487                               QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
3488                               QP_CONTEXT_QPC_BYTES_144_QP_STATE_S);
3489        tmp_qp_state = (int)to_ib_qp_state((enum hns_roce_qp_state)state);
3490        if (tmp_qp_state == -1) {
3491                dev_err(dev, "to_ib_qp_state error\n");
3492                ret = -EINVAL;
3493                goto out;
3494        }
3495        hr_qp->state = (u8)tmp_qp_state;
3496        qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
3497        qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->qpc_bytes_48,
3498                                               QP_CONTEXT_QPC_BYTES_48_MTU_M,
3499                                               QP_CONTEXT_QPC_BYTES_48_MTU_S);
3500        qp_attr->path_mig_state = IB_MIG_ARMED;
3501        qp_attr->ah_attr.type   = RDMA_AH_ATTR_TYPE_ROCE;
3502        if (hr_qp->ibqp.qp_type == IB_QPT_UD)
3503                qp_attr->qkey = QKEY_VAL;
3504
3505        qp_attr->rq_psn = roce_get_field(context->qpc_bytes_88,
3506                                         QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M,
3507                                         QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S);
3508        qp_attr->sq_psn = (u32)roce_get_field(context->qpc_bytes_164,
3509                                             QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M,
3510                                             QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S);
3511        qp_attr->dest_qp_num = (u8)roce_get_field(context->qpc_bytes_36,
3512                                        QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
3513                                        QP_CONTEXT_QPC_BYTES_36_DEST_QP_S);
3514        qp_attr->qp_access_flags = ((roce_get_bit(context->qpc_bytes_4,
3515                        QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S)) << 2) |
3516                                   ((roce_get_bit(context->qpc_bytes_4,
3517                        QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S)) << 1) |
3518                                   ((roce_get_bit(context->qpc_bytes_4,
3519                        QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S)) << 3);
3520
3521        if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
3522            hr_qp->ibqp.qp_type == IB_QPT_UC) {
3523                struct ib_global_route *grh =
3524                        rdma_ah_retrieve_grh(&qp_attr->ah_attr);
3525
3526                rdma_ah_set_sl(&qp_attr->ah_attr,
3527                               roce_get_field(context->qpc_bytes_156,
3528                                              QP_CONTEXT_QPC_BYTES_156_SL_M,
3529                                              QP_CONTEXT_QPC_BYTES_156_SL_S));
3530                rdma_ah_set_ah_flags(&qp_attr->ah_attr, IB_AH_GRH);
3531                grh->flow_label =
3532                        roce_get_field(context->qpc_bytes_48,
3533                                       QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
3534                                       QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S);
3535                grh->sgid_index =
3536                        roce_get_field(context->qpc_bytes_36,
3537                                       QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
3538                                       QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S);
3539                grh->hop_limit =
3540                        roce_get_field(context->qpc_bytes_44,
3541                                       QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
3542                                       QP_CONTEXT_QPC_BYTES_44_HOPLMT_S);
3543                grh->traffic_class =
3544                        roce_get_field(context->qpc_bytes_48,
3545                                       QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
3546                                       QP_CONTEXT_QPC_BYTES_48_TCLASS_S);
3547
3548                memcpy(grh->dgid.raw, context->dgid,
3549                       sizeof(grh->dgid.raw));
3550        }
3551
3552        qp_attr->pkey_index = roce_get_field(context->qpc_bytes_12,
3553                              QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
3554                              QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S);
3555        qp_attr->port_num = hr_qp->port + 1;
3556        qp_attr->sq_draining = 0;
3557        qp_attr->max_rd_atomic = 1 << roce_get_field(context->qpc_bytes_156,
3558                                 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
3559                                 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S);
3560        qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->qpc_bytes_32,
3561                                 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
3562                                 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S);
3563        qp_attr->min_rnr_timer = (u8)(roce_get_field(context->qpc_bytes_24,
3564                        QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
3565                        QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S));
3566        qp_attr->timeout = (u8)(roce_get_field(context->qpc_bytes_156,
3567                            QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
3568                            QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S));
3569        qp_attr->retry_cnt = roce_get_field(context->qpc_bytes_148,
3570                             QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
3571                             QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S);
3572        qp_attr->rnr_retry = (u8)le32_to_cpu(context->rnr_retry);
3573
3574done:
3575        qp_attr->cur_qp_state = qp_attr->qp_state;
3576        qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
3577        qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
3578
3579        if (!ibqp->uobject) {
3580                qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
3581                qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
3582        } else {
3583                qp_attr->cap.max_send_wr = 0;
3584                qp_attr->cap.max_send_sge = 0;
3585        }
3586
3587        qp_init_attr->cap = qp_attr->cap;
3588
3589out:
3590        mutex_unlock(&hr_qp->mutex);
3591        kfree(context);
3592        return ret;
3593}
3594
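/*
 * Dispatch a query to the QP1 or regular path; doorbell_qpn <= 1
 * identifies the special QPs (QP0/QP1).
 */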
3595static int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3596                                int qp_attr_mask,
3597                                struct ib_qp_init_attr *qp_init_attr)
3598{
3599        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3600
3601        return hr_qp->doorbell_qpn <= 1 ?
3602                hns_roce_v1_q_sqp(ibqp, qp_attr, qp_attr_mask, qp_init_attr) :
3603                hns_roce_v1_q_qp(ibqp, qp_attr, qp_attr_mask, qp_init_attr);
3604}
3605
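/*
 * hns_roce_v1_destroy_qp() - tear down a QP.
 *
 * The QP is first driven to RESET, then (for kernel QPs) its stale
 * CQEs are purged from both CQs while the CQ locks are held so the
 * clean cannot race with pollers. Finally the QPC, QPN (RC only),
 * MTT, umem and any kernel-side buffers are released.
 */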
3606int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
3607{
3608        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3609        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3610        struct hns_roce_cq *send_cq, *recv_cq;
3611        int ret;
3612
3613        ret = hns_roce_v1_modify_qp(ibqp, NULL, 0, hr_qp->state, IB_QPS_RESET);
3614        if (ret)
3615                return ret;
3616
3617        send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
3618        recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);
3619
3620        hns_roce_lock_cqs(send_cq, recv_cq);
3621        if (!udata) {
3622                __hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
3623                                       to_hr_srq(hr_qp->ibqp.srq) : NULL);
3624                if (send_cq != recv_cq)
3625                        __hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL);
3626        }
3627        hns_roce_unlock_cqs(send_cq, recv_cq);
3628
3629        hns_roce_qp_remove(hr_dev, hr_qp);
3630        hns_roce_qp_free(hr_dev, hr_qp);
3631
3632        /* RC QP, release QPN */
3633        if (hr_qp->ibqp.qp_type == IB_QPT_RC)
3634                hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
3635
3636        hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
3637
3638        ib_umem_release(hr_qp->umem);
3639        if (!udata) {
3640                kfree(hr_qp->sq.wrid);
3641                kfree(hr_qp->rq.wrid);
3642
3643                hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
3644        }
3645
3646        kfree(hr_qp);
3647        return 0;
3648}
3649
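/*
 * hns_roce_v1_destroy_cq() - destroy a CQ once hardware has drained it.
 *
 * After the CQC is freed, poll ROCEE_SCAEP_WR_CQE_CNT until the
 * write-command-empty bit is set or enough CQEs have been written,
 * bounded by HNS_ROCE_MAX_FREE_CQ_WAIT_CNT sleeps, before releasing
 * the CQ buffer.
 */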
3650static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
3651{
3652        struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
3653        struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
3654        struct device *dev = &hr_dev->pdev->dev;
3655        u32 cqe_cnt_ori;
3656        u32 cqe_cnt_cur;
3657        int wait_time = 0;
3658
3659        hns_roce_free_cqc(hr_dev, hr_cq);
3660
3661        /*
3662         * Before freeing the CQ buffer, ensure that all outstanding CQEs
3663         * have been written by checking the CQE counter.
3664         */
3665        cqe_cnt_ori = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
3666        while (1) {
3667                if (roce_read(hr_dev, ROCEE_CAEP_CQE_WCMD_EMPTY) &
3668                    HNS_ROCE_CQE_WCMD_EMPTY_BIT)
3669                        break;
3670
3671                cqe_cnt_cur = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
3672                if ((cqe_cnt_cur - cqe_cnt_ori) >= HNS_ROCE_MIN_CQE_CNT)
3673                        break;
3674
3675                msleep(HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS);
3676                if (wait_time > HNS_ROCE_MAX_FREE_CQ_WAIT_CNT) {
3677                        dev_warn(dev, "Destroy cq 0x%lx timeout!\n",
3678                                hr_cq->cqn);
3679                        break;
3680                }
3681                wait_time++;
3682        }
3683
3684        hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
3685
3686        ib_umem_release(hr_cq->umem);
3687        if (!udata) {
3688                /* Free the buffer that stored the CQEs */
3689                hns_roce_buf_free(hr_dev, hr_cq->buf.size, &hr_cq->buf);
3690        }
3691}
3692
3693static void set_eq_cons_index_v1(struct hns_roce_eq *eq, int req_not)
3694{
3695        roce_raw_write((eq->cons_index & HNS_ROCE_V1_CONS_IDX_M) |
3696                      (req_not << eq->log_entries), eq->doorbell);
3697}
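/*
 * Doorbell encoding sketch (hypothetical values): for an EQ with
 * log_entries = 8 and cons_index = 0x105, a write with req_not = 1 posts
 * (0x105 & HNS_ROCE_V1_CONS_IDX_M) | (1 << 8), i.e. the masked consumer
 * index with the notification-request flag at bit log_entries.
 */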
3698
3699static void hns_roce_v1_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
3700                                            struct hns_roce_aeqe *aeqe, int qpn)
3701{
3702        struct device *dev = &hr_dev->pdev->dev;
3703
3704        dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
3705        switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
3706                               HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
3707        case HNS_ROCE_LWQCE_QPC_ERROR:
3708                dev_warn(dev, "QP %d, QPC error.\n", qpn);
3709                break;
3710        case HNS_ROCE_LWQCE_MTU_ERROR:
3711                dev_warn(dev, "QP %d, MTU error.\n", qpn);
3712                break;
3713        case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
3714                dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
3715                break;
3716        case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
3717                dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
3718                break;
3719        case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
3720                dev_warn(dev, "QP %d, WQE shift error\n", qpn);
3721                break;
3722        case HNS_ROCE_LWQCE_SL_ERROR:
3723                dev_warn(dev, "QP %d, SL error.\n", qpn);
3724                break;
3725        case HNS_ROCE_LWQCE_PORT_ERROR:
3726                dev_warn(dev, "QP %d, port error.\n", qpn);
3727                break;
3728        default:
3729                break;
3730        }
3731}
3732
3733static void hns_roce_v1_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
3734                                                   struct hns_roce_aeqe *aeqe,
3735                                                   int qpn)
3736{
3737        struct device *dev = &hr_dev->pdev->dev;
3738
3739        dev_warn(dev, "Local Access Violation Work Queue Error.\n");
3740        switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
3741                               HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
3742        case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
3743                dev_warn(dev, "QP %d, R_key violation.\n", qpn);
3744                break;
3745        case HNS_ROCE_LAVWQE_LENGTH_ERROR:
3746                dev_warn(dev, "QP %d, length error.\n", qpn);
3747                break;
3748        case HNS_ROCE_LAVWQE_VA_ERROR:
3749                dev_warn(dev, "QP %d, VA error.\n", qpn);
3750                break;
3751        case HNS_ROCE_LAVWQE_PD_ERROR:
3752                dev_err(dev, "QP %d, PD error.\n", qpn);
3753                break;
3754        case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
3755                dev_warn(dev, "QP %d, rw acc error.\n", qpn);
3756                break;
3757        case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
3758                dev_warn(dev, "QP %d, key state error.\n", qpn);
3759                break;
3760        case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
3761                dev_warn(dev, "QP %d, MR operation error.\n", qpn);
3762                break;
3763        default:
3764                break;
3765        }
3766}
3767
3768static void hns_roce_v1_qp_err_handle(struct hns_roce_dev *hr_dev,
3769                                      struct hns_roce_aeqe *aeqe,
3770                                      int event_type)
3771{
3772        struct device *dev = &hr_dev->pdev->dev;
3773        int phy_port;
3774        int qpn;
3775
3776        qpn = roce_get_field(aeqe->event.qp_event.qp,
3777                             HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
3778                             HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
3779        phy_port = roce_get_field(aeqe->event.qp_event.qp,
3780                                  HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M,
3781                                  HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S);
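        /*
         * Special QPs share QPN 0/1 across ports; fold in the physical port
         * to form a unique software QPN, e.g. GSI QP 1 on phy_port 2 maps
         * to HNS_ROCE_MAX_PORTS * 1 + 2.
         */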
3782        if (qpn <= 1)
3783                qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port;
3784
3785        switch (event_type) {
3786        case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
3787                dev_warn(dev, "Invalid Req Local Work Queue Error.\n"
3788                         "QP %d, phy_port %d.\n", qpn, phy_port);
3789                break;
3790        case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
3791                hns_roce_v1_wq_catas_err_handle(hr_dev, aeqe, qpn);
3792                break;
3793        case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
3794                hns_roce_v1_local_wq_access_err_handle(hr_dev, aeqe, qpn);
3795                break;
3796        default:
3797                break;
3798        }
3799
3800        hns_roce_qp_event(hr_dev, qpn, event_type);
3801}
3802
3803static void hns_roce_v1_cq_err_handle(struct hns_roce_dev *hr_dev,
3804                                      struct hns_roce_aeqe *aeqe,
3805                                      int event_type)
3806{
3807        struct device *dev = &hr_dev->pdev->dev;
3808        u32 cqn;
3809
3810        cqn = roce_get_field(aeqe->event.cq_event.cq,
3811                          HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
3812                          HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S);
3813
3814        switch (event_type) {
3815        case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
3816                dev_warn(dev, "CQ 0x%x access err.\n", cqn);
3817                break;
3818        case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
3819                dev_warn(dev, "CQ 0x%x overflow\n", cqn);
3820                break;
3821        case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
3822                dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn);
3823                break;
3824        default:
3825                break;
3826        }
3827
3828        hns_roce_cq_event(hr_dev, cqn, event_type);
3829}
3830
3831static void hns_roce_v1_db_overflow_handle(struct hns_roce_dev *hr_dev,
3832                                           struct hns_roce_aeqe *aeqe)
3833{
3834        struct device *dev = &hr_dev->pdev->dev;
3835
3836        switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
3837                               HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
3838        case HNS_ROCE_DB_SUBTYPE_SDB_OVF:
3839                dev_warn(dev, "SDB overflow.\n");
3840                break;
3841        case HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF:
3842                dev_warn(dev, "SDB almost overflow.\n");
3843                break;
3844        case HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP:
3845                dev_warn(dev, "SDB almost empty.\n");
3846                break;
3847        case HNS_ROCE_DB_SUBTYPE_ODB_OVF:
3848                dev_warn(dev, "ODB overflow.\n");
3849                break;
3850        case HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF:
3851                dev_warn(dev, "ODB almost overflow.\n");
3852                break;
3853        case HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP:
3854                dev_warn(dev, "ODB almost empty.\n");
3855                break;
3856        default:
3857                break;
3858        }
3859}
3860
3861static struct hns_roce_aeqe *get_aeqe_v1(struct hns_roce_eq *eq, u32 entry)
3862{
3863        unsigned long off = (entry & (eq->entries - 1)) *
3864                             HNS_ROCE_AEQ_ENTRY_SIZE;
3865
3866        return (struct hns_roce_aeqe *)((u8 *)
3867                (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
3868                off % HNS_ROCE_BA_SIZE);
3869}
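/*
 * Offset arithmetic sketch (hypothetical sizes, assuming a 4096-byte
 * HNS_ROCE_BA_SIZE and 16-byte AEQEs): entry 300 of a 512-entry ring gives
 * off = 300 * 16 = 4800, which resolves to offset 4800 % 4096 = 704 inside
 * buf_list[4800 / 4096] = buf_list[1].
 */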
3870
3871static struct hns_roce_aeqe *next_aeqe_sw_v1(struct hns_roce_eq *eq)
3872{
3873        struct hns_roce_aeqe *aeqe = get_aeqe_v1(eq, eq->cons_index);
3874
3875        return (roce_get_bit(aeqe->asyn, HNS_ROCE_AEQE_U32_4_OWNER_S) ^
3876                !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
3877}
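/*
 * Ownership scheme: the hardware flips the owner bit it writes on each
 * pass through the ring, and (cons_index & eq->entries) flips on each
 * consumer wrap, so an entry is fresh exactly when its owner bit differs
 * from the current pass parity; otherwise NULL stops the polling loop.
 */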
3878
3879static int hns_roce_v1_aeq_int(struct hns_roce_dev *hr_dev,
3880                               struct hns_roce_eq *eq)
3881{
3882        struct device *dev = &hr_dev->pdev->dev;
3883        struct hns_roce_aeqe *aeqe;
3884        int aeqes_found = 0;
3885        int event_type;
3886
3887        while ((aeqe = next_aeqe_sw_v1(eq))) {
3889                /* Make sure we read the AEQ entry after we have checked the
3890                 * ownership bit
3891                 */
3892                dma_rmb();
3893
3894                event_type = roce_get_field(aeqe->asyn,
3895                                            HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
3896                                            HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S);
3897                dev_dbg(dev, "aeqe = %pK, aeqe->asyn.event_type = 0x%x\n",
3898                        aeqe, event_type);
3902                switch (event_type) {
3903                case HNS_ROCE_EVENT_TYPE_PATH_MIG:
3904                        dev_warn(dev, "PATH MIG not supported\n");
3905                        break;
3906                case HNS_ROCE_EVENT_TYPE_COMM_EST:
3907                        dev_warn(dev, "COMMUNICATION established\n");
3908                        break;
3909                case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
3910                        dev_warn(dev, "SQ DRAINED not supported\n");
3911                        break;
3912                case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
3913                        dev_warn(dev, "PATH MIG failed\n");
3914                        break;
3915                case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
3916                case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
3917                case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
3918                        hns_roce_v1_qp_err_handle(hr_dev, aeqe, event_type);
3919                        break;
3920                case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
3921                case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
3922                case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
3923                        dev_warn(dev, "SRQ not supported!\n");
3924                        break;
3925                case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
3926                case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
3927                case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
3928                        hns_roce_v1_cq_err_handle(hr_dev, aeqe, event_type);
3929                        break;
3930                case HNS_ROCE_EVENT_TYPE_PORT_CHANGE:
3931                        dev_warn(dev, "port change.\n");
3932                        break;
3933                case HNS_ROCE_EVENT_TYPE_MB:
3934                        hns_roce_cmd_event(hr_dev,
3935                                           le16_to_cpu(aeqe->event.cmd.token),
3936                                           aeqe->event.cmd.status,
3937                                           le64_to_cpu(aeqe->event.cmd.out_param));
3939                        break;
3940                case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
3941                        hns_roce_v1_db_overflow_handle(hr_dev, aeqe);
3942                        break;
3943                case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
3944                        dev_warn(dev, "CEQ 0x%lx overflow.\n",
3945                                 roce_get_field(aeqe->event.ce_event.ceqe,
3946                                       HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M,
3947                                       HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S));
3948                        break;
3949                default:
3950                        dev_warn(dev, "Unhandled event %d on EQ %d at idx %u.\n",
3951                                 event_type, eq->eqn, eq->cons_index);
3952                        break;
3953                }
3954
3955                eq->cons_index++;
3956                aeqes_found = 1;
3957
3958                if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1) {
3959                        dev_warn(dev, "cons_index overflow, set back to 0.\n");
3960                        eq->cons_index = 0;
3961                }
3962        }
3963
3964        set_eq_cons_index_v1(eq, 0);
3965
3966        return aeqes_found;
3967}
3968
3969static struct hns_roce_ceqe *get_ceqe_v1(struct hns_roce_eq *eq, u32 entry)
3970{
3971        unsigned long off = (entry & (eq->entries - 1)) *
3972                             HNS_ROCE_CEQ_ENTRY_SIZE;
3973
3974        return (struct hns_roce_ceqe *)((u8 *)
3975                        (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
3976                        off % HNS_ROCE_BA_SIZE);
3977}
3978
3979static struct hns_roce_ceqe *next_ceqe_sw_v1(struct hns_roce_eq *eq)
3980{
3981        struct hns_roce_ceqe *ceqe = get_ceqe_v1(eq, eq->cons_index);
3982
3983        return (roce_get_bit(ceqe->comp, HNS_ROCE_CEQE_CEQE_COMP_OWNER_S) ^
3984                !!(eq->cons_index & eq->entries)) ? ceqe : NULL;
3986}
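/* Same owner-bit/pass-parity scheme as next_aeqe_sw_v1() above. */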
3987
3988static int hns_roce_v1_ceq_int(struct hns_roce_dev *hr_dev,
3989                               struct hns_roce_eq *eq)
3990{
3991        struct hns_roce_ceqe *ceqe;
3992        int ceqes_found = 0;
3993        u32 cqn;
3994
3995        while ((ceqe = next_ceqe_sw_v1(eq))) {
3997                /* Make sure we read CEQ entry after we have checked the
3998                 * ownership bit
3999                 */
4000                dma_rmb();
4001
4002                cqn = roce_get_field(ceqe->comp,
4003                                     HNS_ROCE_CEQE_CEQE_COMP_CQN_M,
4004                                     HNS_ROCE_CEQE_CEQE_COMP_CQN_S);
4005                hns_roce_cq_completion(hr_dev, cqn);
4006
4007                ++eq->cons_index;
4008                ceqes_found = 1;
4009
4010                if (eq->cons_index >
4011                    EQ_DEPTH_COEFF * hr_dev->caps.ceqe_depth - 1) {
4012                        dev_warn(&eq->hr_dev->pdev->dev,
4013                                "cons_index overflow, set back to 0.\n");
4014                        eq->cons_index = 0;
4015                }
4016        }
4017
4018        set_eq_cons_index_v1(eq, 0);
4019
4020        return ceqes_found;
4021}
4022
4023static irqreturn_t hns_roce_v1_msix_interrupt_eq(int irq, void *eq_ptr)
4024{
4025        struct hns_roce_eq  *eq  = eq_ptr;
4026        struct hns_roce_dev *hr_dev = eq->hr_dev;
4027        int int_work = 0;
4028
4029        if (eq->type_flag == HNS_ROCE_CEQ)
4030                /* CEQ irq handler; CEQ is a pulse irq, nothing to clear */
4031                int_work = hns_roce_v1_ceq_int(hr_dev, eq);
4032        else
4033                /* AEQ irq handler; AEQ is a pulse irq, nothing to clear */
4034                int_work = hns_roce_v1_aeq_int(hr_dev, eq);
4035
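        /* IRQ_RETVAL() maps nonzero work done to IRQ_HANDLED, 0 to IRQ_NONE */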
4036        return IRQ_RETVAL(int_work);
4037}
4038
4039static irqreturn_t hns_roce_v1_msix_interrupt_abn(int irq, void *dev_id)
4040{
4041        struct hns_roce_dev *hr_dev = dev_id;
4042        struct device *dev = &hr_dev->pdev->dev;
4043        int int_work = 0;
4044        u32 caepaemask_val;
4045        u32 cealmovf_val;
4046        u32 caepaest_val;
4047        u32 aeshift_val;
4048        u32 ceshift_val;
4049        u32 cemask_val;
4050        __le32 tmp;
4051        int i;
4052
4053        /*
4054         * Abnormal interrupts (AEQ overflow, ECC multi-bit error, CEQ
4055         * overflow) must be cleared explicitly: mask the irq, clear the
4056         * interrupt state, then cancel the mask.
4057         */
4058        aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);
4059        tmp = cpu_to_le32(aeshift_val);
4060
4061        /* AEQE overflow */
4062        if (roce_get_bit(tmp,
4063                ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) {
4064                dev_warn(dev, "AEQ overflow!\n");
4065
4066                /* Set mask */
4067                caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
4068                tmp = cpu_to_le32(caepaemask_val);
4069                roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
4070                             HNS_ROCE_INT_MASK_ENABLE);
4071                caepaemask_val = le32_to_cpu(tmp);
4072                roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
4073
4074                /* Clear int state(INT_WC : write 1 clear) */
4075                caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG);
4076                tmp = cpu_to_le32(caepaest_val);
4077                roce_set_bit(tmp, ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1);
4078                caepaest_val = le32_to_cpu(tmp);
4079                roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val);
4080
4081                /* Clear mask */
4082                caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
4083                tmp = cpu_to_le32(caepaemask_val);
4084                roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
4085                             HNS_ROCE_INT_MASK_DISABLE);
4086                caepaemask_val = le32_to_cpu(tmp);
4087                roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
4088        }
4089
4090        /* CEQ almost overflow */
4091        for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
4092                ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG +
4093                                        i * CEQ_REG_OFFSET);
4094                tmp = cpu_to_le32(ceshift_val);
4095
4096                if (roce_get_bit(tmp,
4097                        ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) {
4098                        dev_warn(dev, "CEQ[%d] almost overflow!\n", i);
4099                        int_work++;
4100
4101                        /* Set mask */
4102                        cemask_val = roce_read(hr_dev,
4103                                               ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4104                                               i * CEQ_REG_OFFSET);
4105                        tmp = cpu_to_le32(cemask_val);
4106                        roce_set_bit(tmp,
4107                                ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
4108                                HNS_ROCE_INT_MASK_ENABLE);
4109                        cemask_val = le32_to_cpu(tmp);
4110                        roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4111                                   i * CEQ_REG_OFFSET, cemask_val);
4112
4113                        /* Clear int state(INT_WC : write 1 clear) */
4114                        cealmovf_val = roce_read(hr_dev,
4115                                       ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
4116                                       i * CEQ_REG_OFFSET);
4117                        tmp = cpu_to_le32(cealmovf_val);
4118                        roce_set_bit(tmp,
4119                                     ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S,
4120                                     1);
4121                        cealmovf_val = le32_to_cpu(tmp);
4122                        roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
4123                                   i * CEQ_REG_OFFSET, cealmovf_val);
4124
4125                        /* Clear mask */
4126                        cemask_val = roce_read(hr_dev,
4127                                     ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4128                                     i * CEQ_REG_OFFSET);
4129                        tmp = cpu_to_le32(cemask_val);
4130                        roce_set_bit(tmp,
4131                               ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
4132                               HNS_ROCE_INT_MASK_DISABLE);
4133                        cemask_val = le32_to_cpu(tmp);
4134                        roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4135                                   i * CEQ_REG_OFFSET, cemask_val);
4136                }
4137        }
4138
4139        /* ECC multi-bit error alarm */
4140        dev_warn(dev, "ECC UCERR ALARM: 0x%x, 0x%x, 0x%x\n",
4141                 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM0_REG),
4142                 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM1_REG),
4143                 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM2_REG));
4144
4145        dev_warn(dev, "ECC CERR ALARM: 0x%x, 0x%x, 0x%x\n",
4146                 roce_read(hr_dev, ROCEE_ECC_CERR_ALM0_REG),
4147                 roce_read(hr_dev, ROCEE_ECC_CERR_ALM1_REG),
4148                 roce_read(hr_dev, ROCEE_ECC_CERR_ALM2_REG));
4149
4150        return IRQ_RETVAL(int_work);
4151}
4152
4153static void hns_roce_v1_int_mask_enable(struct hns_roce_dev *hr_dev)
4154{
4155        u32 aemask_val;
4156        int masken = 0;
4157        __le32 tmp;
4158        int i;
4159
4160        /* AEQ INT */
4161        aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
4162        tmp = cpu_to_le32(aemask_val);
4163        roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
4164                     masken);
4165        roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken);
4166        aemask_val = le32_to_cpu(tmp);
4167        roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val);
4168
4169        /* CEQ INT */
4170        for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
4171                /* IRQ mask */
4172                roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4173                           i * CEQ_REG_OFFSET, masken);
4174        }
4175}
4176
4177static void hns_roce_v1_free_eq(struct hns_roce_dev *hr_dev,
4178                                struct hns_roce_eq *eq)
4179{
4180        int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) +
4181                      HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
4182        int i;
4183
4184        if (!eq->buf_list)
4185                return;
4186
4187        for (i = 0; i < npages; ++i)
4188                dma_free_coherent(&hr_dev->pdev->dev, HNS_ROCE_BA_SIZE,
4189                                  eq->buf_list[i].buf, eq->buf_list[i].map);
4190
4191        kfree(eq->buf_list);
4192}
4193
4194static void hns_roce_v1_enable_eq(struct hns_roce_dev *hr_dev, int eq_num,
4195                                  int enable_flag)
4196{
4197        void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num];
4198        __le32 tmp;
4199        u32 val;
4200
4201        val = readl(eqc);
4202        tmp = cpu_to_le32(val);
4203
4204        if (enable_flag)
4205                roce_set_field(tmp,
4206                               ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
4207                               ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
4208                               HNS_ROCE_EQ_STAT_VALID);
4209        else
4210                roce_set_field(tmp,
4211                               ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
4212                               ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
4213                               HNS_ROCE_EQ_STAT_INVALID);
4214
4215        val = le32_to_cpu(tmp);
4216        writel(val, eqc);
4217}
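/*
 * Note: this helper arms/disarms both AEQs and CEQs with the AEQC state
 * field macros, which presumes the CEQC register keeps its state field at
 * the same bit position.
 */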
4218
4219static int hns_roce_v1_create_eq(struct hns_roce_dev *hr_dev,
4220                                 struct hns_roce_eq *eq)
4221{
4222        void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn];
4223        struct device *dev = &hr_dev->pdev->dev;
4224        dma_addr_t tmp_dma_addr;
4225        u32 eqconsindx_val = 0;
4226        u32 eqcuridx_val = 0;
4227        u32 eqshift_val = 0;
4228        __le32 tmp2 = 0;
4229        __le32 tmp1 = 0;
4230        __le32 tmp = 0;
4231        int num_bas;
4232        int ret;
4233        int i;
4234
4235        num_bas = (PAGE_ALIGN(eq->entries * eq->eqe_size) +
4236                   HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
4237
4238        if ((eq->entries * eq->eqe_size) > HNS_ROCE_BA_SIZE) {
4239                dev_err(dev, "EQ buf size %d exceeds BA size %d, would need %d BAs\n",
4240                        (eq->entries * eq->eqe_size), HNS_ROCE_BA_SIZE,
4241                        num_bas);
4242                return -EINVAL;
4243        }
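        /*
         * Given the check above, a v1 EQ always fits in a single BA-sized
         * chunk, so the allocation loop below effectively runs once.
         */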
4244
4245        eq->buf_list = kcalloc(num_bas, sizeof(*eq->buf_list), GFP_KERNEL);
4246        if (!eq->buf_list)
4247                return -ENOMEM;
4248
4249        for (i = 0; i < num_bas; ++i) {
4250                eq->buf_list[i].buf = dma_alloc_coherent(dev, HNS_ROCE_BA_SIZE,
4251                                                         &tmp_dma_addr,
4252                                                         GFP_KERNEL);
4253                if (!eq->buf_list[i].buf) {
4254                        ret = -ENOMEM;
4255                        goto err_out_free_pages;
4256                }
4257
4258                eq->buf_list[i].map = tmp_dma_addr;
4259        }
4260        eq->cons_index = 0;
4261        roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
4262                       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
4263                       HNS_ROCE_EQ_STAT_INVALID);
4264        roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M,
4265                       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S,
4266                       eq->log_entries);
4267        eqshift_val = le32_to_cpu(tmp);
4268        writel(eqshift_val, eqc);
4269
4270        /* Configure eq extended address bits 12~44 */
4271        writel((u32)(eq->buf_list[0].map >> 12), eqc + 4);
4272
4273        /*
4274         * Configure eq extended address bits 45~49.
4275         * 44 = 32 + 12: the address is shifted right by 12 because the
4276         * hardware uses 4K pages, and by a further 32 to extract the high
4277         * 32-bit part of the value handed to the hardware.
4278         */
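        /*
         * Worked example (hypothetical address): with
         * eq->buf_list[0].map = 0x123456789000, the register above receives
         * map >> 12 = 0x123456789 truncated to u32 (0x23456789), and the
         * BT_H field below receives map >> 44 = 0x1.
         */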
4279        roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
4280                       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S,
4281                       eq->buf_list[0].map >> 44);
4282        roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
4283                       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0);
4284        eqcuridx_val = le32_to_cpu(tmp1);
4285        writel(eqcuridx_val, eqc + 8);
4286
4287        /* Configure eq consumer index */
4288        roce_set_field(tmp2, ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
4289                       ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0);
4290        eqconsindx_val = le32_to_cpu(tmp2);
4291        writel(eqconsindx_val, eqc + 0xc);
4292
4293        return 0;
4294
4295err_out_free_pages:
4296        for (i -= 1; i >= 0; i--)
4297                dma_free_coherent(dev, HNS_ROCE_BA_SIZE, eq->buf_list[i].buf,
4298                                  eq->buf_list[i].map);
4299
4300        kfree(eq->buf_list);
4301        return ret;
4302}
4303
4304static int hns_roce_v1_init_eq_table(struct hns_roce_dev *hr_dev)
4305{
4306        struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
4307        struct device *dev = &hr_dev->pdev->dev;
4308        struct hns_roce_eq *eq;
4309        int irq_num;
4310        int eq_num;
4311        int ret;
4312        int i, j;
4313
4314        eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
4315        irq_num = eq_num + hr_dev->caps.num_other_vectors;
4316
4317        eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
4318        if (!eq_table->eq)
4319                return -ENOMEM;
4320
4321        eq_table->eqc_base = kcalloc(eq_num, sizeof(*eq_table->eqc_base),
4322                                     GFP_KERNEL);
4323        if (!eq_table->eqc_base) {
4324                ret = -ENOMEM;
4325                goto err_eqc_base_alloc_fail;
4326        }
4327
4328        for (i = 0; i < eq_num; i++) {
4329                eq = &eq_table->eq[i];
4330                eq->hr_dev = hr_dev;
4331                eq->eqn = i;
4332                eq->irq = hr_dev->irq[i];
4333                eq->log_page_size = PAGE_SHIFT;
4334
4335                if (i < hr_dev->caps.num_comp_vectors) {
4336                        /* CEQ */
4337                        eq_table->eqc_base[i] = hr_dev->reg_base +
4338                                                ROCEE_CAEP_CEQC_SHIFT_0_REG +
4339                                                CEQ_REG_OFFSET * i;
4340                        eq->type_flag = HNS_ROCE_CEQ;
4341                        eq->doorbell = hr_dev->reg_base +
4342                                       ROCEE_CAEP_CEQC_CONS_IDX_0_REG +
4343                                       CEQ_REG_OFFSET * i;
4344                        eq->entries = hr_dev->caps.ceqe_depth;
4345                        eq->log_entries = ilog2(eq->entries);
4346                        eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
4347                } else {
4348                        /* AEQ */
4349                        eq_table->eqc_base[i] = hr_dev->reg_base +
4350                                                ROCEE_CAEP_AEQC_AEQE_SHIFT_REG;
4351                        eq->type_flag = HNS_ROCE_AEQ;
4352                        eq->doorbell = hr_dev->reg_base +
4353                                       ROCEE_CAEP_AEQE_CONS_IDX_REG;
4354                        eq->entries = hr_dev->caps.aeqe_depth;
4355                        eq->log_entries = ilog2(eq->entries);
4356                        eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
4357                }
4358        }
4359
4360        /* Disable irq */
4361        hns_roce_v1_int_mask_enable(hr_dev);
4362
4363        /* Configure ce int interval */
4364        roce_write(hr_dev, ROCEE_CAEP_CE_INTERVAL_CFG_REG,
4365                   HNS_ROCE_CEQ_DEFAULT_INTERVAL);
4366
4367        /* Configure ce int burst num */
4368        roce_write(hr_dev, ROCEE_CAEP_CE_BURST_NUM_CFG_REG,
4369                   HNS_ROCE_CEQ_DEFAULT_BURST_NUM);
4370
4371        for (i = 0; i < eq_num; i++) {
4372                ret = hns_roce_v1_create_eq(hr_dev, &eq_table->eq[i]);
4373                if (ret) {
4374                        dev_err(dev, "eq create failed\n");
4375                        goto err_create_eq_fail;
4376                }
4377        }
4378
4379        for (j = 0; j < irq_num; j++) {
4380                if (j < eq_num)
4381                        ret = request_irq(hr_dev->irq[j],
4382                                          hns_roce_v1_msix_interrupt_eq, 0,
4383                                          hr_dev->irq_names[j],
4384                                          &eq_table->eq[j]);
4385                else
4386                        ret = request_irq(hr_dev->irq[j],
4387                                          hns_roce_v1_msix_interrupt_abn, 0,
4388                                          hr_dev->irq_names[j], hr_dev);
4389
4390                if (ret) {
4391                        dev_err(dev, "request irq error!\n");
4392                        goto err_request_irq_fail;
4393                }
4394        }
4395
4396        for (i = 0; i < eq_num; i++)
4397                hns_roce_v1_enable_eq(hr_dev, i, EQ_ENABLE);
4398
4399        return 0;
4400
4401err_request_irq_fail:
4402        for (j -= 1; j >= 0; j--)
4403                free_irq(hr_dev->irq[j], &eq_table->eq[j]);
4404
4405err_create_eq_fail:
4406        for (i -= 1; i >= 0; i--)
4407                hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);
4408
4409        kfree(eq_table->eqc_base);
4410
4411err_eqc_base_alloc_fail:
4412        kfree(eq_table->eq);
4413
4414        return ret;
4415}
4416
4417static void hns_roce_v1_cleanup_eq_table(struct hns_roce_dev *hr_dev)
4418{
4419        struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
4420        int irq_num;
4421        int eq_num;
4422        int i;
4423
4424        eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
4425        irq_num = eq_num + hr_dev->caps.num_other_vectors;
4426        for (i = 0; i < eq_num; i++) {
4427                /* Disable EQ */
4428                hns_roce_v1_enable_eq(hr_dev, i, EQ_DISABLE);
4429
4430                free_irq(hr_dev->irq[i], &eq_table->eq[i]);
4431
4432                hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);
4433        }
4434        for (i = eq_num; i < irq_num; i++)
4435                free_irq(hr_dev->irq[i], hr_dev);
4436
4437        kfree(eq_table->eqc_base);
4438        kfree(eq_table->eq);
4439}
4440
4441static const struct ib_device_ops hns_roce_v1_dev_ops = {
4442        .destroy_qp = hns_roce_v1_destroy_qp,
4443        .modify_cq = hns_roce_v1_modify_cq,
4444        .poll_cq = hns_roce_v1_poll_cq,
4445        .post_recv = hns_roce_v1_post_recv,
4446        .post_send = hns_roce_v1_post_send,
4447        .query_qp = hns_roce_v1_query_qp,
4448        .req_notify_cq = hns_roce_v1_req_notify_cq,
4449};
4450
4451static const struct hns_roce_hw hns_roce_hw_v1 = {
4452        .reset = hns_roce_v1_reset,
4453        .hw_profile = hns_roce_v1_profile,
4454        .hw_init = hns_roce_v1_init,
4455        .hw_exit = hns_roce_v1_exit,
4456        .post_mbox = hns_roce_v1_post_mbox,
4457        .chk_mbox = hns_roce_v1_chk_mbox,
4458        .set_gid = hns_roce_v1_set_gid,
4459        .set_mac = hns_roce_v1_set_mac,
4460        .set_mtu = hns_roce_v1_set_mtu,
4461        .write_mtpt = hns_roce_v1_write_mtpt,
4462        .write_cqc = hns_roce_v1_write_cqc,
4463        .modify_cq = hns_roce_v1_modify_cq,
4464        .clear_hem = hns_roce_v1_clear_hem,
4465        .modify_qp = hns_roce_v1_modify_qp,
4466        .query_qp = hns_roce_v1_query_qp,
4467        .destroy_qp = hns_roce_v1_destroy_qp,
4468        .post_send = hns_roce_v1_post_send,
4469        .post_recv = hns_roce_v1_post_recv,
4470        .req_notify_cq = hns_roce_v1_req_notify_cq,
4471        .poll_cq = hns_roce_v1_poll_cq,
4472        .dereg_mr = hns_roce_v1_dereg_mr,
4473        .destroy_cq = hns_roce_v1_destroy_cq,
4474        .init_eq = hns_roce_v1_init_eq_table,
4475        .cleanup_eq = hns_roce_v1_cleanup_eq_table,
4476        .hns_roce_dev_ops = &hns_roce_v1_dev_ops,
4477};
4478
4479static const struct of_device_id hns_roce_of_match[] = {
4480        { .compatible = "hisilicon,hns-roce-v1", .data = &hns_roce_hw_v1, },
4481        {},
4482};
4483MODULE_DEVICE_TABLE(of, hns_roce_of_match);
4484
4485static const struct acpi_device_id hns_roce_acpi_match[] = {
4486        { "HISI00D1", (kernel_ulong_t)&hns_roce_hw_v1 },
4487        {},
4488};
4489MODULE_DEVICE_TABLE(acpi, hns_roce_acpi_match);
4490
4491static struct platform_device *hns_roce_find_pdev(struct fwnode_handle *fwnode)
4493{
4494        struct device *dev;
4495
4496        /* get the 'device' corresponding to the matching 'fwnode' */
4497        dev = bus_find_device_by_fwnode(&platform_bus_type, fwnode);
4498        /* get the platform device */
4499        return dev ? to_platform_device(dev) : NULL;
4500}
4501
4502static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
4503{
4504        struct device *dev = &hr_dev->pdev->dev;
4505        struct platform_device *pdev = NULL;
4506        struct net_device *netdev = NULL;
4507        struct device_node *net_node;
4508        int port_cnt = 0;
4509        u8 phy_port;
4510        int ret;
4511        int i;
4512
4513        /* check if we are compatible with the underlying SoC */
4514        if (dev_of_node(dev)) {
4515                const struct of_device_id *of_id;
4516
4517                of_id = of_match_node(hns_roce_of_match, dev->of_node);
4518                if (!of_id) {
4519                        dev_err(dev, "device is not compatible!\n");
4520                        return -ENXIO;
4521                }
4522                hr_dev->hw = (const struct hns_roce_hw *)of_id->data;
4523                if (!hr_dev->hw) {
4524                        dev_err(dev, "couldn't get H/W specific DT data!\n");
4525                        return -ENXIO;
4526                }
4527        } else if (is_acpi_device_node(dev->fwnode)) {
4528                const struct acpi_device_id *acpi_id;
4529
4530                acpi_id = acpi_match_device(hns_roce_acpi_match, dev);
4531                if (!acpi_id) {
4532                        dev_err(dev, "device is not compatible!\n");
4533                        return -ENXIO;
4534                }
4535                hr_dev->hw = (const struct hns_roce_hw *) acpi_id->driver_data;
4536                if (!hr_dev->hw) {
4537                        dev_err(dev, "couldn't get H/W specific ACPI data!\n");
4538                        return -ENXIO;
4539                }
4540        } else {
4541                dev_err(dev, "can't read compatibility data from DT or ACPI\n");
4542                return -ENXIO;
4543        }
4544
4545        /* get the mapped register base address */
4546        hr_dev->reg_base = devm_platform_ioremap_resource(hr_dev->pdev, 0);
4547        if (IS_ERR(hr_dev->reg_base))
4548                return PTR_ERR(hr_dev->reg_base);
4549
4550        /* read the node_guid of IB device from the DT or ACPI */
4551        ret = device_property_read_u8_array(dev, "node-guid",
4552                                            (u8 *)&hr_dev->ib_dev.node_guid,
4553                                            GUID_LEN);
4554        if (ret) {
4555                dev_err(dev, "couldn't get node_guid from DT or ACPI!\n");
4556                return ret;
4557        }
4558
4559        /* get the RoCE associated ethernet ports or netdevices */
4560        for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) {
4561                if (dev_of_node(dev)) {
4562                        net_node = of_parse_phandle(dev->of_node, "eth-handle",
4563                                                    i);
4564                        if (!net_node)
4565                                continue;
4566                        pdev = of_find_device_by_node(net_node);
4567                } else if (is_acpi_device_node(dev->fwnode)) {
4568                        struct fwnode_reference_args args;
4569
4570                        ret = acpi_node_get_property_reference(dev->fwnode,
4571                                                               "eth-handle",
4572                                                               i, &args);
4573                        if (ret)
4574                                continue;
4575                        pdev = hns_roce_find_pdev(args.fwnode);
4576                } else {
4577                        dev_err(dev, "cannot read data from DT or ACPI\n");
4578                        return -ENXIO;
4579                }
4580
4581                if (pdev) {
4582                        netdev = platform_get_drvdata(pdev);
4583                        phy_port = (u8)i;
4584                        if (netdev) {
4585                                hr_dev->iboe.netdevs[port_cnt] = netdev;
4586                                hr_dev->iboe.phy_port[port_cnt] = phy_port;
4587                        } else {
4588                                dev_err(dev, "no netdev found with pdev %s\n",
4589                                        pdev->name);
4590                                return -ENODEV;
4591                        }
4592                        port_cnt++;
4593                }
4594        }
4595
4596        if (port_cnt == 0) {
4597                dev_err(dev, "unable to get eth-handle for available ports!\n");
4598                return -EINVAL;
4599        }
4600
4601        hr_dev->caps.num_ports = port_cnt;
4602
4603        /* cmd issue mode: 0 is poll, 1 is event */
4604        hr_dev->cmd_mod = 1;
4605        hr_dev->loop_idc = 0;
4606        hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
4607        hr_dev->odb_offset = ROCEE_DB_OTHERS_L_0_REG;
4608
4609        /* read the interrupt names from the DT or ACPI */
4610        ret = device_property_read_string_array(dev, "interrupt-names",
4611                                                hr_dev->irq_names,
4612                                                HNS_ROCE_V1_MAX_IRQ_NUM);
4613        if (ret < 0) {
4614                dev_err(dev, "couldn't get interrupt names from DT or ACPI!\n");
4615                return ret;
4616        }
4617
4618        /* fetch the interrupt numbers */
4619        for (i = 0; i < HNS_ROCE_V1_MAX_IRQ_NUM; i++) {
4620                hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i);
4621                if (hr_dev->irq[i] <= 0)
4622                        return -EINVAL;
4623        }
4624
4625        return 0;
4626}
4627
4628/**
4629 * hns_roce_probe - RoCE driver entry point
4630 * @pdev: pointer to platform device
4631 *
4632 * Return: 0 on success, a negative errno on failure.
4633 */
4634static int hns_roce_probe(struct platform_device *pdev)
4635{
4636        int ret;
4637        struct hns_roce_dev *hr_dev;
4638        struct device *dev = &pdev->dev;
4639
4640        hr_dev = ib_alloc_device(hns_roce_dev, ib_dev);
4641        if (!hr_dev)
4642                return -ENOMEM;
4643
4644        hr_dev->priv = kzalloc(sizeof(struct hns_roce_v1_priv), GFP_KERNEL);
4645        if (!hr_dev->priv) {
4646                ret = -ENOMEM;
4647                goto error_failed_kzalloc;
4648        }
4649
4650        hr_dev->pdev = pdev;
4651        hr_dev->dev = dev;
4652        platform_set_drvdata(pdev, hr_dev);
4653
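        /* Prefer 64-bit DMA addressing and fall back to 32-bit if needed */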
4654        if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
4655            dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
4656                dev_err(dev, "No usable DMA addressing mode\n");
4657                ret = -EIO;
4658                goto error_failed_get_cfg;
4659        }
4660
4661        ret = hns_roce_get_cfg(hr_dev);
4662        if (ret) {
4663                dev_err(dev, "Get Configuration failed!\n");
4664                goto error_failed_get_cfg;
4665        }
4666
4667        ret = hns_roce_init(hr_dev);
4668        if (ret) {
4669                dev_err(dev, "RoCE engine init failed!\n");
4670                goto error_failed_get_cfg;
4671        }
4672
4673        return 0;
4674
4675error_failed_get_cfg:
4676        kfree(hr_dev->priv);
4677
4678error_failed_kzalloc:
4679        ib_dealloc_device(&hr_dev->ib_dev);
4680
4681        return ret;
4682}
4683
4684/**
4685 * hns_roce_remove - remove RoCE device
4686 * @pdev: pointer to platform device
4687 */
4688static int hns_roce_remove(struct platform_device *pdev)
4689{
4690        struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev);
4691
4692        hns_roce_exit(hr_dev);
4693        kfree(hr_dev->priv);
4694        ib_dealloc_device(&hr_dev->ib_dev);
4695
4696        return 0;
4697}
4698
4699static struct platform_driver hns_roce_driver = {
4700        .probe = hns_roce_probe,
4701        .remove = hns_roce_remove,
4702        .driver = {
4703                .name = DRV_NAME,
4704                .of_match_table = hns_roce_of_match,
4705                .acpi_match_table = ACPI_PTR(hns_roce_acpi_match),
4706        },
4707};
4708
4709module_platform_driver(hns_roce_driver);
4710
4711MODULE_LICENSE("Dual BSD/GPL");
4712MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
4713MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
4714MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
4715MODULE_DESCRIPTION("Hisilicon Hip06 Family RoCE Driver");
4716