/* linux/drivers/infiniband/hw/hns/hns_roce_hw_v1.c */
   1/*
   2 * Copyright (c) 2016 Hisilicon Limited.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 */
  32
  33#include <linux/platform_device.h>
  34#include <linux/acpi.h>
  35#include <linux/etherdevice.h>
  36#include <linux/interrupt.h>
  37#include <linux/of.h>
  38#include <linux/of_platform.h>
  39#include <rdma/ib_umem.h>
  40#include "hns_roce_common.h"
  41#include "hns_roce_device.h"
  42#include "hns_roce_cmd.h"
  43#include "hns_roce_hem.h"
  44#include "hns_roce_hw_v1.h"
  45
  46static void set_data_seg(struct hns_roce_wqe_data_seg *dseg, struct ib_sge *sg)
  47{
  48        dseg->lkey = cpu_to_le32(sg->lkey);
  49        dseg->addr = cpu_to_le64(sg->addr);
  50        dseg->len  = cpu_to_le32(sg->length);
  51}
  52
  53static void set_raddr_seg(struct hns_roce_wqe_raddr_seg *rseg, u64 remote_addr,
  54                          u32 rkey)
  55{
  56        rseg->raddr = cpu_to_le64(remote_addr);
  57        rseg->rkey  = cpu_to_le32(rkey);
  58        rseg->len   = 0;
  59}
  60
/*
 * Post a chain of send work requests onto the QP's send queue and ring the
 * send doorbell.  Only GSI (QP1, UD-style WQE) and RC QPs are handled by
 * this HIP06 engine; each WR is translated in place into a hardware WQE.
 *
 * Returns 0 on success or a negative errno.  On failure *bad_wr points at
 * the first WR that could not be posted; WRs before it were still queued
 * and the doorbell is rung for them.
 */
static int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                                 struct ib_send_wr **bad_wr)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        /* Only meaningful for the GSI path; harmless otherwise. */
        struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
        struct hns_roce_ud_send_wqe *ud_sq_wqe = NULL;
        struct hns_roce_wqe_ctrl_seg *ctrl = NULL;
        struct hns_roce_wqe_data_seg *dseg = NULL;
        struct hns_roce_qp *qp = to_hr_qp(ibqp);
        struct device *dev = &hr_dev->pdev->dev;
        struct hns_roce_sq_db sq_db;
        int ps_opcode = 0, i = 0;
        unsigned long flags = 0;
        void *wqe = NULL;
        u32 doorbell[2];
        int nreq = 0;
        u32 ind = 0;
        int ret = 0;
        u8 *smac;
        int loopback;

        if (unlikely(ibqp->qp_type != IB_QPT_GSI &&
                ibqp->qp_type != IB_QPT_RC)) {
                dev_err(dev, "un-supported QP type\n");
                /*
                 * NOTE(review): every other error path sets *bad_wr = wr;
                 * confirm callers tolerate NULL here.
                 */
                *bad_wr = NULL;
                return -EOPNOTSUPP;
        }

        spin_lock_irqsave(&qp->sq.lock, flags);
        /* Next free WQE slot; only committed back on successful doorbell. */
        ind = qp->sq_next_wqe;
        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
                        ret = -ENOMEM;
                        *bad_wr = wr;
                        goto out;
                }

                if (unlikely(wr->num_sge > qp->sq.max_gs)) {
                        dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
                                wr->num_sge, qp->sq.max_gs);
                        ret = -EINVAL;
                        *bad_wr = wr;
                        goto out;
                }

                /* wqe_cnt is a power of two, so masking wraps the ring. */
                wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
                qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
                                                                      wr->wr_id;

                /* Corresponding to the RC and RD type wqe process separately */
                if (ibqp->qp_type == IB_QPT_GSI) {
                        /* GSI: build a UD-format WQE from the AH. */
                        ud_sq_wqe = wqe;
                        /* Destination MAC, spread over two WQE words. */
                        roce_set_field(ud_sq_wqe->dmac_h,
                                       UD_SEND_WQE_U32_4_DMAC_0_M,
                                       UD_SEND_WQE_U32_4_DMAC_0_S,
                                       ah->av.mac[0]);
                        roce_set_field(ud_sq_wqe->dmac_h,
                                       UD_SEND_WQE_U32_4_DMAC_1_M,
                                       UD_SEND_WQE_U32_4_DMAC_1_S,
                                       ah->av.mac[1]);
                        roce_set_field(ud_sq_wqe->dmac_h,
                                       UD_SEND_WQE_U32_4_DMAC_2_M,
                                       UD_SEND_WQE_U32_4_DMAC_2_S,
                                       ah->av.mac[2]);
                        roce_set_field(ud_sq_wqe->dmac_h,
                                       UD_SEND_WQE_U32_4_DMAC_3_M,
                                       UD_SEND_WQE_U32_4_DMAC_3_S,
                                       ah->av.mac[3]);

                        roce_set_field(ud_sq_wqe->u32_8,
                                       UD_SEND_WQE_U32_8_DMAC_4_M,
                                       UD_SEND_WQE_U32_8_DMAC_4_S,
                                       ah->av.mac[4]);
                        roce_set_field(ud_sq_wqe->u32_8,
                                       UD_SEND_WQE_U32_8_DMAC_5_M,
                                       UD_SEND_WQE_U32_8_DMAC_5_S,
                                       ah->av.mac[5]);

                        /*
                         * Destination MAC equal to our own port MAC means the
                         * packet loops back inside the device.
                         */
                        smac = (u8 *)hr_dev->dev_addr[qp->port];
                        loopback = ether_addr_equal_unaligned(ah->av.mac,
                                                              smac) ? 1 : 0;
                        roce_set_bit(ud_sq_wqe->u32_8,
                                     UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S,
                                     loopback);

                        roce_set_field(ud_sq_wqe->u32_8,
                                       UD_SEND_WQE_U32_8_OPERATION_TYPE_M,
                                       UD_SEND_WQE_U32_8_OPERATION_TYPE_S,
                                       HNS_ROCE_WQE_OPCODE_SEND);
                        /* GSI WQEs always carry exactly two data segments. */
                        roce_set_field(ud_sq_wqe->u32_8,
                                       UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_M,
                                       UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S,
                                       2);
                        roce_set_bit(ud_sq_wqe->u32_8,
                                UD_SEND_WQE_U32_8_SEND_GL_ROUTING_HDR_FLAG_S,
                                1);

                        /* Completion / solicited / immediate flag bits. */
                        ud_sq_wqe->u32_8 |= (wr->send_flags & IB_SEND_SIGNALED ?
                                cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
                                (wr->send_flags & IB_SEND_SOLICITED ?
                                cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
                                ((wr->opcode == IB_WR_SEND_WITH_IMM) ?
                                cpu_to_le32(HNS_ROCE_WQE_IMM) : 0);

                        roce_set_field(ud_sq_wqe->u32_16,
                                       UD_SEND_WQE_U32_16_DEST_QP_M,
                                       UD_SEND_WQE_U32_16_DEST_QP_S,
                                       ud_wr(wr)->remote_qpn);
                        roce_set_field(ud_sq_wqe->u32_16,
                                       UD_SEND_WQE_U32_16_MAX_STATIC_RATE_M,
                                       UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S,
                                       ah->av.stat_rate);

                        /* GRH fields: flow label fixed to 0, SL from the AH. */
                        roce_set_field(ud_sq_wqe->u32_36,
                                       UD_SEND_WQE_U32_36_FLOW_LABEL_M,
                                       UD_SEND_WQE_U32_36_FLOW_LABEL_S, 0);
                        roce_set_field(ud_sq_wqe->u32_36,
                                       UD_SEND_WQE_U32_36_PRIORITY_M,
                                       UD_SEND_WQE_U32_36_PRIORITY_S,
                                       ah->av.sl_tclass_flowlabel >>
                                       HNS_ROCE_SL_SHIFT);
                        roce_set_field(ud_sq_wqe->u32_36,
                                       UD_SEND_WQE_U32_36_SGID_INDEX_M,
                                       UD_SEND_WQE_U32_36_SGID_INDEX_S,
                                       hns_get_gid_index(hr_dev, qp->phy_port,
                                                         ah->av.gid_index));

                        roce_set_field(ud_sq_wqe->u32_40,
                                       UD_SEND_WQE_U32_40_HOP_LIMIT_M,
                                       UD_SEND_WQE_U32_40_HOP_LIMIT_S,
                                       ah->av.hop_limit);
                        roce_set_field(ud_sq_wqe->u32_40,
                                       UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M,
                                       UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S, 0);

                        memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN);

                        /*
                         * Two fixed inline SGE slots.  NOTE(review):
                         * sg_list[1] is read unconditionally — assumes GSI
                         * callers always supply num_sge == 2; confirm.
                         */
                        ud_sq_wqe->va0_l =
                                       cpu_to_le32((u32)wr->sg_list[0].addr);
                        ud_sq_wqe->va0_h =
                                       cpu_to_le32((wr->sg_list[0].addr) >> 32);
                        ud_sq_wqe->l_key0 =
                                       cpu_to_le32(wr->sg_list[0].lkey);

                        ud_sq_wqe->va1_l =
                                       cpu_to_le32((u32)wr->sg_list[1].addr);
                        ud_sq_wqe->va1_h =
                                       cpu_to_le32((wr->sg_list[1].addr) >> 32);
                        ud_sq_wqe->l_key1 =
                                       cpu_to_le32(wr->sg_list[1].lkey);
                        ind++;
                } else if (ibqp->qp_type == IB_QPT_RC) {
                        u32 tmp_len = 0;

                        ctrl = wqe;
                        memset(ctrl, 0, sizeof(struct hns_roce_wqe_ctrl_seg));
                        /* Total payload length across all SGEs. */
                        for (i = 0; i < wr->num_sge; i++)
                                tmp_len += wr->sg_list[i].length;

                        ctrl->msg_length =
                          cpu_to_le32(le32_to_cpu(ctrl->msg_length) + tmp_len);

                        ctrl->sgl_pa_h = 0;
                        ctrl->flag = 0;

                        /* imm_data and inv_key share a slot in the WQE. */
                        switch (wr->opcode) {
                        case IB_WR_SEND_WITH_IMM:
                        case IB_WR_RDMA_WRITE_WITH_IMM:
                                ctrl->imm_data = wr->ex.imm_data;
                                break;
                        case IB_WR_SEND_WITH_INV:
                                ctrl->inv_key =
                                        cpu_to_le32(wr->ex.invalidate_rkey);
                                break;
                        default:
                                ctrl->imm_data = 0;
                                break;
                        }

                        /*Ctrl field, ctrl set type: sig, solic, imm, fence */
                        /* SO wait for conforming application scenarios */
                        ctrl->flag |= (wr->send_flags & IB_SEND_SIGNALED ?
                                      cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
                                      (wr->send_flags & IB_SEND_SOLICITED ?
                                      cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
                                      ((wr->opcode == IB_WR_SEND_WITH_IMM ||
                                      wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) ?
                                      cpu_to_le32(HNS_ROCE_WQE_IMM) : 0) |
                                      (wr->send_flags & IB_SEND_FENCE ?
                                      (cpu_to_le32(HNS_ROCE_WQE_FENCE)) : 0);

                        wqe += sizeof(struct hns_roce_wqe_ctrl_seg);

                        /*
                         * Map the verbs opcode to the hardware opcode; RDMA
                         * ops also fill the remote-address segment.
                         * Unsupported opcodes fall through to the MASK value.
                         */
                        switch (wr->opcode) {
                        case IB_WR_RDMA_READ:
                                ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_READ;
                                set_raddr_seg(wqe,  rdma_wr(wr)->remote_addr,
                                               rdma_wr(wr)->rkey);
                                break;
                        case IB_WR_RDMA_WRITE:
                        case IB_WR_RDMA_WRITE_WITH_IMM:
                                ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_WRITE;
                                set_raddr_seg(wqe,  rdma_wr(wr)->remote_addr,
                                              rdma_wr(wr)->rkey);
                                break;
                        case IB_WR_SEND:
                        case IB_WR_SEND_WITH_INV:
                        case IB_WR_SEND_WITH_IMM:
                                ps_opcode = HNS_ROCE_WQE_OPCODE_SEND;
                                break;
                        case IB_WR_LOCAL_INV:
                                break;
                        case IB_WR_ATOMIC_CMP_AND_SWP:
                        case IB_WR_ATOMIC_FETCH_AND_ADD:
                        case IB_WR_LSO:
                        default:
                                ps_opcode = HNS_ROCE_WQE_OPCODE_MASK;
                                break;
                        }
                        ctrl->flag |= cpu_to_le32(ps_opcode);
                        /* raddr seg space is always reserved in the WQE. */
                        wqe += sizeof(struct hns_roce_wqe_raddr_seg);

                        dseg = wqe;
                        if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
                                /* Inline: copy the payload into the WQE. */
                                if (le32_to_cpu(ctrl->msg_length) >
                                    hr_dev->caps.max_sq_inline) {
                                        ret = -EINVAL;
                                        *bad_wr = wr;
                                        dev_err(dev, "inline len(1-%d)=%d, illegal",
                                                ctrl->msg_length,
                                                hr_dev->caps.max_sq_inline);
                                        goto out;
                                }
                                for (i = 0; i < wr->num_sge; i++) {
                                        memcpy(wqe, ((void *) (uintptr_t)
                                               wr->sg_list[i].addr),
                                               wr->sg_list[i].length);
                                        wqe += wr->sg_list[i].length;
                                }
                                ctrl->flag |= cpu_to_le32(HNS_ROCE_WQE_INLINE);
                        } else {
                                /*sqe num is two */
                                for (i = 0; i < wr->num_sge; i++)
                                        set_data_seg(dseg + i, wr->sg_list + i);

                                ctrl->flag |= cpu_to_le32(wr->num_sge <<
                                              HNS_ROCE_WQE_SGE_NUM_BIT);
                        }
                        ind++;
                }
        }

out:
        /* Set DB return */
        if (likely(nreq)) {
                qp->sq.head += nreq;
                /* WQE writes must be visible before the doorbell. */
                wmb();

                sq_db.u32_4 = 0;
                sq_db.u32_8 = 0;
                roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SQ_HEAD_M,
                               SQ_DOORBELL_U32_4_SQ_HEAD_S,
                              (qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1)));
                roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SL_M,
                               SQ_DOORBELL_U32_4_SL_S, qp->sl);
                roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_PORT_M,
                               SQ_DOORBELL_U32_4_PORT_S, qp->phy_port);
                roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M,
                               SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn);
                roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1);

                doorbell[0] = le32_to_cpu(sq_db.u32_4);
                doorbell[1] = le32_to_cpu(sq_db.u32_8);

                hns_roce_write64_k(doorbell, qp->sq.db_reg_l);
                qp->sq_next_wqe = ind;
        }

        spin_unlock_irqrestore(&qp->sq.lock, flags);

        return ret;
}
 344
/*
 * Post a chain of receive work requests onto the QP's receive queue.
 * After all WQEs are written, the RQ head is published either via a
 * register write (GSI / QP1) or the normal RQ doorbell.
 *
 * Returns 0 on success or a negative errno; on failure *bad_wr points at
 * the first WR that could not be posted.
 */
static int hns_roce_v1_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                                 struct ib_recv_wr **bad_wr)
{
        int ret = 0;
        int nreq = 0;
        int ind = 0;
        int i = 0;
        u32 reg_val = 0;
        unsigned long flags = 0;
        struct hns_roce_rq_wqe_ctrl *ctrl = NULL;
        struct hns_roce_wqe_data_seg *scat = NULL;
        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct device *dev = &hr_dev->pdev->dev;
        struct hns_roce_rq_db rq_db;
        uint32_t doorbell[2] = {0};

        spin_lock_irqsave(&hr_qp->rq.lock, flags);
        /* wqe_cnt is a power of two, so masking wraps the ring. */
        ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);

        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
                        hr_qp->ibqp.recv_cq)) {
                        ret = -ENOMEM;
                        *bad_wr = wr;
                        goto out;
                }

                if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
                        dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n",
                                wr->num_sge, hr_qp->rq.max_gs);
                        ret = -EINVAL;
                        *bad_wr = wr;
                        goto out;
                }

                ctrl = get_recv_wqe(hr_qp, ind);

                roce_set_field(ctrl->rwqe_byte_12,
                               RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M,
                               RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S,
                               wr->num_sge);

                /* Scatter list lives immediately after the control segment. */
                scat = (struct hns_roce_wqe_data_seg *)(ctrl + 1);

                for (i = 0; i < wr->num_sge; i++)
                        set_data_seg(scat + i, wr->sg_list + i);

                hr_qp->rq.wrid[ind] = wr->wr_id;

                ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
        }

out:
        if (likely(nreq)) {
                hr_qp->rq.head += nreq;
                /* WQE writes must be visible before publishing the head. */
                wmb();

                if (ibqp->qp_type == IB_QPT_GSI) {
                        /* SW update GSI rq header */
                        reg_val = roce_read(to_hr_dev(ibqp->device),
                                            ROCEE_QP1C_CFG3_0_REG +
                                            QP1C_CFGN_OFFSET * hr_qp->phy_port);
                        roce_set_field(reg_val,
                                       ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M,
                                       ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S,
                                       hr_qp->rq.head);
                        roce_write(to_hr_dev(ibqp->device),
                                   ROCEE_QP1C_CFG3_0_REG +
                                   QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val);
                } else {
                        /* Normal QPs ring the RQ doorbell instead. */
                        rq_db.u32_4 = 0;
                        rq_db.u32_8 = 0;

                        roce_set_field(rq_db.u32_4, RQ_DOORBELL_U32_4_RQ_HEAD_M,
                                       RQ_DOORBELL_U32_4_RQ_HEAD_S,
                                       hr_qp->rq.head);
                        roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_QPN_M,
                                       RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
                        roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_CMD_M,
                                       RQ_DOORBELL_U32_8_CMD_S, 1);
                        roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S,
                                     1);

                        doorbell[0] = le32_to_cpu(rq_db.u32_4);
                        doorbell[1] = le32_to_cpu(rq_db.u32_8);

                        hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
                }
        }
        spin_unlock_irqrestore(&hr_qp->rq.lock, flags);

        return ret;
}
 440
 441static void hns_roce_set_db_event_mode(struct hns_roce_dev *hr_dev,
 442                                       int sdb_mode, int odb_mode)
 443{
 444        u32 val;
 445
 446        val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
 447        roce_set_bit(val, ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S, sdb_mode);
 448        roce_set_bit(val, ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S, odb_mode);
 449        roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
 450}
 451
 452static void hns_roce_set_db_ext_mode(struct hns_roce_dev *hr_dev, u32 sdb_mode,
 453                                     u32 odb_mode)
 454{
 455        u32 val;
 456
 457        /* Configure SDB/ODB extend mode */
 458        val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
 459        roce_set_bit(val, ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S, sdb_mode);
 460        roce_set_bit(val, ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S, odb_mode);
 461        roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
 462}
 463
 464static void hns_roce_set_sdb(struct hns_roce_dev *hr_dev, u32 sdb_alept,
 465                             u32 sdb_alful)
 466{
 467        u32 val;
 468
 469        /* Configure SDB */
 470        val = roce_read(hr_dev, ROCEE_DB_SQ_WL_REG);
 471        roce_set_field(val, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M,
 472                       ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S, sdb_alful);
 473        roce_set_field(val, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M,
 474                       ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S, sdb_alept);
 475        roce_write(hr_dev, ROCEE_DB_SQ_WL_REG, val);
 476}
 477
 478static void hns_roce_set_odb(struct hns_roce_dev *hr_dev, u32 odb_alept,
 479                             u32 odb_alful)
 480{
 481        u32 val;
 482
 483        /* Configure ODB */
 484        val = roce_read(hr_dev, ROCEE_DB_OTHERS_WL_REG);
 485        roce_set_field(val, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M,
 486                       ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S, odb_alful);
 487        roce_set_field(val, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M,
 488                       ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S, odb_alept);
 489        roce_write(hr_dev, ROCEE_DB_OTHERS_WL_REG, val);
 490}
 491
/*
 * Configure the extended SQ doorbell: watermarks, DMA base address
 * (split low/high across two registers) and ring depth.  The extended
 * SDB buffer must already be allocated in priv->db_table.
 */
static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
                                 u32 ext_sdb_alful)
{
        struct device *dev = &hr_dev->pdev->dev;
        struct hns_roce_v1_priv *priv;
        struct hns_roce_db_table *db;
        dma_addr_t sdb_dma_addr;
        u32 val;

        priv = (struct hns_roce_v1_priv *)hr_dev->priv;
        db = &priv->db_table;

        /* Configure extend SDB threshold */
        roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_EMPTY_REG, ext_sdb_alept);
        roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_REG, ext_sdb_alful);

        /* Configure extend SDB base addr (low part, 4K-page aligned) */
        sdb_dma_addr = db->ext_db->sdb_buf_list->map;
        roce_write(hr_dev, ROCEE_EXT_DB_SQ_REG, (u32)(sdb_dma_addr >> 12));

        /* Configure extend SDB depth */
        val = roce_read(hr_dev, ROCEE_EXT_DB_SQ_H_REG);
        roce_set_field(val, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M,
                       ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S,
                       db->ext_db->esdb_dep);
        /*
         * 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of
         * using 4K page, and shift more 32 because of
         * calculating the high 32 bit value evaluated to hardware.
         */
        roce_set_field(val, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M,
                       ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S, sdb_dma_addr >> 44);
        roce_write(hr_dev, ROCEE_EXT_DB_SQ_H_REG, val);

        dev_dbg(dev, "ext SDB depth: 0x%x\n", db->ext_db->esdb_dep);
        dev_dbg(dev, "ext SDB threshold: epmty: 0x%x, ful: 0x%x\n",
                ext_sdb_alept, ext_sdb_alful);
}
 530
 531static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept,
 532                                 u32 ext_odb_alful)
 533{
 534        struct device *dev = &hr_dev->pdev->dev;
 535        struct hns_roce_v1_priv *priv;
 536        struct hns_roce_db_table *db;
 537        dma_addr_t odb_dma_addr;
 538        u32 val;
 539
 540        priv = (struct hns_roce_v1_priv *)hr_dev->priv;
 541        db = &priv->db_table;
 542
 543        /* Configure extend ODB threshold */
 544        roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_EMPTY_REG, ext_odb_alept);
 545        roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_REG, ext_odb_alful);
 546
 547        /* Configure extend ODB base addr */
 548        odb_dma_addr = db->ext_db->odb_buf_list->map;
 549        roce_write(hr_dev, ROCEE_EXT_DB_OTH_REG, (u32)(odb_dma_addr >> 12));
 550
 551        /* Configure extend ODB depth */
 552        val = roce_read(hr_dev, ROCEE_EXT_DB_OTH_H_REG);
 553        roce_set_field(val, ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M,
 554                       ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S,
 555                       db->ext_db->eodb_dep);
 556        roce_set_field(val, ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M,
 557                       ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S,
 558                       db->ext_db->eodb_dep);
 559        roce_write(hr_dev, ROCEE_EXT_DB_OTH_H_REG, val);
 560
 561        dev_dbg(dev, "ext ODB depth: 0x%x\n", db->ext_db->eodb_dep);
 562        dev_dbg(dev, "ext ODB threshold: empty: 0x%x, ful: 0x%x\n",
 563                ext_odb_alept, ext_odb_alful);
 564}
 565
/*
 * Initialize the doorbell subsystem: allocate and program the extended
 * SDB/ODB DMA buffers when the respective *_ext_mod flag is set, or fall
 * back to the fixed (non-extended) watermark registers otherwise, then
 * latch the chosen modes into the global config register.
 *
 * Returns 0 on success or a negative errno; on failure all allocations
 * made here are unwound via the goto chain below.
 */
static int hns_roce_db_ext_init(struct hns_roce_dev *hr_dev, u32 sdb_ext_mod,
                                u32 odb_ext_mod)
{
        struct device *dev = &hr_dev->pdev->dev;
        struct hns_roce_v1_priv *priv;
        struct hns_roce_db_table *db;
        dma_addr_t sdb_dma_addr;
        dma_addr_t odb_dma_addr;
        int ret = 0;

        priv = (struct hns_roce_v1_priv *)hr_dev->priv;
        db = &priv->db_table;

        db->ext_db = kmalloc(sizeof(*db->ext_db), GFP_KERNEL);
        if (!db->ext_db)
                return -ENOMEM;

        if (sdb_ext_mod) {
                db->ext_db->sdb_buf_list = kmalloc(
                                sizeof(*db->ext_db->sdb_buf_list), GFP_KERNEL);
                if (!db->ext_db->sdb_buf_list) {
                        ret = -ENOMEM;
                        goto ext_sdb_buf_fail_out;
                }

                db->ext_db->sdb_buf_list->buf = dma_alloc_coherent(dev,
                                                     HNS_ROCE_V1_EXT_SDB_SIZE,
                                                     &sdb_dma_addr, GFP_KERNEL);
                if (!db->ext_db->sdb_buf_list->buf) {
                        ret = -ENOMEM;
                        goto alloc_sq_db_buf_fail;
                }
                db->ext_db->sdb_buf_list->map = sdb_dma_addr;

                /* Hardware takes the depth as a log2 shift value. */
                db->ext_db->esdb_dep = ilog2(HNS_ROCE_V1_EXT_SDB_DEPTH);
                hns_roce_set_sdb_ext(hr_dev, HNS_ROCE_V1_EXT_SDB_ALEPT,
                                     HNS_ROCE_V1_EXT_SDB_ALFUL);
        } else
                hns_roce_set_sdb(hr_dev, HNS_ROCE_V1_SDB_ALEPT,
                                 HNS_ROCE_V1_SDB_ALFUL);

        if (odb_ext_mod) {
                db->ext_db->odb_buf_list = kmalloc(
                                sizeof(*db->ext_db->odb_buf_list), GFP_KERNEL);
                if (!db->ext_db->odb_buf_list) {
                        ret = -ENOMEM;
                        goto ext_odb_buf_fail_out;
                }

                db->ext_db->odb_buf_list->buf = dma_alloc_coherent(dev,
                                                     HNS_ROCE_V1_EXT_ODB_SIZE,
                                                     &odb_dma_addr, GFP_KERNEL);
                if (!db->ext_db->odb_buf_list->buf) {
                        ret = -ENOMEM;
                        goto alloc_otr_db_buf_fail;
                }
                db->ext_db->odb_buf_list->map = odb_dma_addr;

                /* Hardware takes the depth as a log2 shift value. */
                db->ext_db->eodb_dep = ilog2(HNS_ROCE_V1_EXT_ODB_DEPTH);
                hns_roce_set_odb_ext(hr_dev, HNS_ROCE_V1_EXT_ODB_ALEPT,
                                     HNS_ROCE_V1_EXT_ODB_ALFUL);
        } else
                hns_roce_set_odb(hr_dev, HNS_ROCE_V1_ODB_ALEPT,
                                 HNS_ROCE_V1_ODB_ALFUL);

        hns_roce_set_db_ext_mode(hr_dev, sdb_ext_mod, odb_ext_mod);

        return 0;

        /*
         * Error unwind: each label undoes the allocations completed before
         * the failing step; the SDB resources are only freed when they were
         * actually allocated (sdb_ext_mod set).
         */
alloc_otr_db_buf_fail:
        kfree(db->ext_db->odb_buf_list);

ext_odb_buf_fail_out:
        if (sdb_ext_mod) {
                dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
                                  db->ext_db->sdb_buf_list->buf,
                                  db->ext_db->sdb_buf_list->map);
        }

alloc_sq_db_buf_fail:
        if (sdb_ext_mod)
                kfree(db->ext_db->sdb_buf_list);

ext_sdb_buf_fail_out:
        kfree(db->ext_db);
        return ret;
}
 653
 654static struct hns_roce_qp *hns_roce_v1_create_lp_qp(struct hns_roce_dev *hr_dev,
 655                                                    struct ib_pd *pd)
 656{
 657        struct device *dev = &hr_dev->pdev->dev;
 658        struct ib_qp_init_attr init_attr;
 659        struct ib_qp *qp;
 660
 661        memset(&init_attr, 0, sizeof(struct ib_qp_init_attr));
 662        init_attr.qp_type               = IB_QPT_RC;
 663        init_attr.sq_sig_type           = IB_SIGNAL_ALL_WR;
 664        init_attr.cap.max_recv_wr       = HNS_ROCE_MIN_WQE_NUM;
 665        init_attr.cap.max_send_wr       = HNS_ROCE_MIN_WQE_NUM;
 666
 667        qp = hns_roce_create_qp(pd, &init_attr, NULL);
 668        if (IS_ERR(qp)) {
 669                dev_err(dev, "Create loop qp for mr free failed!");
 670                return NULL;
 671        }
 672
 673        return to_hr_qp(qp);
 674}
 675
 676static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
 677{
 678        struct hns_roce_caps *caps = &hr_dev->caps;
 679        struct device *dev = &hr_dev->pdev->dev;
 680        struct ib_cq_init_attr cq_init_attr;
 681        struct hns_roce_free_mr *free_mr;
 682        struct ib_qp_attr attr = { 0 };
 683        struct hns_roce_v1_priv *priv;
 684        struct hns_roce_qp *hr_qp;
 685        struct ib_cq *cq;
 686        struct ib_pd *pd;
 687        union ib_gid dgid;
 688        u64 subnet_prefix;
 689        int attr_mask = 0;
 690        int i, j;
 691        int ret;
 692        u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 };
 693        u8 phy_port;
 694        u8 port = 0;
 695        u8 sl;
 696
 697        priv = (struct hns_roce_v1_priv *)hr_dev->priv;
 698        free_mr = &priv->free_mr;
 699
 700        /* Reserved cq for loop qp */
 701        cq_init_attr.cqe                = HNS_ROCE_MIN_WQE_NUM * 2;
 702        cq_init_attr.comp_vector        = 0;
 703        cq = hns_roce_ib_create_cq(&hr_dev->ib_dev, &cq_init_attr, NULL, NULL);
 704        if (IS_ERR(cq)) {
 705                dev_err(dev, "Create cq for reseved loop qp failed!");
 706                return -ENOMEM;
 707        }
 708        free_mr->mr_free_cq = to_hr_cq(cq);
 709        free_mr->mr_free_cq->ib_cq.device               = &hr_dev->ib_dev;
 710        free_mr->mr_free_cq->ib_cq.uobject              = NULL;
 711        free_mr->mr_free_cq->ib_cq.comp_handler         = NULL;
 712        free_mr->mr_free_cq->ib_cq.event_handler        = NULL;
 713        free_mr->mr_free_cq->ib_cq.cq_context           = NULL;
 714        atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0);
 715
 716        pd = hns_roce_alloc_pd(&hr_dev->ib_dev, NULL, NULL);
 717        if (IS_ERR(pd)) {
 718                dev_err(dev, "Create pd for reseved loop qp failed!");
 719                ret = -ENOMEM;
 720                goto alloc_pd_failed;
 721        }
 722        free_mr->mr_free_pd = to_hr_pd(pd);
 723        free_mr->mr_free_pd->ibpd.device  = &hr_dev->ib_dev;
 724        free_mr->mr_free_pd->ibpd.uobject = NULL;
 725        free_mr->mr_free_pd->ibpd.__internal_mr = NULL;
 726        atomic_set(&free_mr->mr_free_pd->ibpd.usecnt, 0);
 727
 728        attr.qp_access_flags    = IB_ACCESS_REMOTE_WRITE;
 729        attr.pkey_index         = 0;
 730        attr.min_rnr_timer      = 0;
 731        /* Disable read ability */
 732        attr.max_dest_rd_atomic = 0;
 733        attr.max_rd_atomic      = 0;
 734        /* Use arbitrary values as rq_psn and sq_psn */
 735        attr.rq_psn             = 0x0808;
 736        attr.sq_psn             = 0x0808;
 737        attr.retry_cnt          = 7;
 738        attr.rnr_retry          = 7;
 739        attr.timeout            = 0x12;
 740        attr.path_mtu           = IB_MTU_256;
 741        attr.ah_attr.type       = RDMA_AH_ATTR_TYPE_ROCE;
 742        rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0);
 743        rdma_ah_set_static_rate(&attr.ah_attr, 3);
 744
 745        subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
 746        for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
 747                phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
 748                                (i % HNS_ROCE_MAX_PORTS);
 749                sl = i / HNS_ROCE_MAX_PORTS;
 750
 751                for (j = 0; j < caps->num_ports; j++) {
 752                        if (hr_dev->iboe.phy_port[j] == phy_port) {
 753                                queue_en[i] = 1;
 754                                port = j;
 755                                break;
 756                        }
 757                }
 758
 759                if (!queue_en[i])
 760                        continue;
 761
 762                free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
 763                if (!free_mr->mr_free_qp[i]) {
 764                        dev_err(dev, "Create loop qp failed!\n");
 765                        goto create_lp_qp_failed;
 766                }
 767                hr_qp = free_mr->mr_free_qp[i];
 768
 769                hr_qp->port             = port;
 770                hr_qp->phy_port         = phy_port;
 771                hr_qp->ibqp.qp_type     = IB_QPT_RC;
 772                hr_qp->ibqp.device      = &hr_dev->ib_dev;
 773                hr_qp->ibqp.uobject     = NULL;
 774                atomic_set(&hr_qp->ibqp.usecnt, 0);
 775                hr_qp->ibqp.pd          = pd;
 776                hr_qp->ibqp.recv_cq     = cq;
 777                hr_qp->ibqp.send_cq     = cq;
 778
 779                rdma_ah_set_port_num(&attr.ah_attr, port + 1);
 780                rdma_ah_set_sl(&attr.ah_attr, sl);
 781                attr.port_num           = port + 1;
 782
 783                attr.dest_qp_num        = hr_qp->qpn;
 784                memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr),
 785                       hr_dev->dev_addr[port],
 786                       MAC_ADDR_OCTET_NUM);
 787
 788                memcpy(&dgid.raw, &subnet_prefix, sizeof(u64));
 789                memcpy(&dgid.raw[8], hr_dev->dev_addr[port], 3);
 790                memcpy(&dgid.raw[13], hr_dev->dev_addr[port] + 3, 3);
 791                dgid.raw[11] = 0xff;
 792                dgid.raw[12] = 0xfe;
 793                dgid.raw[8] ^= 2;
 794                rdma_ah_set_dgid_raw(&attr.ah_attr, dgid.raw);
 795
 796                ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
 797                                            IB_QPS_RESET, IB_QPS_INIT);
 798                if (ret) {
 799                        dev_err(dev, "modify qp failed(%d)!\n", ret);
 800                        goto create_lp_qp_failed;
 801                }
 802
 803                ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, IB_QP_DEST_QPN,
 804                                            IB_QPS_INIT, IB_QPS_RTR);
 805                if (ret) {
 806                        dev_err(dev, "modify qp failed(%d)!\n", ret);
 807                        goto create_lp_qp_failed;
 808                }
 809
 810                ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
 811                                            IB_QPS_RTR, IB_QPS_RTS);
 812                if (ret) {
 813                        dev_err(dev, "modify qp failed(%d)!\n", ret);
 814                        goto create_lp_qp_failed;
 815                }
 816        }
 817
 818        return 0;
 819
 820create_lp_qp_failed:
 821        for (i -= 1; i >= 0; i--) {
 822                hr_qp = free_mr->mr_free_qp[i];
 823                if (hns_roce_v1_destroy_qp(&hr_qp->ibqp))
 824                        dev_err(dev, "Destroy qp %d for mr free failed!\n", i);
 825        }
 826
 827        if (hns_roce_dealloc_pd(pd))
 828                dev_err(dev, "Destroy pd for create_lp_qp failed!\n");
 829
 830alloc_pd_failed:
 831        if (hns_roce_ib_destroy_cq(cq))
 832                dev_err(dev, "Destroy cq for create_lp_qp failed!\n");
 833
 834        return -EINVAL;
 835}
 836
 837static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
 838{
 839        struct device *dev = &hr_dev->pdev->dev;
 840        struct hns_roce_free_mr *free_mr;
 841        struct hns_roce_v1_priv *priv;
 842        struct hns_roce_qp *hr_qp;
 843        int ret;
 844        int i;
 845
 846        priv = (struct hns_roce_v1_priv *)hr_dev->priv;
 847        free_mr = &priv->free_mr;
 848
 849        for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
 850                hr_qp = free_mr->mr_free_qp[i];
 851                if (!hr_qp)
 852                        continue;
 853
 854                ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp);
 855                if (ret)
 856                        dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n",
 857                                i, ret);
 858        }
 859
 860        ret = hns_roce_ib_destroy_cq(&free_mr->mr_free_cq->ib_cq);
 861        if (ret)
 862                dev_err(dev, "Destroy cq for mr_free failed(%d)!\n", ret);
 863
 864        ret = hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd);
 865        if (ret)
 866                dev_err(dev, "Destroy pd for mr_free failed(%d)!\n", ret);
 867}
 868
 869static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
 870{
 871        struct device *dev = &hr_dev->pdev->dev;
 872        struct hns_roce_v1_priv *priv;
 873        struct hns_roce_db_table *db;
 874        u32 sdb_ext_mod;
 875        u32 odb_ext_mod;
 876        u32 sdb_evt_mod;
 877        u32 odb_evt_mod;
 878        int ret = 0;
 879
 880        priv = (struct hns_roce_v1_priv *)hr_dev->priv;
 881        db = &priv->db_table;
 882
 883        memset(db, 0, sizeof(*db));
 884
 885        /* Default DB mode */
 886        sdb_ext_mod = HNS_ROCE_SDB_EXTEND_MODE;
 887        odb_ext_mod = HNS_ROCE_ODB_EXTEND_MODE;
 888        sdb_evt_mod = HNS_ROCE_SDB_NORMAL_MODE;
 889        odb_evt_mod = HNS_ROCE_ODB_POLL_MODE;
 890
 891        db->sdb_ext_mod = sdb_ext_mod;
 892        db->odb_ext_mod = odb_ext_mod;
 893
 894        /* Init extend DB */
 895        ret = hns_roce_db_ext_init(hr_dev, sdb_ext_mod, odb_ext_mod);
 896        if (ret) {
 897                dev_err(dev, "Failed in extend DB configuration.\n");
 898                return ret;
 899        }
 900
 901        hns_roce_set_db_event_mode(hr_dev, sdb_evt_mod, odb_evt_mod);
 902
 903        return 0;
 904}
 905
/*
 * Workqueue handler that releases and re-creates the reserved loopback
 * QPs. Signals the submitter via the completion only while comp_flag is
 * still set (the submitter clears it on timeout), then frees the work
 * item itself.
 */
static void hns_roce_v1_recreate_lp_qp_work_fn(struct work_struct *work)
{
        struct hns_roce_recreate_lp_qp_work *lp_qp_work;
        struct hns_roce_dev *hr_dev;

        lp_qp_work = container_of(work, struct hns_roce_recreate_lp_qp_work,
                                  work);
        hr_dev = to_hr_dev(lp_qp_work->ib_dev);

        /* Drop the existing loopback QPs before recreating them */
        hns_roce_v1_release_lp_qp(hr_dev);

        if (hns_roce_v1_rsv_lp_qp(hr_dev))
                dev_err(&hr_dev->pdev->dev, "create reserver qp failed\n");

        /* Wake the submitter only if it is still waiting on the completion */
        if (lp_qp_work->comp_flag)
                complete(lp_qp_work->comp);

        kfree(lp_qp_work);
}
 925
/*
 * Queue a work item that re-creates the reserved loopback QPs, then poll
 * for its completion for up to HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS.
 * Returns 0 on success, -ENOMEM on allocation failure, -ETIMEDOUT if the
 * work did not finish in time.
 */
static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev)
{
        struct device *dev = &hr_dev->pdev->dev;
        struct hns_roce_recreate_lp_qp_work *lp_qp_work;
        struct hns_roce_free_mr *free_mr;
        struct hns_roce_v1_priv *priv;
        struct completion comp;
        unsigned long end =
          msecs_to_jiffies(HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS) + jiffies;

        priv = (struct hns_roce_v1_priv *)hr_dev->priv;
        free_mr = &priv->free_mr;

        lp_qp_work = kzalloc(sizeof(struct hns_roce_recreate_lp_qp_work),
                             GFP_KERNEL);
        if (!lp_qp_work)
                return -ENOMEM;

        INIT_WORK(&(lp_qp_work->work), hns_roce_v1_recreate_lp_qp_work_fn);

        /* comp lives on this stack; the work fn reaches it via lp_qp_work */
        lp_qp_work->ib_dev = &(hr_dev->ib_dev);
        lp_qp_work->comp = &comp;
        lp_qp_work->comp_flag = 1;

        init_completion(lp_qp_work->comp);

        queue_work(free_mr->free_mr_wq, &(lp_qp_work->work));

        /* Poll rather than block so the wait can be bounded by 'end' */
        while (time_before_eq(jiffies, end)) {
                if (try_wait_for_completion(&comp))
                        return 0;
                msleep(HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE);
        }

        /*
         * NOTE(review): on timeout the work fn may run concurrently (or may
         * already have kfree'd lp_qp_work), so this write to comp_flag and
         * the work fn's read/kfree are unsynchronized — looks like a
         * potential use-after-free; confirm against the workqueue lifetime.
         */
        lp_qp_work->comp_flag = 0;
        if (try_wait_for_completion(&comp))
                return 0;

        dev_warn(dev, "recreate lp qp failed 20s timeout and return failed!\n");
        return -ETIMEDOUT;
}
 967
 968static int hns_roce_v1_send_lp_wqe(struct hns_roce_qp *hr_qp)
 969{
 970        struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
 971        struct device *dev = &hr_dev->pdev->dev;
 972        struct ib_send_wr send_wr, *bad_wr;
 973        int ret;
 974
 975        memset(&send_wr, 0, sizeof(send_wr));
 976        send_wr.next    = NULL;
 977        send_wr.num_sge = 0;
 978        send_wr.send_flags = 0;
 979        send_wr.sg_list = NULL;
 980        send_wr.wr_id   = (unsigned long long)&send_wr;
 981        send_wr.opcode  = IB_WR_RDMA_WRITE;
 982
 983        ret = hns_roce_v1_post_send(&hr_qp->ibqp, &send_wr, &bad_wr);
 984        if (ret) {
 985                dev_err(dev, "Post write wqe for mr free failed(%d)!", ret);
 986                return ret;
 987        }
 988
 989        return 0;
 990}
 991
/*
 * Workqueue handler for the hip06 MR-free flow: posts one zero-length
 * RDMA WRITE on every populated loopback QP, then polls their shared CQ
 * until all of those completions arrive or
 * HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS elapses. Signals the submitter via
 * the completion (if comp_flag is still set) and frees the work item.
 */
static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
{
        struct hns_roce_mr_free_work *mr_work;
        struct ib_wc wc[HNS_ROCE_V1_RESV_QP];
        struct hns_roce_free_mr *free_mr;
        struct hns_roce_cq *mr_free_cq;
        struct hns_roce_v1_priv *priv;
        struct hns_roce_dev *hr_dev;
        struct hns_roce_mr *hr_mr;
        struct hns_roce_qp *hr_qp;
        struct device *dev;
        unsigned long end =
                msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
        int i;
        int ret;
        int ne = 0;     /* number of completions still expected */

        mr_work = container_of(work, struct hns_roce_mr_free_work, work);
        hr_mr = (struct hns_roce_mr *)mr_work->mr;
        hr_dev = to_hr_dev(mr_work->ib_dev);
        dev = &hr_dev->pdev->dev;

        priv = (struct hns_roce_v1_priv *)hr_dev->priv;
        free_mr = &priv->free_mr;
        mr_free_cq = free_mr->mr_free_cq;

        /* Post one flush WQE per populated loopback QP; count them in ne */
        for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
                hr_qp = free_mr->mr_free_qp[i];
                if (!hr_qp)
                        continue;
                ne++;

                ret = hns_roce_v1_send_lp_wqe(hr_qp);
                if (ret) {
                        dev_err(dev,
                             "Send wqe (qp:0x%lx) for mr free failed(%d)!\n",
                             hr_qp->qpn, ret);
                        goto free_work;
                }
        }

        if (!ne) {
                dev_err(dev, "Reserved loop qp is absent!\n");
                goto free_work;
        }

        /* Poll until every posted WQE has completed or the deadline passes */
        do {
                ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
                if (ret < 0 && hr_qp) {
                        dev_err(dev,
                           "(qp:0x%lx) starts, Poll cqe failed(%d) for mr 0x%x free! Remain %d cqe\n",
                           hr_qp->qpn, ret, hr_mr->key, ne);
                        goto free_work;
                }
                ne -= ret;
                usleep_range(HNS_ROCE_V1_FREE_MR_WAIT_VALUE * 1000,
                             (1 + HNS_ROCE_V1_FREE_MR_WAIT_VALUE) * 1000);
        } while (ne && time_before_eq(jiffies, end));

        if (ne != 0)
                dev_err(dev,
                        "Poll cqe for mr 0x%x free timeout! Remain %d cqe\n",
                        hr_mr->key, ne);

free_work:
        /* Wake the submitter only if it is still waiting (see comp_flag) */
        if (mr_work->comp_flag)
                complete(mr_work->comp);
        kfree(mr_work);
}
1061
/*
 * Deregister an MR on hip06: move its MPT back to software ownership,
 * queue the loopback-QP flush work and poll for its completion (up to
 * HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS), then release the MR's PBL buffer,
 * bitmap index, umem and the MR structure itself.
 * Returns 0 on success, -ENOMEM or -ETIMEDOUT on failure (resources are
 * still released on the failure paths that reach free_mr).
 */
static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev,
                                struct hns_roce_mr *mr)
{
        struct device *dev = &hr_dev->pdev->dev;
        struct hns_roce_mr_free_work *mr_work;
        struct hns_roce_free_mr *free_mr;
        struct hns_roce_v1_priv *priv;
        struct completion comp;
        unsigned long end =
                msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
        unsigned long start = jiffies;
        int npages;
        int ret = 0;

        priv = (struct hns_roce_v1_priv *)hr_dev->priv;
        free_mr = &priv->free_mr;

        /* Return the MPT entry to software ownership before freeing */
        if (mr->enabled) {
                if (hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key)
                                       & (hr_dev->caps.num_mtpts - 1)))
                        dev_warn(dev, "HW2SW_MPT failed!\n");
        }

        mr_work = kzalloc(sizeof(*mr_work), GFP_KERNEL);
        if (!mr_work) {
                ret = -ENOMEM;
                goto free_mr;
        }

        INIT_WORK(&(mr_work->work), hns_roce_v1_mr_free_work_fn);

        /* comp lives on this stack; the work fn reaches it via mr_work */
        mr_work->ib_dev = &(hr_dev->ib_dev);
        mr_work->comp = &comp;
        mr_work->comp_flag = 1;
        mr_work->mr = (void *)mr;
        init_completion(mr_work->comp);

        queue_work(free_mr->free_mr_wq, &(mr_work->work));

        /* Poll rather than block so the wait can be bounded by 'end' */
        while (time_before_eq(jiffies, end)) {
                if (try_wait_for_completion(&comp))
                        goto free_mr;
                msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
        }

        /*
         * NOTE(review): on timeout the work fn may run concurrently (or may
         * already have kfree'd mr_work), so this write to comp_flag is
         * unsynchronized with the work fn's read/kfree — same potential
         * use-after-free pattern as hns_roce_v1_recreate_lp_qp; confirm.
         */
        mr_work->comp_flag = 0;
        if (try_wait_for_completion(&comp))
                goto free_mr;

        dev_warn(dev, "Free mr work 0x%x over 50s and failed!\n", mr->key);
        ret = -ETIMEDOUT;

free_mr:
        dev_dbg(dev, "Free mr 0x%x use 0x%x us.\n",
                mr->key, jiffies_to_usecs(jiffies) - jiffies_to_usecs(start));

        /* mr->size == ~0ULL marks an MR without a PBL buffer to free */
        if (mr->size != ~0ULL) {
                npages = ib_umem_page_count(mr->umem);
                dma_free_coherent(dev, npages * 8, mr->pbl_buf,
                                  mr->pbl_dma_addr);
        }

        hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
                             key_to_hw_index(mr->key), 0);

        if (mr->umem)
                ib_umem_release(mr->umem);

        kfree(mr);

        return ret;
}
1134
1135static void hns_roce_db_free(struct hns_roce_dev *hr_dev)
1136{
1137        struct device *dev = &hr_dev->pdev->dev;
1138        struct hns_roce_v1_priv *priv;
1139        struct hns_roce_db_table *db;
1140
1141        priv = (struct hns_roce_v1_priv *)hr_dev->priv;
1142        db = &priv->db_table;
1143
1144        if (db->sdb_ext_mod) {
1145                dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
1146                                  db->ext_db->sdb_buf_list->buf,
1147                                  db->ext_db->sdb_buf_list->map);
1148                kfree(db->ext_db->sdb_buf_list);
1149        }
1150
1151        if (db->odb_ext_mod) {
1152                dma_free_coherent(dev, HNS_ROCE_V1_EXT_ODB_SIZE,
1153                                  db->ext_db->odb_buf_list->buf,
1154                                  db->ext_db->odb_buf_list->map);
1155                kfree(db->ext_db->odb_buf_list);
1156        }
1157
1158        kfree(db->ext_db);
1159}
1160
/*
 * Allocate the extended RAQ buffer and program the RAQ registers:
 * base address, shift, watermark, polling interval/timeout, and finally
 * enable extended-RAQ mode and RAQ drop.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int hns_roce_raq_init(struct hns_roce_dev *hr_dev)
{
        int ret;
        int raq_shift = 0;
        dma_addr_t addr;
        u32 val;
        struct hns_roce_v1_priv *priv;
        struct hns_roce_raq_table *raq;
        struct device *dev = &hr_dev->pdev->dev;

        priv = (struct hns_roce_v1_priv *)hr_dev->priv;
        raq = &priv->raq_table;

        raq->e_raq_buf = kzalloc(sizeof(*(raq->e_raq_buf)), GFP_KERNEL);
        if (!raq->e_raq_buf)
                return -ENOMEM;

        raq->e_raq_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_RAQ_SIZE,
                                                 &addr, GFP_KERNEL);
        if (!raq->e_raq_buf->buf) {
                ret = -ENOMEM;
                goto err_dma_alloc_raq;
        }
        raq->e_raq_buf->map = addr;

        /* Configure raq extended address. 48bit 4K align*/
        roce_write(hr_dev, ROCEE_EXT_RAQ_REG, raq->e_raq_buf->map >> 12);

        /* Configure raq_shift */
        raq_shift = ilog2(HNS_ROCE_V1_RAQ_SIZE / HNS_ROCE_V1_RAQ_ENTRY);
        val = roce_read(hr_dev, ROCEE_EXT_RAQ_H_REG);
        roce_set_field(val, ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M,
                       ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S, raq_shift);
        /*
         * 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of
         * using 4K page, and shift more 32 because of
         * caculating the high 32 bit value evaluated to hardware.
         */
        roce_set_field(val, ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M,
                       ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S,
                       raq->e_raq_buf->map >> 44);
        roce_write(hr_dev, ROCEE_EXT_RAQ_H_REG, val);
        dev_dbg(dev, "Configure raq_shift 0x%x.\n", val);

        /* Configure raq threshold */
        val = roce_read(hr_dev, ROCEE_RAQ_WL_REG);
        roce_set_field(val, ROCEE_RAQ_WL_ROCEE_RAQ_WL_M,
                       ROCEE_RAQ_WL_ROCEE_RAQ_WL_S,
                       HNS_ROCE_V1_EXT_RAQ_WF);
        roce_write(hr_dev, ROCEE_RAQ_WL_REG, val);
        dev_dbg(dev, "Configure raq_wl 0x%x.\n", val);

        /* Enable extend raq */
        val = roce_read(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG);
        roce_set_field(val,
                       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M,
                       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S,
                       POL_TIME_INTERVAL_VAL);
        roce_set_bit(val, ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE, 1);
        roce_set_field(val,
                       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M,
                       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S,
                       2);
        roce_set_bit(val,
                     ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S, 1);
        roce_write(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG, val);
        dev_dbg(dev, "Configure WrmsPolTimeInterval 0x%x.\n", val);

        /* Enable raq drop */
        val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
        roce_set_bit(val, ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S, 1);
        roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
        dev_dbg(dev, "Configure GlbCfg = 0x%x.\n", val);

        return 0;

err_dma_alloc_raq:
        kfree(raq->e_raq_buf);
        return ret;
}
1241
1242static void hns_roce_raq_free(struct hns_roce_dev *hr_dev)
1243{
1244        struct device *dev = &hr_dev->pdev->dev;
1245        struct hns_roce_v1_priv *priv;
1246        struct hns_roce_raq_table *raq;
1247
1248        priv = (struct hns_roce_v1_priv *)hr_dev->priv;
1249        raq = &priv->raq_table;
1250
1251        dma_free_coherent(dev, HNS_ROCE_V1_RAQ_SIZE, raq->e_raq_buf->buf,
1252                          raq->e_raq_buf->map);
1253        kfree(raq->e_raq_buf);
1254}
1255
1256static void hns_roce_port_enable(struct hns_roce_dev *hr_dev, int enable_flag)
1257{
1258        u32 val;
1259
1260        if (enable_flag) {
1261                val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
1262                 /* Open all ports */
1263                roce_set_field(val, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
1264                               ROCEE_GLB_CFG_ROCEE_PORT_ST_S,
1265                               ALL_PORT_VAL_OPEN);
1266                roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
1267        } else {
1268                val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
1269                /* Close all ports */
1270                roce_set_field(val, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
1271                               ROCEE_GLB_CFG_ROCEE_PORT_ST_S, 0x0);
1272                roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
1273        }
1274}
1275
1276static int hns_roce_bt_init(struct hns_roce_dev *hr_dev)
1277{
1278        struct device *dev = &hr_dev->pdev->dev;
1279        struct hns_roce_v1_priv *priv;
1280        int ret;
1281
1282        priv = (struct hns_roce_v1_priv *)hr_dev->priv;
1283
1284        priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev,
1285                HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map,
1286                GFP_KERNEL);
1287        if (!priv->bt_table.qpc_buf.buf)
1288                return -ENOMEM;
1289
1290        priv->bt_table.mtpt_buf.buf = dma_alloc_coherent(dev,
1291                HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.mtpt_buf.map,
1292                GFP_KERNEL);
1293        if (!priv->bt_table.mtpt_buf.buf) {
1294                ret = -ENOMEM;
1295                goto err_failed_alloc_mtpt_buf;
1296        }
1297
1298        priv->bt_table.cqc_buf.buf = dma_alloc_coherent(dev,
1299                HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.cqc_buf.map,
1300                GFP_KERNEL);
1301        if (!priv->bt_table.cqc_buf.buf) {
1302                ret = -ENOMEM;
1303                goto err_failed_alloc_cqc_buf;
1304        }
1305
1306        return 0;
1307
1308err_failed_alloc_cqc_buf:
1309        dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
1310                priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);
1311
1312err_failed_alloc_mtpt_buf:
1313        dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
1314                priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
1315
1316        return ret;
1317}
1318
1319static void hns_roce_bt_free(struct hns_roce_dev *hr_dev)
1320{
1321        struct device *dev = &hr_dev->pdev->dev;
1322        struct hns_roce_v1_priv *priv;
1323
1324        priv = (struct hns_roce_v1_priv *)hr_dev->priv;
1325
1326        dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
1327                priv->bt_table.cqc_buf.buf, priv->bt_table.cqc_buf.map);
1328
1329        dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
1330                priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);
1331
1332        dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
1333                priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
1334}
1335
1336static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev)
1337{
1338        struct device *dev = &hr_dev->pdev->dev;
1339        struct hns_roce_buf_list *tptr_buf;
1340        struct hns_roce_v1_priv *priv;
1341
1342        priv = (struct hns_roce_v1_priv *)hr_dev->priv;
1343        tptr_buf = &priv->tptr_table.tptr_buf;
1344
1345        /*
1346         * This buffer will be used for CQ's tptr(tail pointer), also
1347         * named ci(customer index). Every CQ will use 2 bytes to save
1348         * cqe ci in hip06. Hardware will read this area to get new ci
1349         * when the queue is almost full.
1350         */
1351        tptr_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
1352                                           &tptr_buf->map, GFP_KERNEL);
1353        if (!tptr_buf->buf)
1354                return -ENOMEM;
1355
1356        hr_dev->tptr_dma_addr = tptr_buf->map;
1357        hr_dev->tptr_size = HNS_ROCE_V1_TPTR_BUF_SIZE;
1358
1359        return 0;
1360}
1361
1362static void hns_roce_tptr_free(struct hns_roce_dev *hr_dev)
1363{
1364        struct device *dev = &hr_dev->pdev->dev;
1365        struct hns_roce_buf_list *tptr_buf;
1366        struct hns_roce_v1_priv *priv;
1367
1368        priv = (struct hns_roce_v1_priv *)hr_dev->priv;
1369        tptr_buf = &priv->tptr_table.tptr_buf;
1370
1371        dma_free_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
1372                          tptr_buf->buf, tptr_buf->map);
1373}
1374
1375static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev)
1376{
1377        struct device *dev = &hr_dev->pdev->dev;
1378        struct hns_roce_free_mr *free_mr;
1379        struct hns_roce_v1_priv *priv;
1380        int ret = 0;
1381
1382        priv = (struct hns_roce_v1_priv *)hr_dev->priv;
1383        free_mr = &priv->free_mr;
1384
1385        free_mr->free_mr_wq = create_singlethread_workqueue("hns_roce_free_mr");
1386        if (!free_mr->free_mr_wq) {
1387                dev_err(dev, "Create free mr workqueue failed!\n");
1388                return -ENOMEM;
1389        }
1390
1391        ret = hns_roce_v1_rsv_lp_qp(hr_dev);
1392        if (ret) {
1393                dev_err(dev, "Reserved loop qp failed(%d)!\n", ret);
1394                flush_workqueue(free_mr->free_mr_wq);
1395                destroy_workqueue(free_mr->free_mr_wq);
1396        }
1397
1398        return ret;
1399}
1400
1401static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev)
1402{
1403        struct hns_roce_free_mr *free_mr;
1404        struct hns_roce_v1_priv *priv;
1405
1406        priv = (struct hns_roce_v1_priv *)hr_dev->priv;
1407        free_mr = &priv->free_mr;
1408
1409        flush_workqueue(free_mr->free_mr_wq);
1410        destroy_workqueue(free_mr->free_mr_wq);
1411
1412        hns_roce_v1_release_lp_qp(hr_dev);
1413}
1414
1415/**
1416 * hns_roce_v1_reset - reset RoCE
1417 * @hr_dev: RoCE device struct pointer
1418 * @enable: true -- drop reset, false -- reset
1419 * return 0 - success , negative --fail
1420 */
1421static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
1422{
1423        struct device_node *dsaf_node;
1424        struct device *dev = &hr_dev->pdev->dev;
1425        struct device_node *np = dev->of_node;
1426        struct fwnode_handle *fwnode;
1427        int ret;
1428
1429        /* check if this is DT/ACPI case */
1430        if (dev_of_node(dev)) {
1431                dsaf_node = of_parse_phandle(np, "dsaf-handle", 0);
1432                if (!dsaf_node) {
1433                        dev_err(dev, "could not find dsaf-handle\n");
1434                        return -EINVAL;
1435                }
1436                fwnode = &dsaf_node->fwnode;
1437        } else if (is_acpi_device_node(dev->fwnode)) {
1438                struct acpi_reference_args args;
1439
1440                ret = acpi_node_get_property_reference(dev->fwnode,
1441                                                       "dsaf-handle", 0, &args);
1442                if (ret) {
1443                        dev_err(dev, "could not find dsaf-handle\n");
1444                        return ret;
1445                }
1446                fwnode = acpi_fwnode_handle(args.adev);
1447        } else {
1448                dev_err(dev, "cannot read data from DT or ACPI\n");
1449                return -ENXIO;
1450        }
1451
1452        ret = hns_dsaf_roce_reset(fwnode, false);
1453        if (ret)
1454                return ret;
1455
1456        if (dereset) {
1457                msleep(SLEEP_TIME_INTERVAL);
1458                ret = hns_dsaf_roce_reset(fwnode, true);
1459        }
1460
1461        return ret;
1462}
1463
1464static int hns_roce_des_qp_init(struct hns_roce_dev *hr_dev)
1465{
1466        struct device *dev = &hr_dev->pdev->dev;
1467        struct hns_roce_v1_priv *priv;
1468        struct hns_roce_des_qp *des_qp;
1469
1470        priv = (struct hns_roce_v1_priv *)hr_dev->priv;
1471        des_qp = &priv->des_qp;
1472
1473        des_qp->requeue_flag = 1;
1474        des_qp->qp_wq = create_singlethread_workqueue("hns_roce_destroy_qp");
1475        if (!des_qp->qp_wq) {
1476                dev_err(dev, "Create destroy qp workqueue failed!\n");
1477                return -ENOMEM;
1478        }
1479
1480        return 0;
1481}
1482
1483static void hns_roce_des_qp_free(struct hns_roce_dev *hr_dev)
1484{
1485        struct hns_roce_v1_priv *priv;
1486        struct hns_roce_des_qp *des_qp;
1487
1488        priv = (struct hns_roce_v1_priv *)hr_dev->priv;
1489        des_qp = &priv->des_qp;
1490
1491        des_qp->requeue_flag = 0;
1492        flush_workqueue(des_qp->qp_wq);
1493        destroy_workqueue(des_qp->qp_wq);
1494}
1495
1496static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
1497{
1498        int i = 0;
1499        struct hns_roce_caps *caps = &hr_dev->caps;
1500
1501        hr_dev->vendor_id = le32_to_cpu(roce_read(hr_dev, ROCEE_VENDOR_ID_REG));
1502        hr_dev->vendor_part_id = le32_to_cpu(roce_read(hr_dev,
1503                                             ROCEE_VENDOR_PART_ID_REG));
1504        hr_dev->sys_image_guid = le32_to_cpu(roce_read(hr_dev,
1505                                             ROCEE_SYS_IMAGE_GUID_L_REG)) |
1506                                ((u64)le32_to_cpu(roce_read(hr_dev,
1507                                            ROCEE_SYS_IMAGE_GUID_H_REG)) << 32);
1508        hr_dev->hw_rev          = HNS_ROCE_HW_VER1;
1509
1510        caps->num_qps           = HNS_ROCE_V1_MAX_QP_NUM;
1511        caps->max_wqes          = HNS_ROCE_V1_MAX_WQE_NUM;
1512        caps->min_wqes          = HNS_ROCE_MIN_WQE_NUM;
1513        caps->num_cqs           = HNS_ROCE_V1_MAX_CQ_NUM;
1514        caps->min_cqes          = HNS_ROCE_MIN_CQE_NUM;
1515        caps->max_cqes          = HNS_ROCE_V1_MAX_CQE_NUM;
1516        caps->max_sq_sg         = HNS_ROCE_V1_SG_NUM;
1517        caps->max_rq_sg         = HNS_ROCE_V1_SG_NUM;
1518        caps->max_sq_inline     = HNS_ROCE_V1_INLINE_SIZE;
1519        caps->num_uars          = HNS_ROCE_V1_UAR_NUM;
1520        caps->phy_num_uars      = HNS_ROCE_V1_PHY_UAR_NUM;
1521        caps->num_aeq_vectors   = HNS_ROCE_V1_AEQE_VEC_NUM;
1522        caps->num_comp_vectors  = HNS_ROCE_V1_COMP_VEC_NUM;
1523        caps->num_other_vectors = HNS_ROCE_V1_ABNORMAL_VEC_NUM;
1524        caps->num_mtpts         = HNS_ROCE_V1_MAX_MTPT_NUM;
1525        caps->num_mtt_segs      = HNS_ROCE_V1_MAX_MTT_SEGS;
1526        caps->num_pds           = HNS_ROCE_V1_MAX_PD_NUM;
1527        caps->max_qp_init_rdma  = HNS_ROCE_V1_MAX_QP_INIT_RDMA;
1528        caps->max_qp_dest_rdma  = HNS_ROCE_V1_MAX_QP_DEST_RDMA;
1529        caps->max_sq_desc_sz    = HNS_ROCE_V1_MAX_SQ_DESC_SZ;
1530        caps->max_rq_desc_sz    = HNS_ROCE_V1_MAX_RQ_DESC_SZ;
1531        caps->qpc_entry_sz      = HNS_ROCE_V1_QPC_ENTRY_SIZE;
1532        caps->irrl_entry_sz     = HNS_ROCE_V1_IRRL_ENTRY_SIZE;
1533        caps->cqc_entry_sz      = HNS_ROCE_V1_CQC_ENTRY_SIZE;
1534        caps->mtpt_entry_sz     = HNS_ROCE_V1_MTPT_ENTRY_SIZE;
1535        caps->mtt_entry_sz      = HNS_ROCE_V1_MTT_ENTRY_SIZE;
1536        caps->cq_entry_sz       = HNS_ROCE_V1_CQE_ENTRY_SIZE;
1537        caps->page_size_cap     = HNS_ROCE_V1_PAGE_SIZE_SUPPORT;
1538        caps->reserved_lkey     = 0;
1539        caps->reserved_pds      = 0;
1540        caps->reserved_mrws     = 1;
1541        caps->reserved_uars     = 0;
1542        caps->reserved_cqs      = 0;
1543        caps->chunk_sz          = HNS_ROCE_V1_TABLE_CHUNK_SIZE;
1544
1545        for (i = 0; i < caps->num_ports; i++)
1546                caps->pkey_table_len[i] = 1;
1547
1548        for (i = 0; i < caps->num_ports; i++) {
1549                /* Six ports shared 16 GID in v1 engine */
1550                if (i >= (HNS_ROCE_V1_GID_NUM % caps->num_ports))
1551                        caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
1552                                                 caps->num_ports;
1553                else
1554                        caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
1555                                                 caps->num_ports + 1;
1556        }
1557
1558        caps->ceqe_depth = HNS_ROCE_V1_COMP_EQE_NUM;
1559        caps->aeqe_depth = HNS_ROCE_V1_ASYNC_EQE_NUM;
1560        caps->local_ca_ack_delay = le32_to_cpu(roce_read(hr_dev,
1561                                                         ROCEE_ACK_DELAY_REG));
1562        caps->max_mtu = IB_MTU_2048;
1563
1564        return 0;
1565}
1566
/*
 * hns_roce_v1_init - one-time bring-up of the v1 engine.
 *
 * Programs the DMAE user-config registers, then initialises the doorbell,
 * RAQ, BT, tptr, destroy-qp and free-mr subsystems in order, unwinding in
 * exact reverse order via the goto ladder on failure, and finally enables
 * the port.
 *
 * Returns 0 on success or the negative errno of the failing step.
 */
static int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
{
	int ret;
	u32 val;
	struct device *dev = &hr_dev->pdev->dev;

	/* DMAE user config */
	val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG1_REG);
	roce_set_field(val, ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M,
		       ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S, 0xf);
	roce_set_field(val, ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M,
		       ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S,
		       1 << PAGES_SHIFT_16);
	roce_write(hr_dev, ROCEE_DMAE_USER_CFG1_REG, val);

	val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG2_REG);
	roce_set_field(val, ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M,
		       ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S, 0xf);
	roce_set_field(val, ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M,
		       ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S,
		       1 << PAGES_SHIFT_16);
	/*
	 * NOTE(review): unlike the CFG1 sequence above, the updated CFG2
	 * value is never written back with roce_write(); the computed
	 * 'val' is dead here.  Confirm with the hardware team whether a
	 * roce_write(hr_dev, ROCEE_DMAE_USER_CFG2_REG, val) is missing.
	 */

	ret = hns_roce_db_init(hr_dev);
	if (ret) {
		dev_err(dev, "doorbell init failed!\n");
		return ret;
	}

	ret = hns_roce_raq_init(hr_dev);
	if (ret) {
		dev_err(dev, "raq init failed!\n");
		goto error_failed_raq_init;
	}

	ret = hns_roce_bt_init(hr_dev);
	if (ret) {
		dev_err(dev, "bt init failed!\n");
		goto error_failed_bt_init;
	}

	ret = hns_roce_tptr_init(hr_dev);
	if (ret) {
		dev_err(dev, "tptr init failed!\n");
		goto error_failed_tptr_init;
	}

	ret = hns_roce_des_qp_init(hr_dev);
	if (ret) {
		dev_err(dev, "des qp init failed!\n");
		goto error_failed_des_qp_init;
	}

	ret = hns_roce_free_mr_init(hr_dev);
	if (ret) {
		dev_err(dev, "free mr init failed!\n");
		goto error_failed_free_mr_init;
	}

	hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP);

	return 0;

error_failed_free_mr_init:
	hns_roce_des_qp_free(hr_dev);

error_failed_des_qp_init:
	hns_roce_tptr_free(hr_dev);

error_failed_tptr_init:
	hns_roce_bt_free(hr_dev);

error_failed_bt_init:
	hns_roce_raq_free(hr_dev);

error_failed_raq_init:
	hns_roce_db_free(hr_dev);
	return ret;
}
1645
/*
 * hns_roce_v1_exit - tear down everything hns_roce_v1_init() set up.
 *
 * The sequence is the exact reverse of initialisation: take the port
 * down first, then release free-mr, destroy-qp, tptr, BT, RAQ and
 * finally the doorbell resources.  Do not reorder.
 */
static void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
{
	hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
	hns_roce_free_mr_free(hr_dev);
	hns_roce_des_qp_free(hr_dev);
	hns_roce_tptr_free(hr_dev);
	hns_roce_bt_free(hr_dev);
	hns_roce_raq_free(hr_dev);
	hns_roce_db_free(hr_dev);
}
1656
1657static int hns_roce_v1_cmd_pending(struct hns_roce_dev *hr_dev)
1658{
1659        u32 status = readl(hr_dev->reg_base + ROCEE_MB6_REG);
1660
1661        return (!!(status & (1 << HCR_GO_BIT)));
1662}
1663
1664static int hns_roce_v1_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
1665                                 u64 out_param, u32 in_modifier, u8 op_modifier,
1666                                 u16 op, u16 token, int event)
1667{
1668        u32 __iomem *hcr = (u32 __iomem *)(hr_dev->reg_base + ROCEE_MB1_REG);
1669        unsigned long end;
1670        u32 val = 0;
1671
1672        end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies;
1673        while (hns_roce_v1_cmd_pending(hr_dev)) {
1674                if (time_after(jiffies, end)) {
1675                        dev_err(hr_dev->dev, "jiffies=%d end=%d\n",
1676                                (int)jiffies, (int)end);
1677                        return -EAGAIN;
1678                }
1679                cond_resched();
1680        }
1681
1682        roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S,
1683                       op);
1684        roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_MDF_M,
1685                       ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier);
1686        roce_set_bit(val, ROCEE_MB6_ROCEE_MB_EVENT_S, event);
1687        roce_set_bit(val, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1);
1688        roce_set_field(val, ROCEE_MB6_ROCEE_MB_TOKEN_M,
1689                       ROCEE_MB6_ROCEE_MB_TOKEN_S, token);
1690
1691        writeq(in_param, hcr + 0);
1692        writeq(out_param, hcr + 2);
1693        writel(in_modifier, hcr + 4);
1694        /* Memory barrier */
1695        wmb();
1696
1697        writel(val, hcr + 5);
1698
1699        mmiowb();
1700
1701        return 0;
1702}
1703
1704static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev,
1705                                unsigned long timeout)
1706{
1707        u8 __iomem *hcr = hr_dev->reg_base + ROCEE_MB1_REG;
1708        unsigned long end = 0;
1709        u32 status = 0;
1710
1711        end = msecs_to_jiffies(timeout) + jiffies;
1712        while (hns_roce_v1_cmd_pending(hr_dev) && time_before(jiffies, end))
1713                cond_resched();
1714
1715        if (hns_roce_v1_cmd_pending(hr_dev)) {
1716                dev_err(hr_dev->dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
1717                return -ETIMEDOUT;
1718        }
1719
1720        status = le32_to_cpu((__force __be32)
1721                              __raw_readl(hcr + HCR_STATUS_OFFSET));
1722        if ((status & STATUS_MASK) != 0x1) {
1723                dev_err(hr_dev->dev, "mailbox status 0x%x!\n", status);
1724                return -EBUSY;
1725        }
1726
1727        return 0;
1728}
1729
1730static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port,
1731                               int gid_index, union ib_gid *gid,
1732                               const struct ib_gid_attr *attr)
1733{
1734        u32 *p = NULL;
1735        u8 gid_idx = 0;
1736
1737        gid_idx = hns_get_gid_index(hr_dev, port, gid_index);
1738
1739        p = (u32 *)&gid->raw[0];
1740        roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_L_0_REG +
1741                       (HNS_ROCE_V1_GID_NUM * gid_idx));
1742
1743        p = (u32 *)&gid->raw[4];
1744        roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_ML_0_REG +
1745                       (HNS_ROCE_V1_GID_NUM * gid_idx));
1746
1747        p = (u32 *)&gid->raw[8];
1748        roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_MH_0_REG +
1749                       (HNS_ROCE_V1_GID_NUM * gid_idx));
1750
1751        p = (u32 *)&gid->raw[0xc];
1752        roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_H_0_REG +
1753                       (HNS_ROCE_V1_GID_NUM * gid_idx));
1754
1755        return 0;
1756}
1757
1758static int hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
1759                               u8 *addr)
1760{
1761        u32 reg_smac_l;
1762        u16 reg_smac_h;
1763        u16 *p_h;
1764        u32 *p;
1765        u32 val;
1766
1767        /*
1768         * When mac changed, loopback may fail
1769         * because of smac not equal to dmac.
1770         * We Need to release and create reserved qp again.
1771         */
1772        if (hr_dev->hw->dereg_mr) {
1773                int ret;
1774
1775                ret = hns_roce_v1_recreate_lp_qp(hr_dev);
1776                if (ret && ret != -ETIMEDOUT)
1777                        return ret;
1778        }
1779
1780        p = (u32 *)(&addr[0]);
1781        reg_smac_l = *p;
1782        roce_raw_write(reg_smac_l, hr_dev->reg_base + ROCEE_SMAC_L_0_REG +
1783                       PHY_PORT_OFFSET * phy_port);
1784
1785        val = roce_read(hr_dev,
1786                        ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
1787        p_h = (u16 *)(&addr[4]);
1788        reg_smac_h  = *p_h;
1789        roce_set_field(val, ROCEE_SMAC_H_ROCEE_SMAC_H_M,
1790                       ROCEE_SMAC_H_ROCEE_SMAC_H_S, reg_smac_h);
1791        roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
1792                   val);
1793
1794        return 0;
1795}
1796
1797static void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
1798                                enum ib_mtu mtu)
1799{
1800        u32 val;
1801
1802        val = roce_read(hr_dev,
1803                        ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
1804        roce_set_field(val, ROCEE_SMAC_H_ROCEE_PORT_MTU_M,
1805                       ROCEE_SMAC_H_ROCEE_PORT_MTU_S, mtu);
1806        roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
1807                   val);
1808}
1809
/*
 * hns_roce_v1_write_mtpt - build a v1 MPT (memory protection table) entry
 * in the mailbox buffer for the given MR.
 * @mb_buf: mailbox buffer the entry is written into.
 * @mr: the memory region being registered.
 * @mtpt_idx: index of this entry in the MPT.
 *
 * For a DMA MR only the header fields are filled.  For a user MR, up to
 * HNS_ROCE_MAX_INNER_MTPT_NUM page addresses are additionally recorded
 * inline in the entry, and the PBL base address is set.
 *
 * Returns 0 on success or -ENOMEM if the temporary page buffer cannot be
 * allocated.
 */
static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
				  unsigned long mtpt_idx)
{
	struct hns_roce_v1_mpt_entry *mpt_entry;
	struct scatterlist *sg;
	u64 *pages;
	int entry;
	int i;

	/* MPT filled into mailbox buf */
	mpt_entry = (struct hns_roce_v1_mpt_entry *)mb_buf;
	memset(mpt_entry, 0, sizeof(*mpt_entry));

	/* Key, page size, ownership and access-flag bits of byte_4. */
	roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_STATE_M,
		       MPT_BYTE_4_KEY_STATE_S, KEY_VALID);
	roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_M,
		       MPT_BYTE_4_KEY_S, mr->key);
	roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_PAGE_SIZE_M,
		       MPT_BYTE_4_PAGE_SIZE_S, MR_SIZE_4K);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_TYPE_S, 0);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_BIND_ENABLE_S,
		     (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_OWN_S, 0);
	roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_MEMORY_LOCATION_TYPE_M,
		       MPT_BYTE_4_MEMORY_LOCATION_TYPE_S, mr->type);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_ATOMIC_S, 0);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_LOCAL_WRITE_S,
		     (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_WRITE_S,
		     (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_READ_S,
		     (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_INVAL_ENABLE_S,
		     0);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_ADDRESS_TYPE_S, 0);

	roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
		       MPT_BYTE_12_PBL_ADDR_H_S, 0);
	roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_MW_BIND_COUNTER_M,
		       MPT_BYTE_12_MW_BIND_COUNTER_S, 0);

	/* Virtual address and length, split into 32-bit halves. */
	mpt_entry->virt_addr_l = (u32)mr->iova;
	mpt_entry->virt_addr_h = (u32)(mr->iova >> 32);
	mpt_entry->length = (u32)mr->size;

	/* PD and the split L_KEY index (low part here, high part below). */
	roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_PD_M,
		       MPT_BYTE_28_PD_S, mr->pd);
	roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_L_KEY_IDX_L_M,
		       MPT_BYTE_28_L_KEY_IDX_L_S, mtpt_idx);
	roce_set_field(mpt_entry->mpt_byte_64, MPT_BYTE_64_L_KEY_IDX_H_M,
		       MPT_BYTE_64_L_KEY_IDX_H_S, mtpt_idx >> MTPT_IDX_SHIFT);

	/* DMA memory register */
	if (mr->type == MR_TYPE_DMA)
		return 0;

	/* Scratch page for collecting up to a page worth of PFNs. */
	pages = (u64 *) __get_free_page(GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/*
	 * Collect 4K-page frame numbers (dma_addr >> 12) from the umem.
	 * Note the bounds check runs after the store, so index
	 * HNS_ROCE_MAX_INNER_MTPT_NUM may be written once; that slot is
	 * never read back and the buffer is a full page, so it is benign.
	 */
	i = 0;
	for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
		pages[i] = ((u64)sg_dma_address(sg)) >> 12;

		/* Directly record to MTPT table firstly 7 entry */
		if (i >= HNS_ROCE_MAX_INNER_MTPT_NUM)
			break;
		i++;
	}

	/*
	 * Register user mr: scatter the first 7 page addresses into their
	 * fixed low/high field positions inside the MPT entry.
	 *
	 * NOTE(review): passing cpu_to_le32() results as roce_set_field()
	 * values while other fields take raw CPU values looks like a
	 * double conversion on big-endian — confirm against the roce_set_*
	 * helpers' endianness contract.
	 */
	for (i = 0; i < HNS_ROCE_MAX_INNER_MTPT_NUM; i++) {
		switch (i) {
		case 0:
			mpt_entry->pa0_l = cpu_to_le32((u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_36,
				MPT_BYTE_36_PA0_H_M,
				MPT_BYTE_36_PA0_H_S,
				cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_32)));
			break;
		case 1:
			roce_set_field(mpt_entry->mpt_byte_36,
				       MPT_BYTE_36_PA1_L_M,
				       MPT_BYTE_36_PA1_L_S,
				       cpu_to_le32((u32)(pages[i])));
			roce_set_field(mpt_entry->mpt_byte_40,
				MPT_BYTE_40_PA1_H_M,
				MPT_BYTE_40_PA1_H_S,
				cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_24)));
			break;
		case 2:
			roce_set_field(mpt_entry->mpt_byte_40,
				       MPT_BYTE_40_PA2_L_M,
				       MPT_BYTE_40_PA2_L_S,
				       cpu_to_le32((u32)(pages[i])));
			roce_set_field(mpt_entry->mpt_byte_44,
				MPT_BYTE_44_PA2_H_M,
				MPT_BYTE_44_PA2_H_S,
				cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_16)));
			break;
		case 3:
			roce_set_field(mpt_entry->mpt_byte_44,
				       MPT_BYTE_44_PA3_L_M,
				       MPT_BYTE_44_PA3_L_S,
				       cpu_to_le32((u32)(pages[i])));
			roce_set_field(mpt_entry->mpt_byte_48,
				MPT_BYTE_48_PA3_H_M,
				MPT_BYTE_48_PA3_H_S,
				cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_8)));
			break;
		case 4:
			mpt_entry->pa4_l = cpu_to_le32((u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_56,
				MPT_BYTE_56_PA4_H_M,
				MPT_BYTE_56_PA4_H_S,
				cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_32)));
			break;
		case 5:
			roce_set_field(mpt_entry->mpt_byte_56,
				       MPT_BYTE_56_PA5_L_M,
				       MPT_BYTE_56_PA5_L_S,
				       cpu_to_le32((u32)(pages[i])));
			roce_set_field(mpt_entry->mpt_byte_60,
				MPT_BYTE_60_PA5_H_M,
				MPT_BYTE_60_PA5_H_S,
				cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_24)));
			break;
		case 6:
			roce_set_field(mpt_entry->mpt_byte_60,
				       MPT_BYTE_60_PA6_L_M,
				       MPT_BYTE_60_PA6_L_S,
				       cpu_to_le32((u32)(pages[i])));
			roce_set_field(mpt_entry->mpt_byte_64,
				MPT_BYTE_64_PA6_H_M,
				MPT_BYTE_64_PA6_H_S,
				cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_16)));
			break;
		default:
			break;
		}
	}

	free_page((unsigned long) pages);

	/* PBL base address, low 32 bits here, high bits in byte_12. */
	mpt_entry->pbl_addr_l = (u32)(mr->pbl_dma_addr);

	roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
		       MPT_BYTE_12_PBL_ADDR_H_S,
		       ((u32)(mr->pbl_dma_addr >> 32)));

	return 0;
}
1962
1963static void *get_cqe(struct hns_roce_cq *hr_cq, int n)
1964{
1965        return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
1966                                   n * HNS_ROCE_V1_CQE_ENTRY_SIZE);
1967}
1968
1969static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
1970{
1971        struct hns_roce_cqe *hr_cqe = get_cqe(hr_cq, n & hr_cq->ib_cq.cqe);
1972
1973        /* Get cqe when Owner bit is Conversely with the MSB of cons_idx */
1974        return (roce_get_bit(hr_cqe->cqe_byte_4, CQE_BYTE_4_OWNER_S) ^
1975                !!(n & (hr_cq->ib_cq.cqe + 1))) ? hr_cqe : NULL;
1976}
1977
/* Peek at the CQE at the current consumer index, if software-owned. */
static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq)
{
	return get_sw_cqe(hr_cq, hr_cq->cons_index);
}
1982
1983static void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
1984{
1985        u32 doorbell[2];
1986
1987        doorbell[0] = cons_index & ((hr_cq->cq_depth << 1) - 1);
1988        doorbell[1] = 0;
1989        roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
1990        roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
1991                       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
1992        roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
1993                       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 0);
1994        roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
1995                       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S, hr_cq->cqn);
1996
1997        hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
1998}
1999
/*
 * __hns_roce_v1_cq_clean - purge all CQEs belonging to @qpn from the CQ.
 *
 * Used while destroying a QP.  Must be called with hr_cq->lock held (see
 * hns_roce_v1_cq_clean()).  @srq is accepted for interface symmetry but
 * unused: the v1 engine has no SRQ support.
 */
static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
				   struct hns_roce_srq *srq)
{
	struct hns_roce_cqe *cqe, *dest;
	u32 prod_index;
	int nfreed = 0;
	u8 owner_bit;

	/*
	 * Walk forward from the consumer index to the first CQE still owned
	 * by hardware (bounded to one full lap of the ring).
	 */
	for (prod_index = hr_cq->cons_index; get_sw_cqe(hr_cq, prod_index);
	     ++prod_index) {
		if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe)
			break;
	}

	/*
	 * Now backwards through the CQ, removing CQ entries
	 * that match our QP by overwriting them with next entries.
	 */
	while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
		cqe = get_cqe(hr_cq, prod_index & hr_cq->ib_cq.cqe);
		if ((roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
				     CQE_BYTE_16_LOCAL_QPN_S) &
				     HNS_ROCE_CQE_QPN_MASK) == qpn) {
			/* In v1 engine, not support SRQ */
			++nfreed;
		} else if (nfreed) {
			/*
			 * Slide this CQE up over the freed slots, keeping the
			 * destination slot's owner bit so the sw/hw ownership
			 * protocol stays intact.
			 */
			dest = get_cqe(hr_cq, (prod_index + nfreed) &
				       hr_cq->ib_cq.cqe);
			owner_bit = roce_get_bit(dest->cqe_byte_4,
						 CQE_BYTE_4_OWNER_S);
			memcpy(dest, cqe, sizeof(*cqe));
			roce_set_bit(dest->cqe_byte_4, CQE_BYTE_4_OWNER_S,
				     owner_bit);
		}
	}

	if (nfreed) {
		hr_cq->cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();

		hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
	}
}
2047
/* Locked wrapper around __hns_roce_v1_cq_clean(). */
static void hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
				 struct hns_roce_srq *srq)
{
	spin_lock_irq(&hr_cq->lock);
	__hns_roce_v1_cq_clean(hr_cq, qpn, srq);
	spin_unlock_irq(&hr_cq->lock);
}
2055
/*
 * hns_roce_v1_write_cqc - build a CQ context entry in the mailbox buffer.
 * @mb_buf: mailbox buffer the context is written into.
 * @mtts: MTT entries describing the CQ buffer (mtts[0] is the first BA).
 * @dma_handle: DMA address of the CQ's base-address table.
 * @nent: number of CQEs (power of two; only its log2 is programmed).
 * @vector: completion event queue (CEQ) number for this CQ.
 *
 * Also resolves and records the per-CQ tptr slot from the driver's tptr
 * table, storing its kernel address in hr_cq->tptr_addr for later use.
 *
 * NOTE(review): several fields are converted with cpu_to_le32() while
 * whole words are converted again afterwards (e.g. cqc_byte_20); this
 * mixed endian handling mirrors the rest of the v1 code but looks
 * fragile on big-endian — confirm against the roce_set_* helpers.
 */
static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
				  struct hns_roce_cq *hr_cq, void *mb_buf,
				  u64 *mtts, dma_addr_t dma_handle, int nent,
				  u32 vector)
{
	struct hns_roce_cq_context *cq_context = NULL;
	struct hns_roce_buf_list *tptr_buf;
	struct hns_roce_v1_priv *priv;
	dma_addr_t tptr_dma_addr;
	int offset;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	tptr_buf = &priv->tptr_table.tptr_buf;

	cq_context = mb_buf;
	memset(cq_context, 0, sizeof(*cq_context));

	/* Get the tptr for this CQ. */
	offset = hr_cq->cqn * HNS_ROCE_V1_TPTR_ENTRY_SIZE;
	tptr_dma_addr = tptr_buf->map + offset;
	hr_cq->tptr_addr = (u16 *)(tptr_buf->buf + offset);

	/* Register cq_context members */
	roce_set_field(cq_context->cqc_byte_4,
		       CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_M,
		       CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S, CQ_STATE_VALID);
	roce_set_field(cq_context->cqc_byte_4, CQ_CONTEXT_CQC_BYTE_4_CQN_M,
		       CQ_CONTEXT_CQC_BYTE_4_CQN_S, hr_cq->cqn);
	cq_context->cqc_byte_4 = cpu_to_le32(cq_context->cqc_byte_4);

	/* CQ base-address table, split 32/high bits across two fields. */
	cq_context->cq_bt_l = (u32)dma_handle;
	cq_context->cq_bt_l = cpu_to_le32(cq_context->cq_bt_l);

	roce_set_field(cq_context->cqc_byte_12,
		       CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_M,
		       CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S,
		       ((u64)dma_handle >> 32));
	roce_set_field(cq_context->cqc_byte_12,
		       CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M,
		       CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S,
		       ilog2((unsigned int)nent));
	roce_set_field(cq_context->cqc_byte_12, CQ_CONTEXT_CQC_BYTE_12_CEQN_M,
		       CQ_CONTEXT_CQC_BYTE_12_CEQN_S, vector);
	cq_context->cqc_byte_12 = cpu_to_le32(cq_context->cqc_byte_12);

	/* Current CQE base address comes from the first MTT entry. */
	cq_context->cur_cqe_ba0_l = (u32)(mtts[0]);
	cq_context->cur_cqe_ba0_l = cpu_to_le32(cq_context->cur_cqe_ba0_l);

	roce_set_field(cq_context->cqc_byte_20,
		       CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_M,
		       CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S,
		       cpu_to_le32((mtts[0]) >> 32));
	/* Dedicated hardware, directly set 0 */
	roce_set_field(cq_context->cqc_byte_20,
		       CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_M,
		       CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S, 0);
	/**
	 * 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of
	 * using 4K page, and shift more 32 because of
	 * caculating the high 32 bit value evaluated to hardware.
	 */
	roce_set_field(cq_context->cqc_byte_20,
		       CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M,
		       CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S,
		       tptr_dma_addr >> 44);
	cq_context->cqc_byte_20 = cpu_to_le32(cq_context->cqc_byte_20);

	cq_context->cqe_tptr_addr_l = (u32)(tptr_dma_addr >> 12);

	/* Flags default to 0; notification type is armed later via the DB. */
	roce_set_field(cq_context->cqc_byte_32,
		       CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M,
		       CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S, 0);
	roce_set_bit(cq_context->cqc_byte_32,
		     CQ_CONTEXT_CQC_BYTE_32_SE_FLAG_S, 0);
	roce_set_bit(cq_context->cqc_byte_32,
		     CQ_CONTEXT_CQC_BYTE_32_CE_FLAG_S, 0);
	roce_set_bit(cq_context->cqc_byte_32,
		     CQ_CONTEXT_CQC_BYTE_32_NOTIFICATION_FLAG_S, 0);
	roce_set_bit(cq_context->cqc_byte_32,
		     CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S,
		     0);
	/* The initial value of cq's ci is 0 */
	roce_set_field(cq_context->cqc_byte_32,
		       CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M,
		       CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S, 0);
	cq_context->cqc_byte_32 = cpu_to_le32(cq_context->cqc_byte_32);
}
2143
/* CQ moderation (count/period) is not supported by the v1 hardware. */
static int hns_roce_v1_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return -EOPNOTSUPP;
}
2148
2149static int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq,
2150                                     enum ib_cq_notify_flags flags)
2151{
2152        struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2153        u32 notification_flag;
2154        u32 doorbell[2];
2155
2156        notification_flag = (flags & IB_CQ_SOLICITED_MASK) ==
2157                            IB_CQ_SOLICITED ? CQ_DB_REQ_NOT : CQ_DB_REQ_NOT_SOL;
2158        /*
2159         * flags = 0; Notification Flag = 1, next
2160         * flags = 1; Notification Flag = 0, solocited
2161         */
2162        doorbell[0] = hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1);
2163        roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
2164        roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
2165                       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
2166        roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
2167                       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 1);
2168        roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
2169                       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S,
2170                       hr_cq->cqn | notification_flag);
2171
2172        hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
2173
2174        return 0;
2175}
2176
/*
 * hns_roce_v1_poll_one() - consume one CQE from @hr_cq and translate it
 * into an ib_wc work completion.
 * @hr_cq: completion queue being polled (caller holds hr_cq->lock).
 * @cur_qp: in/out cache of the QP the previous CQE belonged to; refreshed
 *          via __hns_roce_qp_lookup() when the QPN changes.
 * @wc: work completion entry to fill in.
 *
 * Return: 0 when one completion was written to @wc, -EAGAIN when the CQ
 * is empty, -EINVAL when the CQE references an unknown QPN.
 */
static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
				struct hns_roce_qp **cur_qp, struct ib_wc *wc)
{
	int qpn;
	int is_send;
	u16 wqe_ctr;
	u32 status;
	u32 opcode;
	struct hns_roce_cqe *cqe;
	struct hns_roce_qp *hr_qp;
	struct hns_roce_wq *wq;
	struct hns_roce_wqe_ctrl_seg *sq_wqe;
	struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
	struct device *dev = &hr_dev->pdev->dev;

	/* Find cqe according consumer index */
	cqe = next_cqe_sw(hr_cq);
	if (!cqe)
		return -EAGAIN;

	++hr_cq->cons_index;
	/*
	 * Memory barrier: do not read the CQE payload below until the
	 * ownership check in next_cqe_sw() above has completed.
	 */
	rmb();
	/* 0->SQ, 1->RQ */
	is_send  = !(roce_get_bit(cqe->cqe_byte_4, CQE_BYTE_4_SQ_RQ_FLAG_S));

	/* Local_qpn in UD cqe is always 1, so it needs to compute new qpn */
	if (roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
			   CQE_BYTE_16_LOCAL_QPN_S) <= 1) {
		/* Recover the real QPN from the port number and local QPN */
		qpn = roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_PORT_NUM_M,
				     CQE_BYTE_20_PORT_NUM_S) +
		      roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
				     CQE_BYTE_16_LOCAL_QPN_S) *
				     HNS_ROCE_MAX_PORTS;
	} else {
		qpn = roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
				     CQE_BYTE_16_LOCAL_QPN_S);
	}

	/* Refresh the cached QP only when this CQE is for a different QP */
	if (!*cur_qp || (qpn & HNS_ROCE_CQE_QPN_MASK) != (*cur_qp)->qpn) {
		hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
		if (unlikely(!hr_qp)) {
			dev_err(dev, "CQ %06lx with entry for unknown QPN %06x\n",
				hr_cq->cqn, (qpn & HNS_ROCE_CQE_QPN_MASK));
			return -EINVAL;
		}

		*cur_qp = hr_qp;
	}

	wc->qp = &(*cur_qp)->ibqp;
	wc->vendor_err = 0;

	/* Map the hardware completion status onto ib_wc_status */
	status = roce_get_field(cqe->cqe_byte_4,
				CQE_BYTE_4_STATUS_OF_THE_OPERATION_M,
				CQE_BYTE_4_STATUS_OF_THE_OPERATION_S) &
				HNS_ROCE_CQE_STATUS_MASK;
	switch (status) {
	case HNS_ROCE_CQE_SUCCESS:
		wc->status = IB_WC_SUCCESS;
		break;
	case HNS_ROCE_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_WR_FLUSH_ERR:
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_MEM_MANAGE_OPERATE_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	/* CQE status error, directly return */
	if (wc->status != IB_WC_SUCCESS)
		return 0;

	if (is_send) {
		/*
		 * SQ completion: the opcode lives in the send WQE that this
		 * CQE indexes, not in the CQE itself.
		 */
		sq_wqe = get_send_wqe(*cur_qp, roce_get_field(cqe->cqe_byte_4,
						CQE_BYTE_4_WQE_INDEX_M,
						CQE_BYTE_4_WQE_INDEX_S)&
						((*cur_qp)->sq.wqe_cnt-1));
		switch (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_OPCODE_MASK) {
		case HNS_ROCE_WQE_OPCODE_SEND:
			wc->opcode = IB_WC_SEND;
			break;
		case HNS_ROCE_WQE_OPCODE_RDMA_READ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = le32_to_cpu(cqe->byte_cnt);
			break;
		case HNS_ROCE_WQE_OPCODE_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case HNS_ROCE_WQE_OPCODE_LOCAL_INV:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		case HNS_ROCE_WQE_OPCODE_UD_SEND:
			wc->opcode = IB_WC_SEND;
			break;
		default:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		}
		wc->wc_flags = (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_IMM ?
				IB_WC_WITH_IMM : 0);

		wq = &(*cur_qp)->sq;
		if ((*cur_qp)->sq_signal_bits) {
			/*
			 * If sq_signal_bits is set, not every send WQE posts
			 * a CQE, so first advance the tail pointer to the
			 * WQE this CQE corresponds to.
			 */
			wqe_ctr = (u16)roce_get_field(cqe->cqe_byte_4,
						      CQE_BYTE_4_WQE_INDEX_M,
						      CQE_BYTE_4_WQE_INDEX_S);
			wq->tail += (wqe_ctr - (u16)wq->tail) &
				    (wq->wqe_cnt - 1);
		}
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else {
		/* RQ completion: the opcode comes from the CQE itself */
		wc->byte_len = le32_to_cpu(cqe->byte_cnt);
		opcode = roce_get_field(cqe->cqe_byte_4,
					CQE_BYTE_4_OPERATION_TYPE_M,
					CQE_BYTE_4_OPERATION_TYPE_S) &
					HNS_ROCE_CQE_OPCODE_MASK;
		switch (opcode) {
		case HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE:
			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data =
				cpu_to_be32(le32_to_cpu(cqe->immediate_data));
			break;
		case HNS_ROCE_OPCODE_SEND_DATA_RECEIVE:
			if (roce_get_bit(cqe->cqe_byte_4,
					 CQE_BYTE_4_IMM_INDICATOR_S)) {
				wc->opcode = IB_WC_RECV;
				wc->wc_flags = IB_WC_WITH_IMM;
				wc->ex.imm_data = cpu_to_be32(
					le32_to_cpu(cqe->immediate_data));
			} else {
				wc->opcode = IB_WC_RECV;
				wc->wc_flags = 0;
			}
			break;
		default:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		}

		/* Update tail pointer, record wr_id */
		wq = &(*cur_qp)->rq;
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
		/* Fill in the receive-side metadata from the CQE */
		wc->sl = (u8)roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_SL_M,
					    CQE_BYTE_20_SL_S);
		wc->src_qp = (u8)roce_get_field(cqe->cqe_byte_20,
						CQE_BYTE_20_REMOTE_QPN_M,
						CQE_BYTE_20_REMOTE_QPN_S);
		wc->wc_flags |= (roce_get_bit(cqe->cqe_byte_20,
					      CQE_BYTE_20_GRH_PRESENT_S) ?
					      IB_WC_GRH : 0);
		wc->pkey_index = (u16)roce_get_field(cqe->cqe_byte_28,
						     CQE_BYTE_28_P_KEY_IDX_M,
						     CQE_BYTE_28_P_KEY_IDX_S);
	}

	return 0;
}
2378
2379int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
2380{
2381        struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2382        struct hns_roce_qp *cur_qp = NULL;
2383        unsigned long flags;
2384        int npolled;
2385        int ret = 0;
2386
2387        spin_lock_irqsave(&hr_cq->lock, flags);
2388
2389        for (npolled = 0; npolled < num_entries; ++npolled) {
2390                ret = hns_roce_v1_poll_one(hr_cq, &cur_qp, wc + npolled);
2391                if (ret)
2392                        break;
2393        }
2394
2395        if (npolled) {
2396                *hr_cq->tptr_addr = hr_cq->cons_index &
2397                        ((hr_cq->cq_depth << 1) - 1);
2398
2399                /* Memroy barrier */
2400                wmb();
2401                hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
2402        }
2403
2404        spin_unlock_irqrestore(&hr_cq->lock, flags);
2405
2406        if (ret == 0 || ret == -EAGAIN)
2407                return npolled;
2408        else
2409                return ret;
2410}
2411
2412static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
2413                                 struct hns_roce_hem_table *table, int obj,
2414                                 int step_idx)
2415{
2416        struct device *dev = &hr_dev->pdev->dev;
2417        struct hns_roce_v1_priv *priv;
2418        unsigned long end = 0, flags = 0;
2419        uint32_t bt_cmd_val[2] = {0};
2420        void __iomem *bt_cmd;
2421        u64 bt_ba = 0;
2422
2423        priv = (struct hns_roce_v1_priv *)hr_dev->priv;
2424
2425        switch (table->type) {
2426        case HEM_TYPE_QPC:
2427                roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
2428                        ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC);
2429                bt_ba = priv->bt_table.qpc_buf.map >> 12;
2430                break;
2431        case HEM_TYPE_MTPT:
2432                roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
2433                        ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_MTPT);
2434                bt_ba = priv->bt_table.mtpt_buf.map >> 12;
2435                break;
2436        case HEM_TYPE_CQC:
2437                roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
2438                        ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC);
2439                bt_ba = priv->bt_table.cqc_buf.map >> 12;
2440                break;
2441        case HEM_TYPE_SRQC:
2442                dev_dbg(dev, "HEM_TYPE_SRQC not support.\n");
2443                return -EINVAL;
2444        default:
2445                return 0;
2446        }
2447        roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
2448                ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
2449        roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
2450        roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
2451
2452        spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags);
2453
2454        bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
2455
2456        end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies;
2457        while (1) {
2458                if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
2459                        if (!(time_before(jiffies, end))) {
2460                                dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n");
2461                                spin_unlock_irqrestore(&hr_dev->bt_cmd_lock,
2462                                        flags);
2463                                return -EBUSY;
2464                        }
2465                } else {
2466                        break;
2467                }
2468                msleep(HW_SYNC_SLEEP_TIME_INTERVAL);
2469        }
2470
2471        bt_cmd_val[0] = (uint32_t)bt_ba;
2472        roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
2473                ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32);
2474        hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
2475
2476        spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags);
2477
2478        return 0;
2479}
2480
2481static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
2482                                 struct hns_roce_mtt *mtt,
2483                                 enum hns_roce_qp_state cur_state,
2484                                 enum hns_roce_qp_state new_state,
2485                                 struct hns_roce_qp_context *context,
2486                                 struct hns_roce_qp *hr_qp)
2487{
2488        static const u16
2489        op[HNS_ROCE_QP_NUM_STATE][HNS_ROCE_QP_NUM_STATE] = {
2490                [HNS_ROCE_QP_STATE_RST] = {
2491                [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2492                [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2493                [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
2494                },
2495                [HNS_ROCE_QP_STATE_INIT] = {
2496                [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2497                [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2498                /* Note: In v1 engine, HW doesn't support RST2INIT.
2499                 * We use RST2INIT cmd instead of INIT2INIT.
2500                 */
2501                [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
2502                [HNS_ROCE_QP_STATE_RTR] = HNS_ROCE_CMD_INIT2RTR_QP,
2503                },
2504                [HNS_ROCE_QP_STATE_RTR] = {
2505                [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2506                [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2507                [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTR2RTS_QP,
2508                },
2509                [HNS_ROCE_QP_STATE_RTS] = {
2510                [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2511                [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2512                [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTS2RTS_QP,
2513                [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_RTS2SQD_QP,
2514                },
2515                [HNS_ROCE_QP_STATE_SQD] = {
2516                [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2517                [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2518                [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_SQD2RTS_QP,
2519                [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_SQD2SQD_QP,
2520                },
2521                [HNS_ROCE_QP_STATE_ERR] = {
2522                [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2523                [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2524                }
2525        };
2526
2527        struct hns_roce_cmd_mailbox *mailbox;
2528        struct device *dev = &hr_dev->pdev->dev;
2529        int ret = 0;
2530
2531        if (cur_state >= HNS_ROCE_QP_NUM_STATE ||
2532            new_state >= HNS_ROCE_QP_NUM_STATE ||
2533            !op[cur_state][new_state]) {
2534                dev_err(dev, "[modify_qp]not support state %d to %d\n",
2535                        cur_state, new_state);
2536                return -EINVAL;
2537        }
2538
2539        if (op[cur_state][new_state] == HNS_ROCE_CMD_2RST_QP)
2540                return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
2541                                         HNS_ROCE_CMD_2RST_QP,
2542                                         HNS_ROCE_CMD_TIMEOUT_MSECS);
2543
2544        if (op[cur_state][new_state] == HNS_ROCE_CMD_2ERR_QP)
2545                return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
2546                                         HNS_ROCE_CMD_2ERR_QP,
2547                                         HNS_ROCE_CMD_TIMEOUT_MSECS);
2548
2549        mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
2550        if (IS_ERR(mailbox))
2551                return PTR_ERR(mailbox);
2552
2553        memcpy(mailbox->buf, context, sizeof(*context));
2554
2555        ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
2556                                op[cur_state][new_state],
2557                                HNS_ROCE_CMD_TIMEOUT_MSECS);
2558
2559        hns_roce_free_cmd_mailbox(hr_dev, mailbox);
2560        return ret;
2561}
2562
/*
 * hns_roce_v1_m_sqp() - modify the special QP (QP1/GSI) state.
 * @ibqp: the special QP.
 * @attr: requested attributes (pkey_index is consumed on RESET->INIT).
 * @attr_mask: mask of valid fields in @attr (unused in this path).
 * @cur_state / @new_state: current and requested QP states.
 *
 * On RESET->INIT the QP1 context is built in memory and then copied
 * word-by-word into the per-port QP1C register block; for every
 * transition the QP state field of that register block is updated.
 * On transition to RESET, both CQs are cleaned of this QP's CQEs and
 * the software queue indices are rewound.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EINVAL when the
 * QP buffer MTTs cannot be found.
 */
static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
			     int attr_mask, enum ib_qp_state cur_state,
			     enum ib_qp_state new_state)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_sqp_context *context;
	struct device *dev = &hr_dev->pdev->dev;
	dma_addr_t dma_handle = 0;
	int rq_pa_start;
	u32 reg_val;
	u64 *mtts;
	u32 __iomem *addr;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	/* Search QP buf's MTTs */
	mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
				   hr_qp->mtt.first_seg, &dma_handle);
	if (!mtts) {
		dev_err(dev, "qp buf pa find failed\n");
		goto out;
	}

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		/* SQ/RQ WQE shifts are log2 of the queue depths */
		roce_set_field(context->qp1c_bytes_4,
			       QP1C_BYTES_4_SQ_WQE_SHIFT_M,
			       QP1C_BYTES_4_SQ_WQE_SHIFT_S,
			       ilog2((unsigned int)hr_qp->sq.wqe_cnt));
		roce_set_field(context->qp1c_bytes_4,
			       QP1C_BYTES_4_RQ_WQE_SHIFT_M,
			       QP1C_BYTES_4_RQ_WQE_SHIFT_S,
			       ilog2((unsigned int)hr_qp->rq.wqe_cnt));
		roce_set_field(context->qp1c_bytes_4, QP1C_BYTES_4_PD_M,
			       QP1C_BYTES_4_PD_S, to_hr_pd(ibqp->pd)->pdn);

		/* SQ/RQ base-address table: low 32 bits, then high bits */
		context->sq_rq_bt_l = (u32)(dma_handle);
		roce_set_field(context->qp1c_bytes_12,
			       QP1C_BYTES_12_SQ_RQ_BT_H_M,
			       QP1C_BYTES_12_SQ_RQ_BT_H_S,
			       ((u32)(dma_handle >> 32)));

		roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_HEAD_M,
			       QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head);
		roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_PORT_NUM_M,
			       QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port);
		roce_set_bit(context->qp1c_bytes_16,
			     QP1C_BYTES_16_SIGNALING_TYPE_S,
			     hr_qp->sq_signal_bits);
		roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S,
			     1);
		roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S,
			     1);
		roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_QP1_ERR_S,
			     0);

		roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_SQ_HEAD_M,
			       QP1C_BYTES_20_SQ_HEAD_S, hr_qp->sq.head);
		roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_PKEY_IDX_M,
			       QP1C_BYTES_20_PKEY_IDX_S, attr->pkey_index);

		/* First RQ WQE page: the RQ starts rq.offset bytes into the buffer */
		rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
		context->cur_rq_wqe_ba_l = (u32)(mtts[rq_pa_start]);

		roce_set_field(context->qp1c_bytes_28,
			       QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M,
			       QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S,
			       (mtts[rq_pa_start]) >> 32);
		roce_set_field(context->qp1c_bytes_28,
			       QP1C_BYTES_28_RQ_CUR_IDX_M,
			       QP1C_BYTES_28_RQ_CUR_IDX_S, 0);

		roce_set_field(context->qp1c_bytes_32,
			       QP1C_BYTES_32_RX_CQ_NUM_M,
			       QP1C_BYTES_32_RX_CQ_NUM_S,
			       to_hr_cq(ibqp->recv_cq)->cqn);
		roce_set_field(context->qp1c_bytes_32,
			       QP1C_BYTES_32_TX_CQ_NUM_M,
			       QP1C_BYTES_32_TX_CQ_NUM_S,
			       to_hr_cq(ibqp->send_cq)->cqn);

		/* SQ WQEs start at the beginning of the buffer (mtts[0]) */
		context->cur_sq_wqe_ba_l  = (u32)mtts[0];

		roce_set_field(context->qp1c_bytes_40,
			       QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M,
			       QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S,
			       (mtts[0]) >> 32);
		roce_set_field(context->qp1c_bytes_40,
			       QP1C_BYTES_40_SQ_CUR_IDX_M,
			       QP1C_BYTES_40_SQ_CUR_IDX_S, 0);

		/* Copy context to QP1C register */
		addr = (u32 __iomem *)(hr_dev->reg_base +
				       ROCEE_QP1C_CFG0_0_REG +
				       hr_qp->phy_port * sizeof(*context));

		/* Register layout mirrors the context struct, word by word */
		writel(context->qp1c_bytes_4, addr);
		writel(context->sq_rq_bt_l, addr + 1);
		writel(context->qp1c_bytes_12, addr + 2);
		writel(context->qp1c_bytes_16, addr + 3);
		writel(context->qp1c_bytes_20, addr + 4);
		writel(context->cur_rq_wqe_ba_l, addr + 5);
		writel(context->qp1c_bytes_28, addr + 6);
		writel(context->qp1c_bytes_32, addr + 7);
		writel(context->cur_sq_wqe_ba_l, addr + 8);
		writel(context->qp1c_bytes_40, addr + 9);
	}

	/* Modify QP1C status: read-modify-write only the QP state field */
	reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG0_0_REG +
			    hr_qp->phy_port * sizeof(*context));
	roce_set_field(reg_val, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M,
		       ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S, new_state);
	roce_write(hr_dev, ROCEE_QP1C_CFG0_0_REG +
		    hr_qp->phy_port * sizeof(*context), reg_val);

	hr_qp->state = new_state;
	if (new_state == IB_QPS_RESET) {
		/* Drop this QP's stale CQEs and rewind the software queues */
		hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
				     ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
		if (ibqp->send_cq != ibqp->recv_cq)
			hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
					     hr_qp->qpn, NULL);

		hr_qp->rq.head = 0;
		hr_qp->rq.tail = 0;
		hr_qp->sq.head = 0;
		hr_qp->sq.tail = 0;
		hr_qp->sq_next_wqe = 0;
	}

	kfree(context);
	return 0;

out:
	kfree(context);
	return -EINVAL;
}
2703
2704static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
2705                            int attr_mask, enum ib_qp_state cur_state,
2706                            enum ib_qp_state new_state)
2707{
2708        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
2709        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
2710        struct device *dev = &hr_dev->pdev->dev;
2711        struct hns_roce_qp_context *context;
2712        const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
2713        dma_addr_t dma_handle_2 = 0;
2714        dma_addr_t dma_handle = 0;
2715        uint32_t doorbell[2] = {0};
2716        int rq_pa_start = 0;
2717        u64 *mtts_2 = NULL;
2718        int ret = -EINVAL;
2719        u64 *mtts = NULL;
2720        int port;
2721        u8 port_num;
2722        u8 *dmac;
2723        u8 *smac;
2724
2725        context = kzalloc(sizeof(*context), GFP_KERNEL);
2726        if (!context)
2727                return -ENOMEM;
2728
2729        /* Search qp buf's mtts */
2730        mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
2731                                   hr_qp->mtt.first_seg, &dma_handle);
2732        if (mtts == NULL) {
2733                dev_err(dev, "qp buf pa find failed\n");
2734                goto out;
2735        }
2736
2737        /* Search IRRL's mtts */
2738        mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
2739                                     hr_qp->qpn, &dma_handle_2);
2740        if (mtts_2 == NULL) {
2741                dev_err(dev, "qp irrl_table find failed\n");
2742                goto out;
2743        }
2744
2745        /*
2746         * Reset to init
2747         *      Mandatory param:
2748         *      IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS
2749         *      Optional param: NA
2750         */
2751        if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
2752                roce_set_field(context->qpc_bytes_4,
2753                               QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
2754                               QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S,
2755                               to_hr_qp_type(hr_qp->ibqp.qp_type));
2756
2757                roce_set_bit(context->qpc_bytes_4,
2758                             QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0);
2759                roce_set_bit(context->qpc_bytes_4,
2760                             QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
2761                             !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
2762                roce_set_bit(context->qpc_bytes_4,
2763                             QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
2764                             !!(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
2765                             );
2766                roce_set_bit(context->qpc_bytes_4,
2767                             QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S,
2768                             !!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)
2769                             );
2770                roce_set_bit(context->qpc_bytes_4,
2771                             QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1);
2772                roce_set_field(context->qpc_bytes_4,
2773                               QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M,
2774                               QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S,
2775                               ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2776                roce_set_field(context->qpc_bytes_4,
2777                               QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M,
2778                               QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S,
2779                               ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2780                roce_set_field(context->qpc_bytes_4,
2781                               QP_CONTEXT_QPC_BYTES_4_PD_M,
2782                               QP_CONTEXT_QPC_BYTES_4_PD_S,
2783                               to_hr_pd(ibqp->pd)->pdn);
2784                hr_qp->access_flags = attr->qp_access_flags;
2785                roce_set_field(context->qpc_bytes_8,
2786                               QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M,
2787                               QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S,
2788                               to_hr_cq(ibqp->send_cq)->cqn);
2789                roce_set_field(context->qpc_bytes_8,
2790                               QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M,
2791                               QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S,
2792                               to_hr_cq(ibqp->recv_cq)->cqn);
2793
2794                if (ibqp->srq)
2795                        roce_set_field(context->qpc_bytes_12,
2796                                       QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M,
2797                                       QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S,
2798                                       to_hr_srq(ibqp->srq)->srqn);
2799
2800                roce_set_field(context->qpc_bytes_12,
2801                               QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
2802                               QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
2803                               attr->pkey_index);
2804                hr_qp->pkey_index = attr->pkey_index;
2805                roce_set_field(context->qpc_bytes_16,
2806                               QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
2807                               QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
2808
2809        } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
2810                roce_set_field(context->qpc_bytes_4,
2811                               QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
2812                               QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S,
2813                               to_hr_qp_type(hr_qp->ibqp.qp_type));
2814                roce_set_bit(context->qpc_bytes_4,
2815                             QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0);
2816                if (attr_mask & IB_QP_ACCESS_FLAGS) {
2817                        roce_set_bit(context->qpc_bytes_4,
2818                                     QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
2819                                     !!(attr->qp_access_flags &
2820                                     IB_ACCESS_REMOTE_READ));
2821                        roce_set_bit(context->qpc_bytes_4,
2822                                     QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
2823                                     !!(attr->qp_access_flags &
2824                                     IB_ACCESS_REMOTE_WRITE));
2825                } else {
2826                        roce_set_bit(context->qpc_bytes_4,
2827                                     QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
2828                                     !!(hr_qp->access_flags &
2829                                     IB_ACCESS_REMOTE_READ));
2830                        roce_set_bit(context->qpc_bytes_4,
2831                                     QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
2832                                     !!(hr_qp->access_flags &
2833                                     IB_ACCESS_REMOTE_WRITE));
2834                }
2835
2836                roce_set_bit(context->qpc_bytes_4,
2837                             QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1);
2838                roce_set_field(context->qpc_bytes_4,
2839                               QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M,
2840                               QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S,
2841                               ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2842                roce_set_field(context->qpc_bytes_4,
2843                               QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M,
2844                               QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S,
2845                               ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2846                roce_set_field(context->qpc_bytes_4,
2847                               QP_CONTEXT_QPC_BYTES_4_PD_M,
2848                               QP_CONTEXT_QPC_BYTES_4_PD_S,
2849                               to_hr_pd(ibqp->pd)->pdn);
2850
2851                roce_set_field(context->qpc_bytes_8,
2852                               QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M,
2853                               QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S,
2854                               to_hr_cq(ibqp->send_cq)->cqn);
2855                roce_set_field(context->qpc_bytes_8,
2856                               QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M,
2857                               QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S,
2858                               to_hr_cq(ibqp->recv_cq)->cqn);
2859
2860                if (ibqp->srq)
2861                        roce_set_field(context->qpc_bytes_12,
2862                                       QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M,
2863                                       QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S,
2864                                       to_hr_srq(ibqp->srq)->srqn);
2865                if (attr_mask & IB_QP_PKEY_INDEX)
2866                        roce_set_field(context->qpc_bytes_12,
2867                                       QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
2868                                       QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
2869                                       attr->pkey_index);
2870                else
2871                        roce_set_field(context->qpc_bytes_12,
2872                                       QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
2873                                       QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
2874                                       hr_qp->pkey_index);
2875
2876                roce_set_field(context->qpc_bytes_16,
2877                               QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
2878                               QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
2879        } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
2880                if ((attr_mask & IB_QP_ALT_PATH) ||
2881                    (attr_mask & IB_QP_ACCESS_FLAGS) ||
2882                    (attr_mask & IB_QP_PKEY_INDEX) ||
2883                    (attr_mask & IB_QP_QKEY)) {
2884                        dev_err(dev, "INIT2RTR attr_mask error\n");
2885                        goto out;
2886                }
2887
2888                dmac = (u8 *)attr->ah_attr.roce.dmac;
2889
2890                context->sq_rq_bt_l = (u32)(dma_handle);
2891                roce_set_field(context->qpc_bytes_24,
2892                               QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M,
2893                               QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S,
2894                               ((u32)(dma_handle >> 32)));
2895                roce_set_bit(context->qpc_bytes_24,
2896                             QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S,
2897                             1);
2898                roce_set_field(context->qpc_bytes_24,
2899                               QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
2900                               QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S,
2901                               attr->min_rnr_timer);
2902                context->irrl_ba_l = (u32)(dma_handle_2);
2903                roce_set_field(context->qpc_bytes_32,
2904                               QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M,
2905                               QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S,
2906                               ((u32)(dma_handle_2 >> 32)) &
2907                                QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M);
2908                roce_set_field(context->qpc_bytes_32,
2909                               QP_CONTEXT_QPC_BYTES_32_MIG_STATE_M,
2910                               QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S, 0);
2911                roce_set_bit(context->qpc_bytes_32,
2912                             QP_CONTEXT_QPC_BYTE_32_LOCAL_ENABLE_E2E_CREDITS_S,
2913                             1);
2914                roce_set_bit(context->qpc_bytes_32,
2915                             QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S,
2916                             hr_qp->sq_signal_bits);
2917
2918                port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) :
2919                        hr_qp->port;
2920                smac = (u8 *)hr_dev->dev_addr[port];
2921                /* when dmac equals smac or loop_idc is 1, it should loopback */
2922                if (ether_addr_equal_unaligned(dmac, smac) ||
2923                    hr_dev->loop_idc == 0x1)
2924                        roce_set_bit(context->qpc_bytes_32,
2925                              QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S, 1);
2926
2927                roce_set_bit(context->qpc_bytes_32,
2928                             QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S,
2929                             rdma_ah_get_ah_flags(&attr->ah_attr));
2930                roce_set_field(context->qpc_bytes_32,
2931                               QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
2932                               QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S,
2933                               ilog2((unsigned int)attr->max_dest_rd_atomic));
2934
2935                if (attr_mask & IB_QP_DEST_QPN)
2936                        roce_set_field(context->qpc_bytes_36,
2937                                       QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
2938                                       QP_CONTEXT_QPC_BYTES_36_DEST_QP_S,
2939                                       attr->dest_qp_num);
2940
2941                /* Configure GID index */
2942                port_num = rdma_ah_get_port_num(&attr->ah_attr);
2943                roce_set_field(context->qpc_bytes_36,
2944                               QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
2945                               QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S,
2946                                hns_get_gid_index(hr_dev,
2947                                                  port_num - 1,
2948                                                  grh->sgid_index));
2949
2950                memcpy(&(context->dmac_l), dmac, 4);
2951
2952                roce_set_field(context->qpc_bytes_44,
2953                               QP_CONTEXT_QPC_BYTES_44_DMAC_H_M,
2954                               QP_CONTEXT_QPC_BYTES_44_DMAC_H_S,
2955                               *((u16 *)(&dmac[4])));
2956                roce_set_field(context->qpc_bytes_44,
2957                               QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_M,
2958                               QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S,
2959                               rdma_ah_get_static_rate(&attr->ah_attr));
2960                roce_set_field(context->qpc_bytes_44,
2961                               QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
2962                               QP_CONTEXT_QPC_BYTES_44_HOPLMT_S,
2963                               grh->hop_limit);
2964
2965                roce_set_field(context->qpc_bytes_48,
2966                               QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
2967                               QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S,
2968                               grh->flow_label);
2969                roce_set_field(context->qpc_bytes_48,
2970                               QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
2971                               QP_CONTEXT_QPC_BYTES_48_TCLASS_S,
2972                               grh->traffic_class);
2973                roce_set_field(context->qpc_bytes_48,
2974                               QP_CONTEXT_QPC_BYTES_48_MTU_M,
2975                               QP_CONTEXT_QPC_BYTES_48_MTU_S, attr->path_mtu);
2976
2977                memcpy(context->dgid, grh->dgid.raw,
2978                       sizeof(grh->dgid.raw));
2979
2980                dev_dbg(dev, "dmac:%x :%lx\n", context->dmac_l,
2981                        roce_get_field(context->qpc_bytes_44,
2982                                       QP_CONTEXT_QPC_BYTES_44_DMAC_H_M,
2983                                       QP_CONTEXT_QPC_BYTES_44_DMAC_H_S));
2984
2985                roce_set_field(context->qpc_bytes_68,
2986                               QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M,
2987                               QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S,
2988                               hr_qp->rq.head);
2989                roce_set_field(context->qpc_bytes_68,
2990                               QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M,
2991                               QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0);
2992
2993                rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
2994                context->cur_rq_wqe_ba_l = (u32)(mtts[rq_pa_start]);
2995
2996                roce_set_field(context->qpc_bytes_76,
2997                        QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M,
2998                        QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S,
2999                        mtts[rq_pa_start] >> 32);
3000                roce_set_field(context->qpc_bytes_76,
3001                               QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M,
3002                               QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S, 0);
3003
3004                context->rx_rnr_time = 0;
3005
3006                roce_set_field(context->qpc_bytes_84,
3007                               QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_M,
3008                               QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S,
3009                               attr->rq_psn - 1);
3010                roce_set_field(context->qpc_bytes_84,
3011                               QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_M,
3012                               QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S, 0);
3013
3014                roce_set_field(context->qpc_bytes_88,
3015                               QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M,
3016                               QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S,
3017                               attr->rq_psn);
3018                roce_set_bit(context->qpc_bytes_88,
3019                             QP_CONTEXT_QPC_BYTES_88_RX_REQ_PSN_ERR_FLAG_S, 0);
3020                roce_set_bit(context->qpc_bytes_88,
3021                             QP_CONTEXT_QPC_BYTES_88_RX_LAST_OPCODE_FLG_S, 0);
3022                roce_set_field(context->qpc_bytes_88,
3023                        QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_M,
3024                        QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S,
3025                        0);
3026                roce_set_field(context->qpc_bytes_88,
3027                               QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_M,
3028                               QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S,
3029                               0);
3030
3031                context->dma_length = 0;
3032                context->r_key = 0;
3033                context->va_l = 0;
3034                context->va_h = 0;
3035
3036                roce_set_field(context->qpc_bytes_108,
3037                               QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_M,
3038                               QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S, 0);
3039                roce_set_bit(context->qpc_bytes_108,
3040                             QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_FLG_S, 0);
3041                roce_set_bit(context->qpc_bytes_108,
3042                             QP_CONTEXT_QPC_BYTES_108_TRRL_TDB_PSN_FLG_S, 0);
3043
3044                roce_set_field(context->qpc_bytes_112,
3045                               QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_M,
3046                               QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S, 0);
3047                roce_set_field(context->qpc_bytes_112,
3048                               QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_M,
3049                               QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S, 0);
3050
3051                /* For chip resp ack */
3052                roce_set_field(context->qpc_bytes_156,
3053                               QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
3054                               QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
3055                               hr_qp->phy_port);
3056                roce_set_field(context->qpc_bytes_156,
3057                               QP_CONTEXT_QPC_BYTES_156_SL_M,
3058                               QP_CONTEXT_QPC_BYTES_156_SL_S,
3059                               rdma_ah_get_sl(&attr->ah_attr));
3060                hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
3061        } else if (cur_state == IB_QPS_RTR &&
3062                new_state == IB_QPS_RTS) {
3063                /* If exist optional param, return error */
3064                if ((attr_mask & IB_QP_ALT_PATH) ||
3065                    (attr_mask & IB_QP_ACCESS_FLAGS) ||
3066                    (attr_mask & IB_QP_QKEY) ||
3067                    (attr_mask & IB_QP_PATH_MIG_STATE) ||
3068                    (attr_mask & IB_QP_CUR_STATE) ||
3069                    (attr_mask & IB_QP_MIN_RNR_TIMER)) {
3070                        dev_err(dev, "RTR2RTS attr_mask error\n");
3071                        goto out;
3072                }
3073
3074                context->rx_cur_sq_wqe_ba_l = (u32)(mtts[0]);
3075
3076                roce_set_field(context->qpc_bytes_120,
3077                               QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M,
3078                               QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S,
3079                               (mtts[0]) >> 32);
3080
3081                roce_set_field(context->qpc_bytes_124,
3082                               QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M,
3083                               QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S, 0);
3084                roce_set_field(context->qpc_bytes_124,
3085                               QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_M,
3086                               QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S, 0);
3087
3088                roce_set_field(context->qpc_bytes_128,
3089                               QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_M,
3090                               QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S,
3091                               attr->sq_psn);
3092                roce_set_bit(context->qpc_bytes_128,
3093                             QP_CONTEXT_QPC_BYTES_128_RX_ACK_PSN_ERR_FLG_S, 0);
3094                roce_set_field(context->qpc_bytes_128,
3095                             QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_M,
3096                             QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S,
3097                             0);
3098                roce_set_bit(context->qpc_bytes_128,
3099                             QP_CONTEXT_QPC_BYTES_128_IRRL_PSN_VLD_FLG_S, 0);
3100
3101                roce_set_field(context->qpc_bytes_132,
3102                               QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_M,
3103                               QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S, 0);
3104                roce_set_field(context->qpc_bytes_132,
3105                               QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_M,
3106                               QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S, 0);
3107
3108                roce_set_field(context->qpc_bytes_136,
3109                               QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_M,
3110                               QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S,
3111                               attr->sq_psn);
3112                roce_set_field(context->qpc_bytes_136,
3113                               QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_M,
3114                               QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S,
3115                               attr->sq_psn);
3116
3117                roce_set_field(context->qpc_bytes_140,
3118                               QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_M,
3119                               QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S,
3120                               (attr->sq_psn >> SQ_PSN_SHIFT));
3121                roce_set_field(context->qpc_bytes_140,
3122                               QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_M,
3123                               QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S, 0);
3124                roce_set_bit(context->qpc_bytes_140,
3125                             QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S, 0);
3126
3127                roce_set_field(context->qpc_bytes_148,
3128                               QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M,
3129                               QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S, 0);
3130                roce_set_field(context->qpc_bytes_148,
3131                               QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
3132                               QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S,
3133                               attr->retry_cnt);
3134                roce_set_field(context->qpc_bytes_148,
3135                               QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M,
3136                               QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S,
3137                               attr->rnr_retry);
3138                roce_set_field(context->qpc_bytes_148,
3139                               QP_CONTEXT_QPC_BYTES_148_LSN_M,
3140                               QP_CONTEXT_QPC_BYTES_148_LSN_S, 0x100);
3141
3142                context->rnr_retry = 0;
3143
3144                roce_set_field(context->qpc_bytes_156,
3145                               QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M,
3146                               QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S,
3147                               attr->retry_cnt);
3148                if (attr->timeout < 0x12) {
3149                        dev_info(dev, "ack timeout value(0x%x) must bigger than 0x12.\n",
3150                                 attr->timeout);
3151                        roce_set_field(context->qpc_bytes_156,
3152                                       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
3153                                       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
3154                                       0x12);
3155                } else {
3156                        roce_set_field(context->qpc_bytes_156,
3157                                       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
3158                                       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
3159                                       attr->timeout);
3160                }
3161                roce_set_field(context->qpc_bytes_156,
3162                               QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M,
3163                               QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S,
3164                               attr->rnr_retry);
3165                roce_set_field(context->qpc_bytes_156,
3166                               QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
3167                               QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
3168                               hr_qp->phy_port);
3169                roce_set_field(context->qpc_bytes_156,
3170                               QP_CONTEXT_QPC_BYTES_156_SL_M,
3171                               QP_CONTEXT_QPC_BYTES_156_SL_S,
3172                               rdma_ah_get_sl(&attr->ah_attr));
3173                hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
3174                roce_set_field(context->qpc_bytes_156,
3175                               QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
3176                               QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S,
3177                               ilog2((unsigned int)attr->max_rd_atomic));
3178                roce_set_field(context->qpc_bytes_156,
3179                               QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_M,
3180                               QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S, 0);
3181                context->pkt_use_len = 0;
3182
3183                roce_set_field(context->qpc_bytes_164,
3184                               QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M,
3185                               QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S, attr->sq_psn);
3186                roce_set_field(context->qpc_bytes_164,
3187                               QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_M,
3188                               QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S, 0);
3189
3190                roce_set_field(context->qpc_bytes_168,
3191                               QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_M,
3192                               QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S,
3193                               attr->sq_psn);
3194                roce_set_field(context->qpc_bytes_168,
3195                               QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_M,
3196                               QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S, 0);
3197                roce_set_field(context->qpc_bytes_168,
3198                               QP_CONTEXT_QPC_BYTES_168_DB_TYPE_M,
3199                               QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S, 0);
3200                roce_set_bit(context->qpc_bytes_168,
3201                             QP_CONTEXT_QPC_BYTES_168_MSG_LP_IND_S, 0);
3202                roce_set_bit(context->qpc_bytes_168,
3203                             QP_CONTEXT_QPC_BYTES_168_CSDB_LP_IND_S, 0);
3204                roce_set_bit(context->qpc_bytes_168,
3205                             QP_CONTEXT_QPC_BYTES_168_QP_ERR_FLG_S, 0);
3206                context->sge_use_len = 0;
3207
3208                roce_set_field(context->qpc_bytes_176,
3209                               QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_M,
3210                               QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S, 0);
3211                roce_set_field(context->qpc_bytes_176,
3212                               QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_M,
3213                               QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S,
3214                               0);
3215                roce_set_field(context->qpc_bytes_180,
3216                               QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_M,
3217                               QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S, 0);
3218                roce_set_field(context->qpc_bytes_180,
3219                               QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M,
3220                               QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S, 0);
3221
3222                context->tx_cur_sq_wqe_ba_l = (u32)(mtts[0]);
3223
3224                roce_set_field(context->qpc_bytes_188,
3225                               QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M,
3226                               QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S,
3227                               (mtts[0]) >> 32);
3228                roce_set_bit(context->qpc_bytes_188,
3229                             QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S, 0);
3230                roce_set_field(context->qpc_bytes_188,
3231                               QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M,
3232                               QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S,
3233                               0);
3234        } else if (!((cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
3235                   (cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) ||
3236                   (cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) ||
3237                   (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
3238                   (cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) ||
3239                   (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
3240                   (cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) ||
3241                   (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR))) {
3242                dev_err(dev, "not support this status migration\n");
3243                goto out;
3244        }
3245
3246        /* Every status migrate must change state */
3247        roce_set_field(context->qpc_bytes_144,
3248                       QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
3249                       QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, new_state);
3250
3251        /* SW pass context to HW */
3252        ret = hns_roce_v1_qp_modify(hr_dev, &hr_qp->mtt,
3253                                    to_hns_roce_state(cur_state),
3254                                    to_hns_roce_state(new_state), context,
3255                                    hr_qp);
3256        if (ret) {
3257                dev_err(dev, "hns_roce_qp_modify failed\n");
3258                goto out;
3259        }
3260
3261        /*
3262         * Use rst2init to instead of init2init with drv,
3263         * need to hw to flash RQ HEAD by DB again
3264         */
3265        if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
3266                /* Memory barrier */
3267                wmb();
3268
3269                roce_set_field(doorbell[0], RQ_DOORBELL_U32_4_RQ_HEAD_M,
3270                               RQ_DOORBELL_U32_4_RQ_HEAD_S, hr_qp->rq.head);
3271                roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_QPN_M,
3272                               RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
3273                roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_CMD_M,
3274                               RQ_DOORBELL_U32_8_CMD_S, 1);
3275                roce_set_bit(doorbell[1], RQ_DOORBELL_U32_8_HW_SYNC_S, 1);
3276
3277                if (ibqp->uobject) {
3278                        hr_qp->rq.db_reg_l = hr_dev->reg_base +
3279                                     hr_dev->odb_offset +
3280                                     DB_REG_OFFSET * hr_dev->priv_uar.index;
3281                }
3282
3283                hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
3284        }
3285
3286        hr_qp->state = new_state;
3287
3288        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
3289                hr_qp->resp_depth = attr->max_dest_rd_atomic;
3290        if (attr_mask & IB_QP_PORT) {
3291                hr_qp->port = attr->port_num - 1;
3292                hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
3293        }
3294
3295        if (new_state == IB_QPS_RESET && !ibqp->uobject) {
3296                hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
3297                                     ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
3298                if (ibqp->send_cq != ibqp->recv_cq)
3299                        hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
3300                                             hr_qp->qpn, NULL);
3301
3302                hr_qp->rq.head = 0;
3303                hr_qp->rq.tail = 0;
3304                hr_qp->sq.head = 0;
3305                hr_qp->sq.tail = 0;
3306                hr_qp->sq_next_wqe = 0;
3307        }
3308out:
3309        kfree(context);
3310        return ret;
3311}
3312
3313static int hns_roce_v1_modify_qp(struct ib_qp *ibqp,
3314                                 const struct ib_qp_attr *attr, int attr_mask,
3315                                 enum ib_qp_state cur_state,
3316                                 enum ib_qp_state new_state)
3317{
3318
3319        if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
3320                return hns_roce_v1_m_sqp(ibqp, attr, attr_mask, cur_state,
3321                                         new_state);
3322        else
3323                return hns_roce_v1_m_qp(ibqp, attr, attr_mask, cur_state,
3324                                        new_state);
3325}
3326
3327static enum ib_qp_state to_ib_qp_state(enum hns_roce_qp_state state)
3328{
3329        switch (state) {
3330        case HNS_ROCE_QP_STATE_RST:
3331                return IB_QPS_RESET;
3332        case HNS_ROCE_QP_STATE_INIT:
3333                return IB_QPS_INIT;
3334        case HNS_ROCE_QP_STATE_RTR:
3335                return IB_QPS_RTR;
3336        case HNS_ROCE_QP_STATE_RTS:
3337                return IB_QPS_RTS;
3338        case HNS_ROCE_QP_STATE_SQD:
3339                return IB_QPS_SQD;
3340        case HNS_ROCE_QP_STATE_ERR:
3341                return IB_QPS_ERR;
3342        default:
3343                return IB_QPS_ERR;
3344        }
3345}
3346
3347static int hns_roce_v1_query_qpc(struct hns_roce_dev *hr_dev,
3348                                 struct hns_roce_qp *hr_qp,
3349                                 struct hns_roce_qp_context *hr_context)
3350{
3351        struct hns_roce_cmd_mailbox *mailbox;
3352        int ret;
3353
3354        mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3355        if (IS_ERR(mailbox))
3356                return PTR_ERR(mailbox);
3357
3358        ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
3359                                HNS_ROCE_CMD_QUERY_QP,
3360                                HNS_ROCE_CMD_TIMEOUT_MSECS);
3361        if (!ret)
3362                memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
3363        else
3364                dev_err(&hr_dev->pdev->dev, "QUERY QP cmd process error\n");
3365
3366        hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3367
3368        return ret;
3369}
3370
/*
 * Query attributes of a special QP (GSI/SMI) on hip06.
 *
 * QP1 context (QP1C) lives in per-port on-chip registers rather than in a
 * host-memory QPC, so it is read back with a sequence of roce_read()s
 * instead of a QUERY_QP mailbox command.  Most attributes of a special QP
 * are fixed, so only the state and P_Key index come from hardware; the
 * rest are filled with constants.
 */
static int hns_roce_v1_q_sqp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
			     int qp_attr_mask,
			     struct ib_qp_init_attr *qp_init_attr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_sqp_context context;
	u32 addr;

	mutex_lock(&hr_qp->mutex);

	/* A reset QP has no valid hardware context to read back. */
	if (hr_qp->state == IB_QPS_RESET) {
		qp_attr->qp_state = IB_QPS_RESET;
		goto done;
	}

	/*
	 * Locate this port's QP1C register block, then read its words in
	 * order.  NOTE(review): the "+ 1", "+ 2", ... offsets assume
	 * roce_read() addresses 32-bit words per the QP1C layout — confirm
	 * against roce_read()/ROCEE_QP1C_CFG0_0_REG in hns_roce_common.h.
	 */
	addr = ROCEE_QP1C_CFG0_0_REG +
		hr_qp->port * sizeof(struct hns_roce_sqp_context);
	context.qp1c_bytes_4 = roce_read(hr_dev, addr);
	context.sq_rq_bt_l = roce_read(hr_dev, addr + 1);
	context.qp1c_bytes_12 = roce_read(hr_dev, addr + 2);
	context.qp1c_bytes_16 = roce_read(hr_dev, addr + 3);
	context.qp1c_bytes_20 = roce_read(hr_dev, addr + 4);
	context.cur_rq_wqe_ba_l = roce_read(hr_dev, addr + 5);
	context.qp1c_bytes_28 = roce_read(hr_dev, addr + 6);
	context.qp1c_bytes_32 = roce_read(hr_dev, addr + 7);
	context.cur_sq_wqe_ba_l = roce_read(hr_dev, addr + 8);
	context.qp1c_bytes_40 = roce_read(hr_dev, addr + 9);

	/* Cache the hardware-reported state back into the driver QP. */
	hr_qp->state = roce_get_field(context.qp1c_bytes_4,
				      QP1C_BYTES_4_QP_STATE_M,
				      QP1C_BYTES_4_QP_STATE_S);
	qp_attr->qp_state	= hr_qp->state;
	qp_attr->path_mtu	= IB_MTU_256;
	qp_attr->path_mig_state = IB_MIG_ARMED;
	qp_attr->qkey		= QKEY_VAL;
	qp_attr->ah_attr.type	= RDMA_AH_ATTR_TYPE_ROCE;
	qp_attr->rq_psn		= 0;
	qp_attr->sq_psn		= 0;
	qp_attr->dest_qp_num	= 1;
	/* 6 — presumably IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ;
	 * confirm against the access-flag enum values.
	 */
	qp_attr->qp_access_flags = 6;

	qp_attr->pkey_index = roce_get_field(context.qp1c_bytes_20,
					     QP1C_BYTES_20_PKEY_IDX_M,
					     QP1C_BYTES_20_PKEY_IDX_S);
	/* hr_qp->port is 0-based; verbs port numbers are 1-based. */
	qp_attr->port_num = hr_qp->port + 1;
	qp_attr->sq_draining = 0;
	qp_attr->max_rd_atomic = 0;
	qp_attr->max_dest_rd_atomic = 0;
	qp_attr->min_rnr_timer = 0;
	qp_attr->timeout = 0;
	qp_attr->retry_cnt = 0;
	qp_attr->rnr_retry = 0;
	qp_attr->alt_timeout = 0;

done:
	/* Capacity fields are valid even for a reset QP. */
	qp_attr->cur_qp_state = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
	qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
	qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
	qp_attr->cap.max_inline_data = 0;
	qp_init_attr->cap = qp_attr->cap;
	qp_init_attr->create_flags = 0;

	mutex_unlock(&hr_qp->mutex);

	return 0;
}
3440
/*
 * Query attributes of a regular (non-QP0/QP1) QP by reading its hardware
 * QP context and translating the fields into ib_qp_attr/ib_qp_init_attr.
 *
 * Returns 0 on success (including a QP in RESET, which skips the context
 * read), -ENOMEM on allocation failure, -EINVAL if the context query or
 * the HW->IB state translation fails.
 */
static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
			    int qp_attr_mask,
			    struct ib_qp_init_attr *qp_init_attr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_qp_context *context;
	int tmp_qp_state = 0;
	int ret = 0;
	int state;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	memset(qp_attr, 0, sizeof(*qp_attr));
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));

	mutex_lock(&hr_qp->mutex);

	/* A QP in RESET has no valid HW context; report capabilities only. */
	if (hr_qp->state == IB_QPS_RESET) {
		qp_attr->qp_state = IB_QPS_RESET;
		goto done;
	}

	/* Fetch the QP context from hardware via a mailbox command. */
	ret = hns_roce_v1_query_qpc(hr_dev, hr_qp, context);
	if (ret) {
		dev_err(dev, "query qpc error\n");
		ret = -EINVAL;
		goto out;
	}

	state = roce_get_field(context->qpc_bytes_144,
			       QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
			       QP_CONTEXT_QPC_BYTES_144_QP_STATE_S);
	/* Translate HW QP state encoding to the IB enum; -1 means unknown. */
	tmp_qp_state = (int)to_ib_qp_state((enum hns_roce_qp_state)state);
	if (tmp_qp_state == -1) {
		dev_err(dev, "to_ib_qp_state error\n");
		ret = -EINVAL;
		goto out;
	}
	/* Resync the cached SW state with what hardware reports. */
	hr_qp->state = (u8)tmp_qp_state;
	qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
	qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->qpc_bytes_48,
					       QP_CONTEXT_QPC_BYTES_48_MTU_M,
					       QP_CONTEXT_QPC_BYTES_48_MTU_S);
	qp_attr->path_mig_state = IB_MIG_ARMED;
	qp_attr->ah_attr.type   = RDMA_AH_ATTR_TYPE_ROCE;
	if (hr_qp->ibqp.qp_type == IB_QPT_UD)
		qp_attr->qkey = QKEY_VAL;

	qp_attr->rq_psn = roce_get_field(context->qpc_bytes_88,
					 QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M,
					 QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S);
	qp_attr->sq_psn = (u32)roce_get_field(context->qpc_bytes_164,
					     QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M,
					     QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S);
	qp_attr->dest_qp_num = (u8)roce_get_field(context->qpc_bytes_36,
					QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
					QP_CONTEXT_QPC_BYTES_36_DEST_QP_S);
	/*
	 * Assemble IB access flags from single HW enable bits:
	 * read enable -> IB_ACCESS_REMOTE_READ (bit 2), write enable ->
	 * IB_ACCESS_REMOTE_WRITE (bit 1), atomic enable ->
	 * IB_ACCESS_REMOTE_ATOMIC (bit 3).
	 */
	qp_attr->qp_access_flags = ((roce_get_bit(context->qpc_bytes_4,
			QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S)) << 2) |
				   ((roce_get_bit(context->qpc_bytes_4,
			QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S)) << 1) |
				   ((roce_get_bit(context->qpc_bytes_4,
			QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S)) << 3);

	/* Connected QP types also carry address-vector (GRH) information. */
	if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
	    hr_qp->ibqp.qp_type == IB_QPT_UC) {
		struct ib_global_route *grh =
			rdma_ah_retrieve_grh(&qp_attr->ah_attr);

		rdma_ah_set_sl(&qp_attr->ah_attr,
			       roce_get_field(context->qpc_bytes_156,
					      QP_CONTEXT_QPC_BYTES_156_SL_M,
					      QP_CONTEXT_QPC_BYTES_156_SL_S));
		rdma_ah_set_ah_flags(&qp_attr->ah_attr, IB_AH_GRH);
		grh->flow_label =
			roce_get_field(context->qpc_bytes_48,
				       QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
				       QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S);
		grh->sgid_index =
			roce_get_field(context->qpc_bytes_36,
				       QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
				       QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S);
		grh->hop_limit =
			roce_get_field(context->qpc_bytes_44,
				       QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
				       QP_CONTEXT_QPC_BYTES_44_HOPLMT_S);
		grh->traffic_class =
			roce_get_field(context->qpc_bytes_48,
				       QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
				       QP_CONTEXT_QPC_BYTES_48_TCLASS_S);

		memcpy(grh->dgid.raw, context->dgid,
		       sizeof(grh->dgid.raw));
	}

	qp_attr->pkey_index = roce_get_field(context->qpc_bytes_12,
			      QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
			      QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S);
	qp_attr->port_num = hr_qp->port + 1;
	qp_attr->sq_draining = 0;
	/* HW stores depths as log2; convert back to absolute counts. */
	qp_attr->max_rd_atomic = 1 << roce_get_field(context->qpc_bytes_156,
				 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
				 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S);
	qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->qpc_bytes_32,
				 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
				 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S);
	qp_attr->min_rnr_timer = (u8)(roce_get_field(context->qpc_bytes_24,
			QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
			QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S));
	qp_attr->timeout = (u8)(roce_get_field(context->qpc_bytes_156,
			    QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
			    QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S));
	qp_attr->retry_cnt = roce_get_field(context->qpc_bytes_148,
			     QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
			     QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S);
	qp_attr->rnr_retry = context->rnr_retry;

done:
	qp_attr->cur_qp_state = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;

	/* Userspace QPs own their SQ accounting; report 0 from the kernel. */
	if (!ibqp->uobject) {
		qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
		qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
	} else {
		qp_attr->cap.max_send_wr = 0;
		qp_attr->cap.max_send_sge = 0;
	}

	qp_init_attr->cap = qp_attr->cap;

out:
	mutex_unlock(&hr_qp->mutex);
	kfree(context);
	return ret;
}
3582
3583static int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3584                                int qp_attr_mask,
3585                                struct ib_qp_init_attr *qp_init_attr)
3586{
3587        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3588
3589        return hr_qp->doorbell_qpn <= 1 ?
3590                hns_roce_v1_q_sqp(ibqp, qp_attr, qp_attr_mask, qp_init_attr) :
3591                hns_roce_v1_q_qp(ibqp, qp_attr, qp_attr_mask, qp_init_attr);
3592}
3593
/*
 * Check whether the send doorbell (SDB) hardware has made enough progress
 * since the caller's snapshot.
 *
 * @old_send:      previous ROCEE_SDB_SEND_PTR_REG snapshot; may be updated
 *                 in place so the caller's next poll compares fresh state.
 * @old_retry:     previous ROCEE_SDB_RETRY_CNT_REG snapshot.
 * @tsp_st:        current ROCEE_TSP_BP_ST_REG value, read by the caller.
 * @success_flags: set to 1 once progress exceeds SDB_ST_CMP_VAL; left
 *                 untouched otherwise, so the caller can keep polling.
 */
static void hns_roce_check_sdb_status(struct hns_roce_dev *hr_dev,
				      u32 *old_send, u32 *old_retry,
				      u32 *tsp_st, u32 *success_flags)
{
	u32 sdb_retry_cnt;
	u32 sdb_send_ptr;
	u32 cur_cnt, old_cnt;
	u32 send_ptr;

	sdb_send_ptr = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
	sdb_retry_cnt = roce_read(hr_dev, ROCEE_SDB_RETRY_CNT_REG);
	/* Current progress = send pointer + retry count. */
	cur_cnt = roce_get_field(sdb_send_ptr,
				 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
				 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
		  roce_get_field(sdb_retry_cnt,
				 ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
				 ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
	if (!roce_get_bit(*tsp_st, ROCEE_CNT_CLR_CE_CNT_CLR_CE_S)) {
		/* Counters were not cleared: compare against both snapshots. */
		old_cnt = roce_get_field(*old_send,
					 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
					 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
			  roce_get_field(*old_retry,
					 ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
					 ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
		if (cur_cnt - old_cnt > SDB_ST_CMP_VAL)
			*success_flags = 1;
	} else {
		/* Counters were cleared: the old retry snapshot is stale. */
		old_cnt = roce_get_field(*old_send,
					 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
					 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S);
		if (cur_cnt - old_cnt > SDB_ST_CMP_VAL) {
			*success_flags = 1;
		} else {
			/*
			 * Not enough progress yet: fold the current retry
			 * count into the caller's send-pointer snapshot so
			 * the next comparison uses an up-to-date baseline.
			 */
			send_ptr = roce_get_field(*old_send,
					    ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
					    ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
				   roce_get_field(sdb_retry_cnt,
					    ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
					    ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
			roce_set_field(*old_send,
				       ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
				       ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S,
				       send_ptr);
		}
	}
}
3640
/*
 * Poll hardware until all doorbells issued for a QP being destroyed have
 * been consumed, advancing *wait_stage through a two-stage state machine:
 *
 *   STAGE1: wait for the SDB send pointer to catch up with the doorbell
 *           issue pointer recorded at destroy time.
 *   STAGE2: wait for the SDB invalidate counter to advance past the value
 *           sampled at the end of stage 1.
 *   WAIT_OK: hardware is done; the QP can safely be reset and freed.
 *
 * The whole procedure is bounded by HNS_ROCE_V1_CHECK_DB_TIMEOUT_MSECS;
 * on timeout the function returns 0 with *wait_stage left short of
 * WAIT_OK, which tells the caller to retry later (via the destroy-QP
 * workqueue). Returns -EINVAL only for an invalid *wait_stage input.
 */
static int check_qp_db_process_status(struct hns_roce_dev *hr_dev,
				      struct hns_roce_qp *hr_qp,
				      u32 sdb_issue_ptr,
				      u32 *sdb_inv_cnt,
				      u32 *wait_stage)
{
	struct device *dev = &hr_dev->pdev->dev;
	u32 sdb_send_ptr, old_send;
	u32 success_flags = 0;
	unsigned long end;
	u32 old_retry;
	u32 inv_cnt;
	u32 tsp_st;

	if (*wait_stage > HNS_ROCE_V1_DB_STAGE2 ||
	    *wait_stage < HNS_ROCE_V1_DB_STAGE1) {
		dev_err(dev, "QP(0x%lx) db status wait stage(%d) error!\n",
			hr_qp->qpn, *wait_stage);
		return -EINVAL;
	}

	/* Calculate the total timeout for the entire verification process */
	end = msecs_to_jiffies(HNS_ROCE_V1_CHECK_DB_TIMEOUT_MSECS) + jiffies;

	if (*wait_stage == HNS_ROCE_V1_DB_STAGE1) {
		/* Query db process status, until hw process completely */
		sdb_send_ptr = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
		while (roce_hw_index_cmp_lt(sdb_send_ptr, sdb_issue_ptr,
					    ROCEE_SDB_PTR_CMP_BITS)) {
			if (!time_before(jiffies, end)) {
				dev_dbg(dev, "QP(0x%lx) db process stage1 timeout. issue 0x%x send 0x%x.\n",
					hr_qp->qpn, sdb_issue_ptr,
					sdb_send_ptr);
				/* Timeout: stay in STAGE1, caller retries. */
				return 0;
			}

			msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);
			sdb_send_ptr = roce_read(hr_dev,
						 ROCEE_SDB_SEND_PTR_REG);
		}

		/*
		 * If send and issue pointers are exactly equal, progress
		 * must additionally be confirmed via the TSP backpressure
		 * status / SDB counters before moving on.
		 */
		if (roce_get_field(sdb_issue_ptr,
				   ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_M,
				   ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S) ==
		    roce_get_field(sdb_send_ptr,
				   ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
				   ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S)) {
			old_send = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
			old_retry = roce_read(hr_dev, ROCEE_SDB_RETRY_CNT_REG);

			do {
				tsp_st = roce_read(hr_dev, ROCEE_TSP_BP_ST_REG);
				if (roce_get_bit(tsp_st,
					ROCEE_TSP_BP_ST_QH_FIFO_ENTRY_S) == 1) {
					/* Queue-head FIFO drained: done. */
					*wait_stage = HNS_ROCE_V1_DB_WAIT_OK;
					return 0;
				}

				if (!time_before(jiffies, end)) {
					dev_dbg(dev, "QP(0x%lx) db process stage1 timeout when send ptr equals issue ptr.\n"
						     "issue 0x%x send 0x%x.\n",
						hr_qp->qpn, sdb_issue_ptr,
						sdb_send_ptr);
					return 0;
				}

				msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);

				hns_roce_check_sdb_status(hr_dev, &old_send,
							  &old_retry, &tsp_st,
							  &success_flags);
			} while (!success_flags);
		}

		*wait_stage = HNS_ROCE_V1_DB_STAGE2;

		/* Get list pointer */
		*sdb_inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
		dev_dbg(dev, "QP(0x%lx) db process stage2. inv cnt = 0x%x.\n",
			hr_qp->qpn, *sdb_inv_cnt);
	}

	if (*wait_stage == HNS_ROCE_V1_DB_STAGE2) {
		/* Query db's list status, until hw reversal */
		inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
		while (roce_hw_index_cmp_lt(inv_cnt,
					    *sdb_inv_cnt + SDB_INV_CNT_OFFSET,
					    ROCEE_SDB_CNT_CMP_BITS)) {
			if (!time_before(jiffies, end)) {
				dev_dbg(dev, "QP(0x%lx) db process stage2 timeout. inv cnt 0x%x.\n",
					hr_qp->qpn, inv_cnt);
				/* Timeout: stay in STAGE2, caller retries. */
				return 0;
			}

			msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);
			inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
		}

		*wait_stage = HNS_ROCE_V1_DB_WAIT_OK;
	}

	return 0;
}
3744
/*
 * Drive a QP to RESET before destruction, making sure hardware has
 * finished processing all its doorbells first.
 *
 * Sequence (skipped entirely if the QP is already in RESET):
 *   1. modify QP -> ERR so no new work is accepted;
 *   2. record the issued doorbell pointer and start the DB-drain state
 *      machine in @qp_work_entry;
 *   3. poll the drain status; if it did not reach WAIT_OK within the
 *      timeout, set *is_timeout = 1 and return 0 — the caller must then
 *      defer the remaining teardown to the destroy-QP workqueue;
 *   4. otherwise modify QP -> RESET.
 *
 * Returns 0 on success or timeout-deferral, a negative errno on a failed
 * modify-QP or an invalid drain state.
 */
static int check_qp_reset_state(struct hns_roce_dev *hr_dev,
				struct hns_roce_qp *hr_qp,
				struct hns_roce_qp_work *qp_work_entry,
				int *is_timeout)
{
	struct device *dev = &hr_dev->pdev->dev;
	u32 sdb_issue_ptr;
	int ret;

	if (hr_qp->state != IB_QPS_RESET) {
		/* Set qp to ERR, waiting for hw complete processing all dbs */
		ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
					    IB_QPS_ERR);
		if (ret) {
			dev_err(dev, "Modify QP(0x%lx) to ERR failed!\n",
				hr_qp->qpn);
			return ret;
		}

		/* Record issued doorbell */
		sdb_issue_ptr = roce_read(hr_dev, ROCEE_SDB_ISSUE_PTR_REG);
		qp_work_entry->sdb_issue_ptr = sdb_issue_ptr;
		qp_work_entry->db_wait_stage = HNS_ROCE_V1_DB_STAGE1;

		/* Query db process status, until hw process completely */
		ret = check_qp_db_process_status(hr_dev, hr_qp, sdb_issue_ptr,
						 &qp_work_entry->sdb_inv_cnt,
						 &qp_work_entry->db_wait_stage);
		if (ret) {
			dev_err(dev, "Check QP(0x%lx) db process status failed!\n",
				hr_qp->qpn);
			return ret;
		}

		if (qp_work_entry->db_wait_stage != HNS_ROCE_V1_DB_WAIT_OK) {
			/* Drain timed out; hand off to the workqueue. */
			qp_work_entry->sche_cnt = 0;
			*is_timeout = 1;
			return 0;
		}

		/* Modify qp to reset before destroying qp */
		ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
					    IB_QPS_RESET);
		if (ret) {
			dev_err(dev, "Modify QP(0x%lx) to RST failed!\n",
				hr_qp->qpn);
			return ret;
		}
	}

	return 0;
}
3797
/*
 * Workqueue handler that finishes destroying a QP whose doorbell drain
 * timed out in hns_roce_v1_destroy_qp(). Re-polls the drain state machine
 * and requeues itself until hardware is done (or requeue_flag is cleared
 * during driver teardown), then resets and frees the QP.
 */
static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
{
	struct hns_roce_qp_work *qp_work_entry;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_dev *hr_dev;
	struct hns_roce_qp *hr_qp;
	struct device *dev;
	unsigned long qpn;
	int ret;

	qp_work_entry = container_of(work, struct hns_roce_qp_work, work);
	hr_dev = to_hr_dev(qp_work_entry->ib_dev);
	dev = &hr_dev->pdev->dev;
	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	hr_qp = qp_work_entry->qp;
	qpn = hr_qp->qpn;

	dev_dbg(dev, "Schedule destroy QP(0x%lx) work.\n", qpn);

	qp_work_entry->sche_cnt++;

	/* Query db process status, until hw process completely */
	ret = check_qp_db_process_status(hr_dev, hr_qp,
					 qp_work_entry->sdb_issue_ptr,
					 &qp_work_entry->sdb_inv_cnt,
					 &qp_work_entry->db_wait_stage);
	if (ret) {
		/*
		 * NOTE(review): on this error path the work item and QP are
		 * not freed — presumably leaked; confirm against teardown.
		 */
		dev_err(dev, "Check QP(0x%lx) db process status failed!\n",
			qpn);
		return;
	}

	/* Still draining: requeue ourselves unless the driver is unloading. */
	if (qp_work_entry->db_wait_stage != HNS_ROCE_V1_DB_WAIT_OK &&
	    priv->des_qp.requeue_flag) {
		queue_work(priv->des_qp.qp_wq, work);
		return;
	}

	/* Modify qp to reset before destroying qp */
	ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
				    IB_QPS_RESET);
	if (ret) {
		dev_err(dev, "Modify QP(0x%lx) to RST failed!\n", qpn);
		return;
	}

	hns_roce_qp_remove(hr_dev, hr_qp);
	hns_roce_qp_free(hr_dev, hr_qp);

	if (hr_qp->ibqp.qp_type == IB_QPT_RC) {
		/* RC QP, release QPN */
		hns_roce_release_range_qp(hr_dev, qpn, 1);
		kfree(hr_qp);
	} else
		kfree(hr_to_hr_sqp(hr_qp));

	kfree(qp_work_entry);

	dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", qpn);
}
3858
/*
 * Destroy a QP. First drives the QP to RESET (check_qp_reset_state());
 * if the doorbell drain completed (!is_timeout) the QP is torn down
 * synchronously here, otherwise only the CQ cleanup and buffer release
 * happen now and the final remove/free is deferred to the destroy-QP
 * workqueue (hns_roce_v1_destroy_qp_work_fn()).
 */
int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_qp_work qp_work_entry;
	struct hns_roce_qp_work *qp_work;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_cq *send_cq, *recv_cq;
	int is_user = !!ibqp->pd->uobject;
	int is_timeout = 0;
	int ret;

	ret = check_qp_reset_state(hr_dev, hr_qp, &qp_work_entry, &is_timeout);
	if (ret) {
		dev_err(dev, "QP reset state check failed(%d)!\n", ret);
		return ret;
	}

	send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
	recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);

	/* Purge this QP's leftover CQEs (kernel QPs only) under both CQ locks. */
	hns_roce_lock_cqs(send_cq, recv_cq);
	if (!is_user) {
		__hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
				       to_hr_srq(hr_qp->ibqp.srq) : NULL);
		if (send_cq != recv_cq)
			__hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL);
	}
	hns_roce_unlock_cqs(send_cq, recv_cq);

	if (!is_timeout) {
		hns_roce_qp_remove(hr_dev, hr_qp);
		hns_roce_qp_free(hr_dev, hr_qp);

		/* RC QP, release QPN */
		if (hr_qp->ibqp.qp_type == IB_QPT_RC)
			hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
	}

	hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);

	if (is_user)
		ib_umem_release(hr_qp->umem);
	else {
		kfree(hr_qp->sq.wrid);
		kfree(hr_qp->rq.wrid);

		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
	}

	if (!is_timeout) {
		if (hr_qp->ibqp.qp_type == IB_QPT_RC)
			kfree(hr_qp);
		else
			kfree(hr_to_hr_sqp(hr_qp));
	} else {
		/*
		 * Drain timed out: hand the QP to the workqueue with the
		 * drain state captured on the stack above.
		 * NOTE(review): if this allocation fails, hr_qp is never
		 * freed and -ENOMEM is returned after buffers were already
		 * released — presumably a leak; verify.
		 */
		qp_work = kzalloc(sizeof(*qp_work), GFP_KERNEL);
		if (!qp_work)
			return -ENOMEM;

		INIT_WORK(&qp_work->work, hns_roce_v1_destroy_qp_work_fn);
		qp_work->ib_dev = &hr_dev->ib_dev;
		qp_work->qp             = hr_qp;
		qp_work->db_wait_stage  = qp_work_entry.db_wait_stage;
		qp_work->sdb_issue_ptr  = qp_work_entry.sdb_issue_ptr;
		qp_work->sdb_inv_cnt    = qp_work_entry.sdb_inv_cnt;
		qp_work->sche_cnt       = qp_work_entry.sche_cnt;

		priv = (struct hns_roce_v1_priv *)hr_dev->priv;
		queue_work(priv->des_qp.qp_wq, &qp_work->work);
		dev_dbg(dev, "Begin destroy QP(0x%lx) work.\n", hr_qp->qpn);
	}

	return 0;
}
3935
/*
 * Destroy a CQ. After unregistering it, waits for hardware to finish
 * writing outstanding CQEs — either the write-command queue reports
 * empty or at least HNS_ROCE_MIN_CQE_CNT further CQEs have been written
 * — bounded by HNS_ROCE_MAX_FREE_CQ_WAIT_CNT sleep iterations. The CQ
 * resources are freed even on timeout; only the return value reports
 * -ETIMEDOUT in that case.
 */
static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
	struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
	struct device *dev = &hr_dev->pdev->dev;
	u32 cqe_cnt_ori;
	u32 cqe_cnt_cur;
	u32 cq_buf_size;
	int wait_time = 0;
	int ret = 0;

	hns_roce_free_cq(hr_dev, hr_cq);

	/*
	 * Before freeing cq buffer, we need to ensure that the outstanding CQE
	 * have been written by checking the CQE counter.
	 */
	cqe_cnt_ori = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
	while (1) {
		if (roce_read(hr_dev, ROCEE_CAEP_CQE_WCMD_EMPTY) &
		    HNS_ROCE_CQE_WCMD_EMPTY_BIT)
			break;

		cqe_cnt_cur = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
		if ((cqe_cnt_cur - cqe_cnt_ori) >= HNS_ROCE_MIN_CQE_CNT)
			break;

		msleep(HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS);
		if (wait_time > HNS_ROCE_MAX_FREE_CQ_WAIT_CNT) {
			dev_warn(dev, "Destroy cq 0x%lx timeout!\n",
				hr_cq->cqn);
			ret = -ETIMEDOUT;
			break;
		}
		wait_time++;
	}

	hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);

	if (ibcq->uobject)
		ib_umem_release(hr_cq->umem);
	else {
		/* Free the buff of stored cq */
		cq_buf_size = (ibcq->cqe + 1) * hr_dev->caps.cq_entry_sz;
		hns_roce_buf_free(hr_dev, cq_buf_size, &hr_cq->hr_buf.hr_buf);
	}

	kfree(hr_cq);

	return ret;
}
3987
3988static void set_eq_cons_index_v1(struct hns_roce_eq *eq, int req_not)
3989{
3990        roce_raw_write((eq->cons_index & HNS_ROCE_V1_CONS_IDX_M) |
3991                      (req_not << eq->log_entries), eq->doorbell);
3992}
3993
3994static void hns_roce_v1_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
3995                                            struct hns_roce_aeqe *aeqe, int qpn)
3996{
3997        struct device *dev = &hr_dev->pdev->dev;
3998
3999        dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
4000        switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
4001                               HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
4002        case HNS_ROCE_LWQCE_QPC_ERROR:
4003                dev_warn(dev, "QP %d, QPC error.\n", qpn);
4004                break;
4005        case HNS_ROCE_LWQCE_MTU_ERROR:
4006                dev_warn(dev, "QP %d, MTU error.\n", qpn);
4007                break;
4008        case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
4009                dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
4010                break;
4011        case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
4012                dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
4013                break;
4014        case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
4015                dev_warn(dev, "QP %d, WQE shift error\n", qpn);
4016                break;
4017        case HNS_ROCE_LWQCE_SL_ERROR:
4018                dev_warn(dev, "QP %d, SL error.\n", qpn);
4019                break;
4020        case HNS_ROCE_LWQCE_PORT_ERROR:
4021                dev_warn(dev, "QP %d, port error.\n", qpn);
4022                break;
4023        default:
4024                break;
4025        }
4026}
4027
4028static void hns_roce_v1_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
4029                                                   struct hns_roce_aeqe *aeqe,
4030                                                   int qpn)
4031{
4032        struct device *dev = &hr_dev->pdev->dev;
4033
4034        dev_warn(dev, "Local Access Violation Work Queue Error.\n");
4035        switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
4036                               HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
4037        case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
4038                dev_warn(dev, "QP %d, R_key violation.\n", qpn);
4039                break;
4040        case HNS_ROCE_LAVWQE_LENGTH_ERROR:
4041                dev_warn(dev, "QP %d, length error.\n", qpn);
4042                break;
4043        case HNS_ROCE_LAVWQE_VA_ERROR:
4044                dev_warn(dev, "QP %d, VA error.\n", qpn);
4045                break;
4046        case HNS_ROCE_LAVWQE_PD_ERROR:
4047                dev_err(dev, "QP %d, PD error.\n", qpn);
4048                break;
4049        case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
4050                dev_warn(dev, "QP %d, rw acc error.\n", qpn);
4051                break;
4052        case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
4053                dev_warn(dev, "QP %d, key state error.\n", qpn);
4054                break;
4055        case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
4056                dev_warn(dev, "QP %d, MR operation error.\n", qpn);
4057                break;
4058        default:
4059                break;
4060        }
4061}
4062
/*
 * Dispatch a QP-related async error event: extract the QPN (remapping
 * QP0/QP1 to per-port special QPNs), log the error class, and forward
 * the event to the generic QP event handler.
 */
static void hns_roce_v1_qp_err_handle(struct hns_roce_dev *hr_dev,
				      struct hns_roce_aeqe *aeqe,
				      int event_type)
{
	struct device *dev = &hr_dev->pdev->dev;
	int phy_port;
	int qpn;

	qpn = roce_get_field(aeqe->event.qp_event.qp,
			     HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
			     HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
	phy_port = roce_get_field(aeqe->event.qp_event.qp,
				  HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M,
				  HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S);
	/* QP0/QP1 exist per physical port; map to their per-port QPN. */
	if (qpn <= 1)
		qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port;

	switch (event_type) {
	case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
		dev_warn(dev, "Invalid Req Local Work Queue Error.\n"
			 "QP %d, phy_port %d.\n", qpn, phy_port);
		break;
	case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
		hns_roce_v1_wq_catas_err_handle(hr_dev, aeqe, qpn);
		break;
	case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
		hns_roce_v1_local_wq_access_err_handle(hr_dev, aeqe, qpn);
		break;
	default:
		break;
	}

	/* Notify the QP's registered event handler after logging. */
	hns_roce_qp_event(hr_dev, qpn, event_type);
}
4097
4098static void hns_roce_v1_cq_err_handle(struct hns_roce_dev *hr_dev,
4099                                      struct hns_roce_aeqe *aeqe,
4100                                      int event_type)
4101{
4102        struct device *dev = &hr_dev->pdev->dev;
4103        u32 cqn;
4104
4105        cqn = le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
4106                          HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
4107                          HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
4108
4109        switch (event_type) {
4110        case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
4111                dev_warn(dev, "CQ 0x%x access err.\n", cqn);
4112                break;
4113        case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
4114                dev_warn(dev, "CQ 0x%x overflow\n", cqn);
4115                break;
4116        case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
4117                dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn);
4118                break;
4119        default:
4120                break;
4121        }
4122
4123        hns_roce_cq_event(hr_dev, cqn, event_type);
4124}
4125
4126static void hns_roce_v1_db_overflow_handle(struct hns_roce_dev *hr_dev,
4127                                           struct hns_roce_aeqe *aeqe)
4128{
4129        struct device *dev = &hr_dev->pdev->dev;
4130
4131        switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
4132                               HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
4133        case HNS_ROCE_DB_SUBTYPE_SDB_OVF:
4134                dev_warn(dev, "SDB overflow.\n");
4135                break;
4136        case HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF:
4137                dev_warn(dev, "SDB almost overflow.\n");
4138                break;
4139        case HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP:
4140                dev_warn(dev, "SDB almost empty.\n");
4141                break;
4142        case HNS_ROCE_DB_SUBTYPE_ODB_OVF:
4143                dev_warn(dev, "ODB overflow.\n");
4144                break;
4145        case HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF:
4146                dev_warn(dev, "ODB almost overflow.\n");
4147                break;
4148        case HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP:
4149                dev_warn(dev, "SDB almost empty.\n");
4150                break;
4151        default:
4152                break;
4153        }
4154}
4155
4156static struct hns_roce_aeqe *get_aeqe_v1(struct hns_roce_eq *eq, u32 entry)
4157{
4158        unsigned long off = (entry & (eq->entries - 1)) *
4159                             HNS_ROCE_AEQ_ENTRY_SIZE;
4160
4161        return (struct hns_roce_aeqe *)((u8 *)
4162                (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
4163                off % HNS_ROCE_BA_SIZE);
4164}
4165
4166static struct hns_roce_aeqe *next_aeqe_sw_v1(struct hns_roce_eq *eq)
4167{
4168        struct hns_roce_aeqe *aeqe = get_aeqe_v1(eq, eq->cons_index);
4169
4170        return (roce_get_bit(aeqe->asyn, HNS_ROCE_AEQE_U32_4_OWNER_S) ^
4171                !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
4172}
4173
/*
 * Drain the asynchronous event queue: consume every AEQE currently owned
 * by software, dispatch it by event type, then ring the EQ doorbell with
 * the new consumer index.  Returns nonzero if at least one AEQE was
 * processed (used as the IRQ-handled indication by the caller).
 */
static int hns_roce_v1_aeq_int(struct hns_roce_dev *hr_dev,
			       struct hns_roce_eq *eq)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_aeqe *aeqe;
	int aeqes_found = 0;
	int event_type;

	while ((aeqe = next_aeqe_sw_v1(eq))) {

		/* Make sure we read the AEQ entry after we have checked the
		 * ownership bit
		 */
		dma_rmb();

		dev_dbg(dev, "aeqe = %p, aeqe->asyn.event_type = 0x%lx\n", aeqe,
			roce_get_field(aeqe->asyn,
				       HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
				       HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
		event_type = roce_get_field(aeqe->asyn,
					    HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
					    HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S);
		switch (event_type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
			dev_warn(dev, "PATH MIG not supported\n");
			break;
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
			dev_warn(dev, "COMMUNICATION established\n");
			break;
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
			dev_warn(dev, "SQ DRAINED not supported\n");
			break;
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
			dev_warn(dev, "PATH MIG failed\n");
			break;
		/* QP errors are decoded and forwarded to the QP layer */
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
			hns_roce_v1_qp_err_handle(hr_dev, aeqe, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
		case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
			dev_warn(dev, "SRQ not support!\n");
			break;
		/* CQ errors are decoded and forwarded to the CQ layer */
		case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
		case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
		case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
			hns_roce_v1_cq_err_handle(hr_dev, aeqe, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_PORT_CHANGE:
			dev_warn(dev, "port change.\n");
			break;
		/* Mailbox completion: wake up the waiting command context */
		case HNS_ROCE_EVENT_TYPE_MB:
			hns_roce_cmd_event(hr_dev,
					   le16_to_cpu(aeqe->event.cmd.token),
					   aeqe->event.cmd.status,
					   le64_to_cpu(aeqe->event.cmd.out_param
					   ));
			break;
		case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
			hns_roce_v1_db_overflow_handle(hr_dev, aeqe);
			break;
		case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
			dev_warn(dev, "CEQ 0x%lx overflow.\n",
			roce_get_field(aeqe->event.ce_event.ceqe,
				     HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M,
				     HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S));
			break;
		default:
			dev_warn(dev, "Unhandled event %d on EQ %d at idx %u.\n",
				 event_type, eq->eqn, eq->cons_index);
			break;
		}

		eq->cons_index++;
		aeqes_found = 1;

		/*
		 * cons_index runs over two passes of the ring (2 * depth) so
		 * that next_aeqe_sw_v1() can derive the expected ownership
		 * bit; wrap it back to 0 after the second pass.
		 */
		if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1) {
			dev_warn(dev, "cons_index overflow, set back to 0.\n");
			eq->cons_index = 0;
		}
	}

	/* Publish the updated consumer index to hardware */
	set_eq_cons_index_v1(eq, 0);

	return aeqes_found;
}
4262
4263static struct hns_roce_ceqe *get_ceqe_v1(struct hns_roce_eq *eq, u32 entry)
4264{
4265        unsigned long off = (entry & (eq->entries - 1)) *
4266                             HNS_ROCE_CEQ_ENTRY_SIZE;
4267
4268        return (struct hns_roce_ceqe *)((u8 *)
4269                        (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
4270                        off % HNS_ROCE_BA_SIZE);
4271}
4272
4273static struct hns_roce_ceqe *next_ceqe_sw_v1(struct hns_roce_eq *eq)
4274{
4275        struct hns_roce_ceqe *ceqe = get_ceqe_v1(eq, eq->cons_index);
4276
4277        return (!!(roce_get_bit(ceqe->comp,
4278                HNS_ROCE_CEQE_CEQE_COMP_OWNER_S))) ^
4279                (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
4280}
4281
4282static int hns_roce_v1_ceq_int(struct hns_roce_dev *hr_dev,
4283                               struct hns_roce_eq *eq)
4284{
4285        struct hns_roce_ceqe *ceqe;
4286        int ceqes_found = 0;
4287        u32 cqn;
4288
4289        while ((ceqe = next_ceqe_sw_v1(eq))) {
4290
4291                /* Make sure we read CEQ entry after we have checked the
4292                 * ownership bit
4293                 */
4294                dma_rmb();
4295
4296                cqn = roce_get_field(ceqe->comp,
4297                                     HNS_ROCE_CEQE_CEQE_COMP_CQN_M,
4298                                     HNS_ROCE_CEQE_CEQE_COMP_CQN_S);
4299                hns_roce_cq_completion(hr_dev, cqn);
4300
4301                ++eq->cons_index;
4302                ceqes_found = 1;
4303
4304                if (eq->cons_index > 2 * hr_dev->caps.ceqe_depth - 1) {
4305                        dev_warn(&eq->hr_dev->pdev->dev,
4306                                "cons_index overflow, set back to 0.\n");
4307                        eq->cons_index = 0;
4308                }
4309        }
4310
4311        set_eq_cons_index_v1(eq, 0);
4312
4313        return ceqes_found;
4314}
4315
4316static irqreturn_t hns_roce_v1_msix_interrupt_eq(int irq, void *eq_ptr)
4317{
4318        struct hns_roce_eq  *eq  = eq_ptr;
4319        struct hns_roce_dev *hr_dev = eq->hr_dev;
4320        int int_work = 0;
4321
4322        if (eq->type_flag == HNS_ROCE_CEQ)
4323                /* CEQ irq routine, CEQ is pulse irq, not clear */
4324                int_work = hns_roce_v1_ceq_int(hr_dev, eq);
4325        else
4326                /* AEQ irq routine, AEQ is pulse irq, not clear */
4327                int_work = hns_roce_v1_aeq_int(hr_dev, eq);
4328
4329        return IRQ_RETVAL(int_work);
4330}
4331
/*
 * MSI-X handler for the abnormal interrupt vector: AEQ overflow, CEQ
 * almost-overflow and ECC alarms.  Unlike the pulse interrupts above,
 * these conditions must be explicitly acknowledged: the handler masks
 * the source, writes 1 to clear the status bit, then unmasks it again.
 */
static irqreturn_t hns_roce_v1_msix_interrupt_abn(int irq, void *dev_id)
{
	struct hns_roce_dev *hr_dev = dev_id;
	struct device *dev = &hr_dev->pdev->dev;
	int int_work = 0;
	u32 caepaemask_val;
	u32 cealmovf_val;
	u32 caepaest_val;
	u32 aeshift_val;
	u32 ceshift_val;
	u32 cemask_val;
	int i;

	/*
	 * Abnormal interrupt:
	 * AEQ overflow, ECC multi-bit err, CEQ overflow must clear
	 * interrupt, mask irq, clear irq, cancel mask operation
	 */
	aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);

	/* AEQE overflow */
	if (roce_get_bit(aeshift_val,
		ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) {
		dev_warn(dev, "AEQ overflow!\n");

		/* Set mask */
		caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
		roce_set_bit(caepaemask_val,
			     ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
			     HNS_ROCE_INT_MASK_ENABLE);
		roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);

		/* Clear int state(INT_WC : write 1 clear) */
		caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG);
		roce_set_bit(caepaest_val,
			     ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1);
		roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val);

		/* Clear mask */
		caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
		roce_set_bit(caepaemask_val,
			     ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
			     HNS_ROCE_INT_MASK_DISABLE);
		roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
	}

	/* CEQ almost overflow: each CEQ has its own status/mask registers,
	 * located at fixed CEQ_REG_OFFSET strides from the first one.
	 */
	for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
		ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG +
					i * CEQ_REG_OFFSET);

		if (roce_get_bit(ceshift_val,
			ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) {
			dev_warn(dev, "CEQ[%d] almost overflow!\n", i);
			int_work++;

			/* Set mask */
			cemask_val = roce_read(hr_dev,
					       ROCEE_CAEP_CE_IRQ_MASK_0_REG +
					       i * CEQ_REG_OFFSET);
			roce_set_bit(cemask_val,
				ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
				HNS_ROCE_INT_MASK_ENABLE);
			roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
				   i * CEQ_REG_OFFSET, cemask_val);

			/* Clear int state(INT_WC : write 1 clear) */
			cealmovf_val = roce_read(hr_dev,
				       ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
				       i * CEQ_REG_OFFSET);
			roce_set_bit(cealmovf_val,
				     ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S,
				     1);
			roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
				   i * CEQ_REG_OFFSET, cealmovf_val);

			/* Clear mask */
			cemask_val = roce_read(hr_dev,
				     ROCEE_CAEP_CE_IRQ_MASK_0_REG +
				     i * CEQ_REG_OFFSET);
			roce_set_bit(cemask_val,
			       ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
			       HNS_ROCE_INT_MASK_DISABLE);
			roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
				   i * CEQ_REG_OFFSET, cemask_val);
		}
	}

	/* ECC multi-bit error alarm: dumped unconditionally on each
	 * abnormal interrupt; note this does not bump int_work.
	 */
	dev_warn(dev, "ECC UCERR ALARM: 0x%x, 0x%x, 0x%x\n",
		 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM0_REG),
		 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM1_REG),
		 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM2_REG));

	dev_warn(dev, "ECC CERR ALARM: 0x%x, 0x%x, 0x%x\n",
		 roce_read(hr_dev, ROCEE_ECC_CERR_ALM0_REG),
		 roce_read(hr_dev, ROCEE_ECC_CERR_ALM1_REG),
		 roce_read(hr_dev, ROCEE_ECC_CERR_ALM2_REG));

	return IRQ_RETVAL(int_work);
}
4433
4434static void hns_roce_v1_int_mask_enable(struct hns_roce_dev *hr_dev)
4435{
4436        u32 aemask_val;
4437        int masken = 0;
4438        int i;
4439
4440        /* AEQ INT */
4441        aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
4442        roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
4443                     masken);
4444        roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken);
4445        roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val);
4446
4447        /* CEQ INT */
4448        for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
4449                /* IRQ mask */
4450                roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4451                           i * CEQ_REG_OFFSET, masken);
4452        }
4453}
4454
4455static void hns_roce_v1_free_eq(struct hns_roce_dev *hr_dev,
4456                                struct hns_roce_eq *eq)
4457{
4458        int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) +
4459                      HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
4460        int i;
4461
4462        if (!eq->buf_list)
4463                return;
4464
4465        for (i = 0; i < npages; ++i)
4466                dma_free_coherent(&hr_dev->pdev->dev, HNS_ROCE_BA_SIZE,
4467                                  eq->buf_list[i].buf, eq->buf_list[i].map);
4468
4469        kfree(eq->buf_list);
4470}
4471
4472static void hns_roce_v1_enable_eq(struct hns_roce_dev *hr_dev, int eq_num,
4473                                  int enable_flag)
4474{
4475        void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num];
4476        u32 val;
4477
4478        val = readl(eqc);
4479
4480        if (enable_flag)
4481                roce_set_field(val,
4482                               ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
4483                               ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
4484                               HNS_ROCE_EQ_STAT_VALID);
4485        else
4486                roce_set_field(val,
4487                               ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
4488                               ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
4489                               HNS_ROCE_EQ_STAT_INVALID);
4490        writel(val, eqc);
4491}
4492
/*
 * Allocate the DMA buffer for an EQ ring and program its EQ context
 * registers (state/shift, base address low and high parts, current and
 * consumer index).  Returns 0 on success or a negative errno; on error
 * all partially allocated segments are freed.
 */
static int hns_roce_v1_create_eq(struct hns_roce_dev *hr_dev,
				 struct hns_roce_eq *eq)
{
	void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn];
	struct device *dev = &hr_dev->pdev->dev;
	dma_addr_t tmp_dma_addr;
	u32 eqconsindx_val = 0;
	u32 eqcuridx_val = 0;
	u32 eqshift_val = 0;
	int num_bas;
	int ret;
	int i;

	num_bas = (PAGE_ALIGN(eq->entries * eq->eqe_size) +
		   HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;

	/* Only a single-BA ring is supported: the context below is only
	 * programmed with buf_list[0].map as the base address.
	 */
	if ((eq->entries * eq->eqe_size) > HNS_ROCE_BA_SIZE) {
		dev_err(dev, "[error]eq buf %d gt ba size(%d) need bas=%d\n",
			(eq->entries * eq->eqe_size), HNS_ROCE_BA_SIZE,
			num_bas);
		return -EINVAL;
	}

	eq->buf_list = kcalloc(num_bas, sizeof(*eq->buf_list), GFP_KERNEL);
	if (!eq->buf_list)
		return -ENOMEM;

	for (i = 0; i < num_bas; ++i) {
		eq->buf_list[i].buf = dma_alloc_coherent(dev, HNS_ROCE_BA_SIZE,
							 &tmp_dma_addr,
							 GFP_KERNEL);
		if (!eq->buf_list[i].buf) {
			ret = -ENOMEM;
			goto err_out_free_pages;
		}

		eq->buf_list[i].map = tmp_dma_addr;
		memset(eq->buf_list[i].buf, 0, HNS_ROCE_BA_SIZE);
	}
	eq->cons_index = 0;
	/* EQ starts INVALID; it is switched to VALID later via
	 * hns_roce_v1_enable_eq().
	 */
	roce_set_field(eqshift_val,
		       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
		       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
		       HNS_ROCE_EQ_STAT_INVALID);
	roce_set_field(eqshift_val,
		       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M,
		       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S,
		       eq->log_entries);
	writel(eqshift_val, eqc);

	/* Configure eq extended address 12~44bit */
	writel((u32)(eq->buf_list[0].map >> 12), eqc + 4);

	/*
	 * Configure eq extended address 45~49 bit.
	 * 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of
	 * using 4K page, and shift more 32 because of
	 * caculating the high 32 bit value evaluated to hardware.
	 */
	roce_set_field(eqcuridx_val, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
		       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S,
		       eq->buf_list[0].map >> 44);
	roce_set_field(eqcuridx_val,
		       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
		       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0);
	writel(eqcuridx_val, eqc + 8);

	/* Configure eq consumer index */
	roce_set_field(eqconsindx_val,
		       ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
		       ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0);
	writel(eqconsindx_val, eqc + 0xc);

	return 0;

err_out_free_pages:
	/* Free only the segments that were successfully allocated */
	for (i -= 1; i >= 0; i--)
		dma_free_coherent(dev, HNS_ROCE_BA_SIZE, eq->buf_list[i].buf,
				  eq->buf_list[i].map);

	kfree(eq->buf_list);
	return ret;
}
4576
/*
 * Set up the whole EQ table: describe each CEQ and the AEQ, configure
 * the global CEQ coalescing parameters, create the EQ rings, request all
 * interrupt lines (normal EQ vectors plus the abnormal vector), and
 * finally enable the EQs.  Returns 0 on success or a negative errno,
 * unwinding every step that succeeded before the failure.
 */
static int hns_roce_v1_init_eq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_eq *eq;
	int irq_num;
	int eq_num;
	int ret;
	int i, j;

	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
	irq_num = eq_num + hr_dev->caps.num_other_vectors;

	eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
	if (!eq_table->eq)
		return -ENOMEM;

	eq_table->eqc_base = kcalloc(eq_num, sizeof(*eq_table->eqc_base),
				     GFP_KERNEL);
	if (!eq_table->eqc_base) {
		ret = -ENOMEM;
		goto err_eqc_base_alloc_fail;
	}

	/* The first num_comp_vectors EQs are CEQs, the rest are AEQs */
	for (i = 0; i < eq_num; i++) {
		eq = &eq_table->eq[i];
		eq->hr_dev = hr_dev;
		eq->eqn = i;
		eq->irq = hr_dev->irq[i];
		eq->log_page_size = PAGE_SHIFT;

		if (i < hr_dev->caps.num_comp_vectors) {
			/* CEQ */
			eq_table->eqc_base[i] = hr_dev->reg_base +
						ROCEE_CAEP_CEQC_SHIFT_0_REG +
						CEQ_REG_OFFSET * i;
			eq->type_flag = HNS_ROCE_CEQ;
			eq->doorbell = hr_dev->reg_base +
				       ROCEE_CAEP_CEQC_CONS_IDX_0_REG +
				       CEQ_REG_OFFSET * i;
			eq->entries = hr_dev->caps.ceqe_depth;
			eq->log_entries = ilog2(eq->entries);
			eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
		} else {
			/* AEQ */
			eq_table->eqc_base[i] = hr_dev->reg_base +
						ROCEE_CAEP_AEQC_AEQE_SHIFT_REG;
			eq->type_flag = HNS_ROCE_AEQ;
			eq->doorbell = hr_dev->reg_base +
				       ROCEE_CAEP_AEQE_CONS_IDX_REG;
			eq->entries = hr_dev->caps.aeqe_depth;
			eq->log_entries = ilog2(eq->entries);
			eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
		}
	}

	/* Disable irq */
	hns_roce_v1_int_mask_enable(hr_dev);

	/* Configure ce int interval */
	roce_write(hr_dev, ROCEE_CAEP_CE_INTERVAL_CFG_REG,
		   HNS_ROCE_CEQ_DEFAULT_INTERVAL);

	/* Configure ce int burst num */
	roce_write(hr_dev, ROCEE_CAEP_CE_BURST_NUM_CFG_REG,
		   HNS_ROCE_CEQ_DEFAULT_BURST_NUM);

	for (i = 0; i < eq_num; i++) {
		ret = hns_roce_v1_create_eq(hr_dev, &eq_table->eq[i]);
		if (ret) {
			dev_err(dev, "eq create failed\n");
			goto err_create_eq_fail;
		}
	}

	/* EQ vectors share one handler; the remaining vectors are routed
	 * to the abnormal-event handler with hr_dev as cookie.
	 */
	for (j = 0; j < irq_num; j++) {
		if (j < eq_num)
			ret = request_irq(hr_dev->irq[j],
					  hns_roce_v1_msix_interrupt_eq, 0,
					  hr_dev->irq_names[j],
					  &eq_table->eq[j]);
		else
			ret = request_irq(hr_dev->irq[j],
					  hns_roce_v1_msix_interrupt_abn, 0,
					  hr_dev->irq_names[j], hr_dev);

		if (ret) {
			dev_err(dev, "request irq error!\n");
			goto err_request_irq_fail;
		}
	}

	for (i = 0; i < eq_num; i++)
		hns_roce_v1_enable_eq(hr_dev, i, EQ_ENABLE);

	return 0;

err_request_irq_fail:
	/* Release only the IRQs obtained before the failing request_irq() */
	for (j -= 1; j >= 0; j--)
		free_irq(hr_dev->irq[j], &eq_table->eq[j]);

err_create_eq_fail:
	/* Free only the EQ rings created before the failure */
	for (i -= 1; i >= 0; i--)
		hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);

	kfree(eq_table->eqc_base);

err_eqc_base_alloc_fail:
	kfree(eq_table->eq);

	return ret;
}
4689
4690static void hns_roce_v1_cleanup_eq_table(struct hns_roce_dev *hr_dev)
4691{
4692        struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
4693        int irq_num;
4694        int eq_num;
4695        int i;
4696
4697        eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
4698        irq_num = eq_num + hr_dev->caps.num_other_vectors;
4699        for (i = 0; i < eq_num; i++) {
4700                /* Disable EQ */
4701                hns_roce_v1_enable_eq(hr_dev, i, EQ_DISABLE);
4702
4703                free_irq(hr_dev->irq[i], &eq_table->eq[i]);
4704
4705                hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);
4706        }
4707        for (i = eq_num; i < irq_num; i++)
4708                free_irq(hr_dev->irq[i], hr_dev);
4709
4710        kfree(eq_table->eqc_base);
4711        kfree(eq_table->eq);
4712}
4713
/* Operation table hooking this v1 hardware layer into the common
 * hns_roce core.
 */
static const struct hns_roce_hw hns_roce_hw_v1 = {
	.reset = hns_roce_v1_reset,
	.hw_profile = hns_roce_v1_profile,
	.hw_init = hns_roce_v1_init,
	.hw_exit = hns_roce_v1_exit,
	.post_mbox = hns_roce_v1_post_mbox,
	.chk_mbox = hns_roce_v1_chk_mbox,
	.set_gid = hns_roce_v1_set_gid,
	.set_mac = hns_roce_v1_set_mac,
	.set_mtu = hns_roce_v1_set_mtu,
	.write_mtpt = hns_roce_v1_write_mtpt,
	.write_cqc = hns_roce_v1_write_cqc,
	.modify_cq = hns_roce_v1_modify_cq,
	.clear_hem = hns_roce_v1_clear_hem,
	.modify_qp = hns_roce_v1_modify_qp,
	.query_qp = hns_roce_v1_query_qp,
	.destroy_qp = hns_roce_v1_destroy_qp,
	.post_send = hns_roce_v1_post_send,
	.post_recv = hns_roce_v1_post_recv,
	.req_notify_cq = hns_roce_v1_req_notify_cq,
	.poll_cq = hns_roce_v1_poll_cq,
	.dereg_mr = hns_roce_v1_dereg_mr,
	.destroy_cq = hns_roce_v1_destroy_cq,
	.init_eq = hns_roce_v1_init_eq_table,
	.cleanup_eq = hns_roce_v1_cleanup_eq_table,
};
4740
/* Device-tree match table; .data carries the hw ops for this variant */
static const struct of_device_id hns_roce_of_match[] = {
	{ .compatible = "hisilicon,hns-roce-v1", .data = &hns_roce_hw_v1, },
	{},
};
MODULE_DEVICE_TABLE(of, hns_roce_of_match);
4746
/* ACPI match table; driver_data carries the hw ops for this variant */
static const struct acpi_device_id hns_roce_acpi_match[] = {
	{ "HISI00D1", (kernel_ulong_t)&hns_roce_hw_v1 },
	{},
};
MODULE_DEVICE_TABLE(acpi, hns_roce_acpi_match);
4752
/* bus_find_device() match callback: true when @dev's firmware node is
 * the fwnode being searched for.
 */
static int hns_roce_node_match(struct device *dev, void *fwnode)
{
	return dev->fwnode == fwnode;
}
4757
4758static struct
4759platform_device *hns_roce_find_pdev(struct fwnode_handle *fwnode)
4760{
4761        struct device *dev;
4762
4763        /* get the 'device' corresponding to the matching 'fwnode' */
4764        dev = bus_find_device(&platform_bus_type, NULL,
4765                              fwnode, hns_roce_node_match);
4766        /* get the platform device */
4767        return dev ? to_platform_device(dev) : NULL;
4768}
4769
/*
 * Read the device configuration from DT or ACPI: pick the hw ops for the
 * matched variant, map the register BAR, fetch the node GUID, resolve the
 * associated ethernet netdevs per port, and collect the interrupt names
 * and numbers.  Returns 0 on success or a negative errno.
 */
static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct platform_device *pdev = NULL;
	struct net_device *netdev = NULL;
	struct device_node *net_node;
	struct resource *res;
	int port_cnt = 0;
	u8 phy_port;
	int ret;
	int i;

	/* check if we are compatible with the underlying SoC */
	if (dev_of_node(dev)) {
		const struct of_device_id *of_id;

		of_id = of_match_node(hns_roce_of_match, dev->of_node);
		if (!of_id) {
			dev_err(dev, "device is not compatible!\n");
			return -ENXIO;
		}
		hr_dev->hw = (const struct hns_roce_hw *)of_id->data;
		if (!hr_dev->hw) {
			dev_err(dev, "couldn't get H/W specific DT data!\n");
			return -ENXIO;
		}
	} else if (is_acpi_device_node(dev->fwnode)) {
		const struct acpi_device_id *acpi_id;

		acpi_id = acpi_match_device(hns_roce_acpi_match, dev);
		if (!acpi_id) {
			dev_err(dev, "device is not compatible!\n");
			return -ENXIO;
		}
		hr_dev->hw = (const struct hns_roce_hw *) acpi_id->driver_data;
		if (!hr_dev->hw) {
			dev_err(dev, "couldn't get H/W specific ACPI data!\n");
			return -ENXIO;
		}
	} else {
		dev_err(dev, "can't read compatibility data from DT or ACPI\n");
		return -ENXIO;
	}

	/* get the mapped register base address */
	res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
	hr_dev->reg_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(hr_dev->reg_base))
		return PTR_ERR(hr_dev->reg_base);

	/* read the node_guid of IB device from the DT or ACPI */
	ret = device_property_read_u8_array(dev, "node-guid",
					    (u8 *)&hr_dev->ib_dev.node_guid,
					    GUID_LEN);
	if (ret) {
		dev_err(dev, "couldn't get node_guid from DT or ACPI!\n");
		return ret;
	}

	/* get the RoCE associated ethernet ports or netdevices */
	for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) {
		if (dev_of_node(dev)) {
			/* DT: eth-handle phandle points at the port's
			 * ethernet platform device; missing entries are
			 * skipped, not treated as errors.
			 */
			net_node = of_parse_phandle(dev->of_node, "eth-handle",
						    i);
			if (!net_node)
				continue;
			pdev = of_find_device_by_node(net_node);
		} else if (is_acpi_device_node(dev->fwnode)) {
			struct acpi_reference_args args;
			struct fwnode_handle *fwnode;

			/* ACPI: resolve the equivalent eth-handle reference */
			ret = acpi_node_get_property_reference(dev->fwnode,
							       "eth-handle",
							       i, &args);
			if (ret)
				continue;
			fwnode = acpi_fwnode_handle(args.adev);
			pdev = hns_roce_find_pdev(fwnode);
		} else {
			dev_err(dev, "cannot read data from DT or ACPI\n");
			return -ENXIO;
		}

		if (pdev) {
			netdev = platform_get_drvdata(pdev);
			phy_port = (u8)i;
			if (netdev) {
				hr_dev->iboe.netdevs[port_cnt] = netdev;
				hr_dev->iboe.phy_port[port_cnt] = phy_port;
			} else {
				dev_err(dev, "no netdev found with pdev %s\n",
					pdev->name);
				return -ENODEV;
			}
			port_cnt++;
		}
	}

	if (port_cnt == 0) {
		dev_err(dev, "unable to get eth-handle for available ports!\n");
		return -EINVAL;
	}

	hr_dev->caps.num_ports = port_cnt;

	/* cmd issue mode: 0 is poll, 1 is event */
	hr_dev->cmd_mod = 1;
	hr_dev->loop_idc = 0;
	hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
	hr_dev->odb_offset = ROCEE_DB_OTHERS_L_0_REG;

	/* read the interrupt names from the DT or ACPI */
	ret = device_property_read_string_array(dev, "interrupt-names",
						hr_dev->irq_names,
						HNS_ROCE_V1_MAX_IRQ_NUM);
	if (ret < 0) {
		dev_err(dev, "couldn't get interrupt names from DT or ACPI!\n");
		return ret;
	}

	/* fetch the interrupt numbers */
	for (i = 0; i < HNS_ROCE_V1_MAX_IRQ_NUM; i++) {
		hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i);
		if (hr_dev->irq[i] <= 0) {
			dev_err(dev, "platform get of irq[=%d] failed!\n", i);
			return -EINVAL;
		}
	}

	return 0;
}
4901
4902/**
4903 * hns_roce_probe - RoCE driver entrance
4904 * @pdev: pointer to platform device
4905 * Return : int
4906 *
4907 */
4908static int hns_roce_probe(struct platform_device *pdev)
4909{
4910        int ret;
4911        struct hns_roce_dev *hr_dev;
4912        struct device *dev = &pdev->dev;
4913
4914        hr_dev = (struct hns_roce_dev *)ib_alloc_device(sizeof(*hr_dev));
4915        if (!hr_dev)
4916                return -ENOMEM;
4917
4918        hr_dev->priv = kzalloc(sizeof(struct hns_roce_v1_priv), GFP_KERNEL);
4919        if (!hr_dev->priv) {
4920                ret = -ENOMEM;
4921                goto error_failed_kzalloc;
4922        }
4923
4924        hr_dev->pdev = pdev;
4925        hr_dev->dev = dev;
4926        platform_set_drvdata(pdev, hr_dev);
4927
4928        if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64ULL)) &&
4929            dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32ULL))) {
4930                dev_err(dev, "Not usable DMA addressing mode\n");
4931                ret = -EIO;
4932                goto error_failed_get_cfg;
4933        }
4934
4935        ret = hns_roce_get_cfg(hr_dev);
4936        if (ret) {
4937                dev_err(dev, "Get Configuration failed!\n");
4938                goto error_failed_get_cfg;
4939        }
4940
4941        ret = hns_roce_init(hr_dev);
4942        if (ret) {
4943                dev_err(dev, "RoCE engine init failed!\n");
4944                goto error_failed_get_cfg;
4945        }
4946
4947        return 0;
4948
4949error_failed_get_cfg:
4950        kfree(hr_dev->priv);
4951
4952error_failed_kzalloc:
4953        ib_dealloc_device(&hr_dev->ib_dev);
4954
4955        return ret;
4956}
4957
4958/**
4959 * hns_roce_remove - remove RoCE device
4960 * @pdev: pointer to platform device
4961 */
4962static int hns_roce_remove(struct platform_device *pdev)
4963{
4964        struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev);
4965
4966        hns_roce_exit(hr_dev);
4967        kfree(hr_dev->priv);
4968        ib_dealloc_device(&hr_dev->ib_dev);
4969
4970        return 0;
4971}
4972
4973static struct platform_driver hns_roce_driver = {
4974        .probe = hns_roce_probe,
4975        .remove = hns_roce_remove,
4976        .driver = {
4977                .name = DRV_NAME,
4978                .of_match_table = hns_roce_of_match,
4979                .acpi_match_table = ACPI_PTR(hns_roce_acpi_match),
4980        },
4981};
4982
/* Register the driver and emit standard module metadata. */
module_platform_driver(hns_roce_driver);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_DESCRIPTION("Hisilicon Hip06 Family RoCE Driver");
4990