linux/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <net/addrconf.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>

#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v2.h"

enum {
        CMD_RST_PRC_OTHERS,
        CMD_RST_PRC_SUCCESS,
        CMD_RST_PRC_EBUSY,
};

static inline void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
                                   struct ib_sge *sg)
{
        dseg->lkey = cpu_to_le32(sg->lkey);
        dseg->addr = cpu_to_le64(sg->addr);
        dseg->len  = cpu_to_le32(sg->length);
}

/*
 * mapped-value = 1 + real-value
 * The real hns wr opcode values start from 0. To distinguish initialized
 * entries from uninitialized ones, 1 is added to the real value when the
 * mapping is defined, so an entry is valid if and only if its mapped
 * value is greater than 0.
 */
#define HR_OPC_MAP(ib_key, hr_key) \
                [IB_WR_ ## ib_key] = 1 + HNS_ROCE_V2_WQE_OP_ ## hr_key

static const u32 hns_roce_op_code[] = {
        HR_OPC_MAP(RDMA_WRITE,                  RDMA_WRITE),
        HR_OPC_MAP(RDMA_WRITE_WITH_IMM,         RDMA_WRITE_WITH_IMM),
        HR_OPC_MAP(SEND,                        SEND),
        HR_OPC_MAP(SEND_WITH_IMM,               SEND_WITH_IMM),
        HR_OPC_MAP(RDMA_READ,                   RDMA_READ),
        HR_OPC_MAP(ATOMIC_CMP_AND_SWP,          ATOM_CMP_AND_SWAP),
        HR_OPC_MAP(ATOMIC_FETCH_AND_ADD,        ATOM_FETCH_AND_ADD),
        HR_OPC_MAP(SEND_WITH_INV,               SEND_WITH_INV),
        HR_OPC_MAP(LOCAL_INV,                   LOCAL_INV),
        HR_OPC_MAP(MASKED_ATOMIC_CMP_AND_SWP,   ATOM_MSK_CMP_AND_SWAP),
        HR_OPC_MAP(MASKED_ATOMIC_FETCH_AND_ADD, ATOM_MSK_FETCH_AND_ADD),
        HR_OPC_MAP(REG_MR,                      FAST_REG_PMR),
};

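/*
 * Translate an IB opcode into the hns hardware opcode. Unmapped slots in
 * hns_roce_op_code[] are zero-initialized, so a zero entry (as well as an
 * out-of-range IB opcode) yields HNS_ROCE_V2_WQE_OP_MASK, which serves as
 * an invalid-opcode marker. For example, IB_WR_SEND is stored as
 * 1 + HNS_ROCE_V2_WQE_OP_SEND and decoded here by subtracting 1.
 */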
static u32 to_hr_opcode(u32 ib_opcode)
{
        if (ib_opcode >= ARRAY_SIZE(hns_roce_op_code))
                return HNS_ROCE_V2_WQE_OP_MASK;

        return hns_roce_op_code[ib_opcode] ? hns_roce_op_code[ib_opcode] - 1 :
                                             HNS_ROCE_V2_WQE_OP_MASK;
}

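/*
 * Build the fast-register (FRMR) part of an RC send WQE. The FRMR segment
 * sits immediately after the fixed-size RC send WQE header, and the access
 * flags of the ib_reg_wr are translated into the corresponding WQE bits.
 */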
static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
                         const struct ib_reg_wr *wr)
{
        struct hns_roce_wqe_frmr_seg *fseg =
                (void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
        struct hns_roce_mr *mr = to_hr_mr(wr->mr);
        u64 pbl_ba;

        /* use ib_access_flags */
        hr_reg_write_bool(fseg, FRMR_BIND_EN, wr->access & IB_ACCESS_MW_BIND);
        hr_reg_write_bool(fseg, FRMR_ATOMIC,
                          wr->access & IB_ACCESS_REMOTE_ATOMIC);
        hr_reg_write_bool(fseg, FRMR_RR, wr->access & IB_ACCESS_REMOTE_READ);
        hr_reg_write_bool(fseg, FRMR_RW, wr->access & IB_ACCESS_REMOTE_WRITE);
        hr_reg_write_bool(fseg, FRMR_LW, wr->access & IB_ACCESS_LOCAL_WRITE);

        /* The msg_len and inv_key fields are reused here to carry the PBL
         * base address, which may be confusing.
         */
        pbl_ba = mr->pbl_mtr.hem_cfg.root_ba;
        rc_sq_wqe->msg_len = cpu_to_le32(lower_32_bits(pbl_ba));
        rc_sq_wqe->inv_key = cpu_to_le32(upper_32_bits(pbl_ba));

        rc_sq_wqe->byte_16 = cpu_to_le32(wr->mr->length & 0xffffffff);
        rc_sq_wqe->byte_20 = cpu_to_le32(wr->mr->length >> 32);
        rc_sq_wqe->rkey = cpu_to_le32(wr->key);
        rc_sq_wqe->va = cpu_to_le64(wr->mr->iova);

        hr_reg_write(fseg, FRMR_PBL_SIZE, mr->npages);
        hr_reg_write(fseg, FRMR_PBL_BUF_PG_SZ,
                     to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
        hr_reg_clear(fseg, FRMR_BLK_MODE);
}

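/*
 * Lay out an atomic RC send WQE: a single data segment describing the local
 * buffer (which, per IB atomic semantics, receives the original value read
 * from the remote address) followed by the atomic segment carrying the
 * operands -- swap/compare for CMP_AND_SWP, or the addend for FETCH_AND_ADD.
 */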
static void set_atomic_seg(const struct ib_send_wr *wr,
                           struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
                           unsigned int valid_num_sge)
{
        struct hns_roce_v2_wqe_data_seg *dseg =
                (void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
        struct hns_roce_wqe_atomic_seg *aseg =
                (void *)dseg + sizeof(struct hns_roce_v2_wqe_data_seg);

        set_data_seg_v2(dseg, wr->sg_list);

        if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
                aseg->fetchadd_swap_data = cpu_to_le64(atomic_wr(wr)->swap);
                aseg->cmp_data = cpu_to_le64(atomic_wr(wr)->compare_add);
        } else {
                aseg->fetchadd_swap_data =
                        cpu_to_le64(atomic_wr(wr)->compare_add);
                aseg->cmp_data = 0;
        }

        roce_set_field(rc_sq_wqe->byte_16, V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
                       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);
}

static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
                                 const struct ib_send_wr *wr,
                                 unsigned int *sge_idx, u32 msg_len)
{
        struct ib_device *ibdev = &(to_hr_dev(qp->ibqp.device))->ib_dev;
        unsigned int dseg_len = sizeof(struct hns_roce_v2_wqe_data_seg);
        unsigned int ext_sge_sz = qp->sq.max_gs * dseg_len;
        unsigned int left_len_in_pg;
        unsigned int idx = *sge_idx;
        unsigned int i = 0;
        unsigned int len;
        void *addr;
        void *dseg;

        if (msg_len > ext_sge_sz) {
                ibdev_err(ibdev,
                          "not enough extended sge space for inline data.\n");
                return -EINVAL;
        }

        dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
        left_len_in_pg = hr_hw_page_align((uintptr_t)dseg) - (uintptr_t)dseg;
        len = wr->sg_list[0].length;
        addr = (void *)(unsigned long)(wr->sg_list[0].addr);

        /* When copying data to the extended sge space, the length left in
         * the page may not be long enough for the current user sge. In that
         * case the data is split into several parts: one in the first page,
         * and the rest in the subsequent pages.
         */
        while (1) {
                if (len <= left_len_in_pg) {
                        memcpy(dseg, addr, len);

                        dseg += len;
                        idx += len / dseg_len;

                        i++;
                        if (i >= wr->num_sge)
                                break;

                        left_len_in_pg -= len;
                        len = wr->sg_list[i].length;
                        addr = (void *)(unsigned long)(wr->sg_list[i].addr);
                } else {
                        memcpy(dseg, addr, left_len_in_pg);

                        len -= left_len_in_pg;
                        addr += left_len_in_pg;
                        idx += left_len_in_pg / dseg_len;
                        dseg = hns_roce_get_extend_sge(qp,
                                                idx & (qp->sge.sge_cnt - 1));
                        left_len_in_pg = 1 << HNS_HW_PAGE_SHIFT;
                }
        }

        *sge_idx = idx;

        return 0;
}

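/*
 * Copy valid SGEs into the QP's extended SGE ring. The index is masked with
 * (sge_cnt - 1) so it wraps around the ring; zero-length SGEs are skipped.
 * The caller must pass cnt equal to the number of non-zero-length SGEs in
 * the list, since the loop only counts down for valid entries.
 */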
static void set_extend_sge(struct hns_roce_qp *qp, struct ib_sge *sge,
                           unsigned int *sge_ind, unsigned int cnt)
{
        struct hns_roce_v2_wqe_data_seg *dseg;
        unsigned int idx = *sge_ind;

        while (cnt > 0) {
                dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
                if (likely(sge->length)) {
                        set_data_seg_v2(dseg, sge);
                        idx++;
                        cnt--;
                }
                sge++;
        }

        *sge_ind = idx;
}

static bool check_inl_data_len(struct hns_roce_qp *qp, unsigned int len)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
        int mtu = ib_mtu_enum_to_int(qp->path_mtu);

        if (len > qp->max_inline_data || len > mtu) {
                ibdev_err(&hr_dev->ib_dev,
                          "invalid length of data, data len = %u, max inline len = %u, path mtu = %d.\n",
                          len, qp->max_inline_data, mtu);
                return false;
        }

        return true;
}

static int set_rc_inl(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
                      struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
                      unsigned int *sge_idx)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
        u32 msg_len = le32_to_cpu(rc_sq_wqe->msg_len);
        struct ib_device *ibdev = &hr_dev->ib_dev;
        unsigned int curr_idx = *sge_idx;
        void *dseg = rc_sq_wqe;
        unsigned int i;
        int ret;

        if (unlikely(wr->opcode == IB_WR_RDMA_READ)) {
                ibdev_err(ibdev, "invalid inline parameters!\n");
                return -EINVAL;
        }

        if (!check_inl_data_len(qp, msg_len))
                return -EINVAL;

        dseg += sizeof(struct hns_roce_v2_rc_send_wqe);

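        /*
         * Two inline layouts exist: a payload of at most
         * HNS_ROCE_V2_MAX_RC_INL_INN_SZ bytes is copied directly after the
         * WQE header (INL_TYPE = 0); anything larger sets INL_TYPE and is
         * copied into the extended SGE space instead.
         */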
        if (msg_len <= HNS_ROCE_V2_MAX_RC_INL_INN_SZ) {
                roce_set_bit(rc_sq_wqe->byte_20,
                             V2_RC_SEND_WQE_BYTE_20_INL_TYPE_S, 0);

                for (i = 0; i < wr->num_sge; i++) {
                        memcpy(dseg, ((void *)wr->sg_list[i].addr),
                               wr->sg_list[i].length);
                        dseg += wr->sg_list[i].length;
                }
        } else {
                roce_set_bit(rc_sq_wqe->byte_20,
                             V2_RC_SEND_WQE_BYTE_20_INL_TYPE_S, 1);

                ret = fill_ext_sge_inl_data(qp, wr, &curr_idx, msg_len);
                if (ret)
                        return ret;

                roce_set_field(rc_sq_wqe->byte_16,
                               V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
                               V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
                               curr_idx - *sge_idx);
        }

        *sge_idx = curr_idx;

        return 0;
}

static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
                             struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
                             unsigned int *sge_ind,
                             unsigned int valid_num_sge)
{
        struct hns_roce_v2_wqe_data_seg *dseg =
                (void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
        struct hns_roce_qp *qp = to_hr_qp(ibqp);
        int j = 0;
        int i;

        roce_set_field(rc_sq_wqe->byte_20,
                       V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
                       V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
                       (*sge_ind) & (qp->sge.sge_cnt - 1));

        roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
                     !!(wr->send_flags & IB_SEND_INLINE));
        if (wr->send_flags & IB_SEND_INLINE)
                return set_rc_inl(qp, wr, rc_sq_wqe, sge_ind);

        if (valid_num_sge <= HNS_ROCE_SGE_IN_WQE) {
                for (i = 0; i < wr->num_sge; i++) {
                        if (likely(wr->sg_list[i].length)) {
                                set_data_seg_v2(dseg, wr->sg_list + i);
                                dseg++;
                        }
                }
        } else {
                for (i = 0; i < wr->num_sge && j < HNS_ROCE_SGE_IN_WQE; i++) {
                        if (likely(wr->sg_list[i].length)) {
                                set_data_seg_v2(dseg, wr->sg_list + i);
                                dseg++;
                                j++;
                        }
                }

                set_extend_sge(qp, wr->sg_list + i, sge_ind,
                               valid_num_sge - HNS_ROCE_SGE_IN_WQE);
        }

        roce_set_field(rc_sq_wqe->byte_16,
                       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
                       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);

        return 0;
}

static int check_send_valid(struct hns_roce_dev *hr_dev,
                            struct hns_roce_qp *hr_qp)
{
        struct ib_device *ibdev = &hr_dev->ib_dev;
        struct ib_qp *ibqp = &hr_qp->ibqp;

        if (unlikely(ibqp->qp_type != IB_QPT_RC &&
                     ibqp->qp_type != IB_QPT_GSI &&
                     ibqp->qp_type != IB_QPT_UD)) {
                ibdev_err(ibdev, "not supported QP type 0x%x!\n",
                          ibqp->qp_type);
                return -EOPNOTSUPP;
        } else if (unlikely(hr_qp->state == IB_QPS_RESET ||
                   hr_qp->state == IB_QPS_INIT ||
                   hr_qp->state == IB_QPS_RTR)) {
                ibdev_err(ibdev, "failed to post WQE, QP state %u!\n",
                          hr_qp->state);
                return -EINVAL;
        } else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN)) {
                ibdev_err(ibdev, "failed to post WQE, dev state %d!\n",
                          hr_dev->state);
                return -EIO;
        }

        return 0;
}

static unsigned int calc_wr_sge_num(const struct ib_send_wr *wr,
                                    unsigned int *sge_len)
{
        unsigned int valid_num = 0;
        unsigned int len = 0;
        int i;

        for (i = 0; i < wr->num_sge; i++) {
                if (likely(wr->sg_list[i].length)) {
                        len += wr->sg_list[i].length;
                        valid_num++;
                }
        }

        *sge_len = len;
        return valid_num;
}

static __le32 get_immtdata(const struct ib_send_wr *wr)
{
        switch (wr->opcode) {
        case IB_WR_SEND_WITH_IMM:
        case IB_WR_RDMA_WRITE_WITH_IMM:
                return cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
        default:
                return 0;
        }
}

static int set_ud_opcode(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
                         const struct ib_send_wr *wr)
{
        u32 ib_op = wr->opcode;

        if (ib_op != IB_WR_SEND && ib_op != IB_WR_SEND_WITH_IMM)
                return -EINVAL;

        ud_sq_wqe->immtdata = get_immtdata(wr);

        roce_set_field(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
                       V2_UD_SEND_WQE_BYTE_4_OPCODE_S, to_hr_opcode(ib_op));

        return 0;
}

static int fill_ud_av(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
                      struct hns_roce_ah *ah)
{
        struct ib_device *ib_dev = ah->ibah.device;
        struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);

        roce_set_field(ud_sq_wqe->byte_24, V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
                       V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, ah->av.udp_sport);

        roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
                       V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S, ah->av.hop_limit);
        roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
                       V2_UD_SEND_WQE_BYTE_36_TCLASS_S, ah->av.tclass);
        roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
                       V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S, ah->av.flowlabel);

        if (WARN_ON(ah->av.sl > MAX_SERVICE_LEVEL))
                return -EINVAL;

        roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_SL_M,
                       V2_UD_SEND_WQE_BYTE_40_SL_S, ah->av.sl);

        ud_sq_wqe->sgid_index = ah->av.gid_index;

        memcpy(ud_sq_wqe->dmac, ah->av.mac, ETH_ALEN);
        memcpy(ud_sq_wqe->dgid, ah->av.dgid, GID_LEN_V2);

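        /*
         * HIP09 and later revisions do not use the VLAN fields in the UD
         * WQE, so there is nothing more to fill in for them.
         */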
        if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
                return 0;

        roce_set_bit(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S,
                     ah->av.vlan_en);
        roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_VLAN_M,
                       V2_UD_SEND_WQE_BYTE_36_VLAN_S, ah->av.vlan_id);

        return 0;
}

static inline int set_ud_wqe(struct hns_roce_qp *qp,
                             const struct ib_send_wr *wr,
                             void *wqe, unsigned int *sge_idx,
                             unsigned int owner_bit)
{
        struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
        struct hns_roce_v2_ud_send_wqe *ud_sq_wqe = wqe;
        unsigned int curr_idx = *sge_idx;
        unsigned int valid_num_sge;
        u32 msg_len = 0;
        int ret;

        valid_num_sge = calc_wr_sge_num(wr, &msg_len);

        ret = set_ud_opcode(ud_sq_wqe, wr);
        if (WARN_ON(ret))
                return ret;

        ud_sq_wqe->msg_len = cpu_to_le32(msg_len);

        roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_CQE_S,
                     !!(wr->send_flags & IB_SEND_SIGNALED));

        roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_SE_S,
                     !!(wr->send_flags & IB_SEND_SOLICITED));

        roce_set_field(ud_sq_wqe->byte_16, V2_UD_SEND_WQE_BYTE_16_PD_M,
                       V2_UD_SEND_WQE_BYTE_16_PD_S, to_hr_pd(qp->ibqp.pd)->pdn);

        roce_set_field(ud_sq_wqe->byte_16, V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
                       V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);

        roce_set_field(ud_sq_wqe->byte_20,
                       V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
                       V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
                       curr_idx & (qp->sge.sge_cnt - 1));

        ud_sq_wqe->qkey = cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
                          qp->qkey : ud_wr(wr)->remote_qkey);
        roce_set_field(ud_sq_wqe->byte_32, V2_UD_SEND_WQE_BYTE_32_DQPN_M,
                       V2_UD_SEND_WQE_BYTE_32_DQPN_S, ud_wr(wr)->remote_qpn);

        ret = fill_ud_av(ud_sq_wqe, ah);
        if (ret)
                return ret;

        qp->sl = to_hr_ah(ud_wr(wr)->ah)->av.sl;

        set_extend_sge(qp, wr->sg_list, &curr_idx, valid_num_sge);

        /*
         * The pipeline can sequentially post all valid WQEs into WQ buffer,
         * including new WQEs waiting for the doorbell to update the PI again.
         * Therefore, the owner bit of WQE MUST be updated after all fields
         * and extSGEs have been written into DDR instead of cache.
         */
        if (qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB)
                dma_wmb();

        *sge_idx = curr_idx;
        roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_OWNER_S,
                     owner_bit);

        return 0;
}

static int set_rc_opcode(struct hns_roce_dev *hr_dev,
                         struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
                         const struct ib_send_wr *wr)
{
        u32 ib_op = wr->opcode;
        int ret = 0;

        rc_sq_wqe->immtdata = get_immtdata(wr);

        switch (ib_op) {
        case IB_WR_RDMA_READ:
        case IB_WR_RDMA_WRITE:
        case IB_WR_RDMA_WRITE_WITH_IMM:
                rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
                rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
                break;
        case IB_WR_SEND:
        case IB_WR_SEND_WITH_IMM:
                break;
        case IB_WR_ATOMIC_CMP_AND_SWP:
        case IB_WR_ATOMIC_FETCH_AND_ADD:
                rc_sq_wqe->rkey = cpu_to_le32(atomic_wr(wr)->rkey);
                rc_sq_wqe->va = cpu_to_le64(atomic_wr(wr)->remote_addr);
                break;
        case IB_WR_REG_MR:
                if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
                        set_frmr_seg(rc_sq_wqe, reg_wr(wr));
                else
                        ret = -EOPNOTSUPP;
                break;
        case IB_WR_LOCAL_INV:
                roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SO_S, 1);
                fallthrough;
        case IB_WR_SEND_WITH_INV:
                rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey);
                break;
        default:
                ret = -EINVAL;
        }

        if (unlikely(ret))
                return ret;

        roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
                       V2_RC_SEND_WQE_BYTE_4_OPCODE_S, to_hr_opcode(ib_op));

        return ret;
}

static inline int set_rc_wqe(struct hns_roce_qp *qp,
                             const struct ib_send_wr *wr,
                             void *wqe, unsigned int *sge_idx,
                             unsigned int owner_bit)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
        struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
        unsigned int curr_idx = *sge_idx;
        unsigned int valid_num_sge;
        u32 msg_len = 0;
        int ret;

        valid_num_sge = calc_wr_sge_num(wr, &msg_len);

        rc_sq_wqe->msg_len = cpu_to_le32(msg_len);

        ret = set_rc_opcode(hr_dev, rc_sq_wqe, wr);
        if (WARN_ON(ret))
                return ret;

        roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_FENCE_S,
                     (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);

        roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SE_S,
                     (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);

        roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_CQE_S,
                     (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);

        if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
            wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
                set_atomic_seg(wr, rc_sq_wqe, valid_num_sge);
        else if (wr->opcode != IB_WR_REG_MR)
                ret = set_rwqe_data_seg(&qp->ibqp, wr, rc_sq_wqe,
                                        &curr_idx, valid_num_sge);

        /*
         * The pipeline can sequentially post all valid WQEs into WQ buffer,
         * including new WQEs waiting for the doorbell to update the PI again.
         * Therefore, the owner bit of WQE MUST be updated after all fields
         * and extSGEs have been written into DDR instead of cache.
         */
        if (qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB)
                dma_wmb();

        *sge_idx = curr_idx;
        roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OWNER_S,
                     owner_bit);

        return ret;
}

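/*
 * Notify hardware of new SQ work. If the QP has entered the error state,
 * a flush CQE is produced instead of ringing the doorbell; otherwise the
 * SQ doorbell carries the doorbell qpn, the command type, the new producer
 * index and the service level.
 */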
static inline void update_sq_db(struct hns_roce_dev *hr_dev,
                                struct hns_roce_qp *qp)
{
        if (unlikely(qp->state == IB_QPS_ERR)) {
                flush_cqe(hr_dev, qp);
        } else {
                struct hns_roce_v2_db sq_db = {};

                hr_reg_write(&sq_db, DB_TAG, qp->doorbell_qpn);
                hr_reg_write(&sq_db, DB_CMD, HNS_ROCE_V2_SQ_DB);
                hr_reg_write(&sq_db, DB_PI, qp->sq.head);
                hr_reg_write(&sq_db, DB_SL, qp->sl);

                hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg);
        }
}

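/*
 * The RQ counterpart of update_sq_db(). QPs with the record-doorbell
 * capability publish the new head through a doorbell record in memory that
 * hardware reads, which avoids an MMIO write; all other QPs ring the RQ
 * doorbell register directly.
 */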
static inline void update_rq_db(struct hns_roce_dev *hr_dev,
                                struct hns_roce_qp *qp)
{
        if (unlikely(qp->state == IB_QPS_ERR)) {
                flush_cqe(hr_dev, qp);
        } else {
                if (likely(qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)) {
                        *qp->rdb.db_record =
                                        qp->rq.head & V2_DB_PRODUCER_IDX_M;
                } else {
                        struct hns_roce_v2_db rq_db = {};

                        hr_reg_write(&rq_db, DB_TAG, qp->qpn);
                        hr_reg_write(&rq_db, DB_CMD, HNS_ROCE_V2_RQ_DB);
                        hr_reg_write(&rq_db, DB_PI, qp->rq.head);

                        hns_roce_write64(hr_dev, (__le32 *)&rq_db,
                                         qp->rq.db_reg);
                }
        }
}

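/*
 * Write one 64-byte WQE to the device as eight 8-byte relaxed writes,
 * unless doorbells are disabled or a hardware reset is in progress.
 */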
static void hns_roce_write512(struct hns_roce_dev *hr_dev, u64 *val,
                              u64 __iomem *dest)
{
#define HNS_ROCE_WRITE_TIMES 8
        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
        struct hnae3_handle *handle = priv->handle;
        const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
        int i;

        if (!hr_dev->dis_db && !ops->get_hw_reset_stat(handle))
                for (i = 0; i < HNS_ROCE_WRITE_TIMES; i++)
                        writeq_relaxed(*(val + i), dest + i);
}

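/*
 * Direct WQE: instead of ringing a doorbell and letting the device fetch
 * the WQE from memory, the WQE itself (with the doorbell information folded
 * into its header fields) is written straight to the doorbell area. Only
 * used when exactly one WQE is posted, see hns_roce_v2_post_send().
 */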
static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
                       void *wqe)
{
        struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;

        /* All kinds of DirectWQE have the same header field layout */
        roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_FLAG_S, 1);
        roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_DB_SL_L_M,
                       V2_RC_SEND_WQE_BYTE_4_DB_SL_L_S, qp->sl);
        roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_DB_SL_H_M,
                       V2_RC_SEND_WQE_BYTE_4_DB_SL_H_S, qp->sl >> 2);
        roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_M,
                       V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_S, qp->sq.head);

        hns_roce_write512(hr_dev, wqe, qp->sq.db_reg);
}

static int hns_roce_v2_post_send(struct ib_qp *ibqp,
                                 const struct ib_send_wr *wr,
                                 const struct ib_send_wr **bad_wr)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct ib_device *ibdev = &hr_dev->ib_dev;
        struct hns_roce_qp *qp = to_hr_qp(ibqp);
        unsigned long flags = 0;
        unsigned int owner_bit;
        unsigned int sge_idx;
        unsigned int wqe_idx;
        void *wqe = NULL;
        u32 nreq;
        int ret;

        spin_lock_irqsave(&qp->sq.lock, flags);

        ret = check_send_valid(hr_dev, qp);
        if (unlikely(ret)) {
                *bad_wr = wr;
                nreq = 0;
                goto out;
        }

        sge_idx = qp->next_sge;

        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
                        ret = -ENOMEM;
                        *bad_wr = wr;
                        goto out;
                }

                wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);

                if (unlikely(wr->num_sge > qp->sq.max_gs)) {
                        ibdev_err(ibdev, "num_sge = %d > qp->sq.max_gs = %u.\n",
                                  wr->num_sge, qp->sq.max_gs);
                        ret = -EINVAL;
                        *bad_wr = wr;
                        goto out;
                }

                wqe = hns_roce_get_send_wqe(qp, wqe_idx);
                qp->sq.wrid[wqe_idx] = wr->wr_id;
                owner_bit =
                       ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);

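                /*
                 * The owner bit is the inverted parity of how many times the
                 * SQ has wrapped, so it flips on every pass over the ring and
                 * lets hardware tell fresh WQEs from stale ones.
                 */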
                /* Build the WQE according to the QP type */
                if (ibqp->qp_type == IB_QPT_RC)
                        ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit);
                else
                        ret = set_ud_wqe(qp, wr, wqe, &sge_idx, owner_bit);

                if (unlikely(ret)) {
                        *bad_wr = wr;
                        goto out;
                }
        }

out:
        if (likely(nreq)) {
                qp->sq.head += nreq;
                qp->next_sge = sge_idx;

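                /*
                 * A single WQE can be pushed to hardware via direct WQE when
                 * the QP supports it; a batch always goes through one SQ
                 * doorbell for the whole chain.
                 */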
                if (nreq == 1 && (qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE))
                        write_dwqe(hr_dev, qp, wqe);
                else
                        update_sq_db(hr_dev, qp);
        }

        spin_unlock_irqrestore(&qp->sq.lock, flags);

        return ret;
}

static int check_recv_valid(struct hns_roce_dev *hr_dev,
                            struct hns_roce_qp *hr_qp)
{
        struct ib_device *ibdev = &hr_dev->ib_dev;
        struct ib_qp *ibqp = &hr_qp->ibqp;

        if (unlikely(ibqp->qp_type != IB_QPT_RC &&
                     ibqp->qp_type != IB_QPT_GSI &&
                     ibqp->qp_type != IB_QPT_UD)) {
                ibdev_err(ibdev, "unsupported qp type, qp_type = %d.\n",
                          ibqp->qp_type);
                return -EOPNOTSUPP;
        }

        if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN))
                return -EIO;

        if (hr_qp->state == IB_QPS_RESET)
                return -EINVAL;

        return 0;
}

static void fill_recv_sge_to_wqe(const struct ib_recv_wr *wr, void *wqe,
                                 u32 max_sge, bool rsv)
{
        struct hns_roce_v2_wqe_data_seg *dseg = wqe;
        u32 i, cnt;

        for (i = 0, cnt = 0; i < wr->num_sge; i++) {
                /* Skip zero-length sge */
                if (!wr->sg_list[i].length)
                        continue;
                set_data_seg_v2(dseg + cnt, wr->sg_list + i);
                cnt++;
        }

        /* Fill a reserved sge to make hw stop reading remaining segments */
        if (rsv) {
                dseg[cnt].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
                dseg[cnt].addr = 0;
                dseg[cnt].len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);
        } else {
                /* Clear remaining segments to make ROCEE ignore sges */
                if (cnt < max_sge)
                        memset(dseg + cnt, 0,
                               (max_sge - cnt) * HNS_ROCE_SGE_SIZE);
        }
}

static void fill_rq_wqe(struct hns_roce_qp *hr_qp, const struct ib_recv_wr *wr,
                        u32 wqe_idx, u32 max_sge)
{
        struct hns_roce_rinl_sge *sge_list;
        void *wqe = NULL;
        u32 i;

        wqe = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
        fill_recv_sge_to_wqe(wr, wqe, max_sge, hr_qp->rq.rsv_sge);

        /* The RQ supports inline data; record the receive buffers so the
         * inline payload can be copied into them on completion.
         */
        if (hr_qp->rq_inl_buf.wqe_cnt) {
                sge_list = hr_qp->rq_inl_buf.wqe_list[wqe_idx].sg_list;
                hr_qp->rq_inl_buf.wqe_list[wqe_idx].sge_cnt = (u32)wr->num_sge;
                for (i = 0; i < wr->num_sge; i++) {
                        sge_list[i].addr = (void *)(u64)wr->sg_list[i].addr;
                        sge_list[i].len = wr->sg_list[i].length;
                }
        }
}

static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
                                 const struct ib_recv_wr *wr,
                                 const struct ib_recv_wr **bad_wr)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
        struct ib_device *ibdev = &hr_dev->ib_dev;
        u32 wqe_idx, nreq, max_sge;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&hr_qp->rq.lock, flags);

        ret = check_recv_valid(hr_dev, hr_qp);
        if (unlikely(ret)) {
                *bad_wr = wr;
                nreq = 0;
                goto out;
        }

        max_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                if (unlikely(hns_roce_wq_overflow(&hr_qp->rq, nreq,
                                                  hr_qp->ibqp.recv_cq))) {
                        ret = -ENOMEM;
                        *bad_wr = wr;
                        goto out;
                }

                if (unlikely(wr->num_sge > max_sge)) {
                        ibdev_err(ibdev, "num_sge = %d > max_sge = %u.\n",
                                  wr->num_sge, max_sge);
                        ret = -EINVAL;
                        *bad_wr = wr;
                        goto out;
                }

                wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
                fill_rq_wqe(hr_qp, wr, wqe_idx, max_sge);
                hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
        }

out:
        if (likely(nreq)) {
                hr_qp->rq.head += nreq;

                update_rq_db(hr_dev, hr_qp);
        }
        spin_unlock_irqrestore(&hr_qp->rq.lock, flags);

        return ret;
}

static void *get_srq_wqe_buf(struct hns_roce_srq *srq, u32 n)
{
        return hns_roce_buf_offset(srq->buf_mtr.kmem, n << srq->wqe_shift);
}

static void *get_idx_buf(struct hns_roce_idx_que *idx_que, u32 n)
{
        return hns_roce_buf_offset(idx_que->mtr.kmem,
                                   n << idx_que->entry_shift);
}

static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, u32 wqe_index)
{
        /* always called with interrupts disabled. */
        spin_lock(&srq->lock);

        bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
        srq->idx_que.tail++;

        spin_unlock(&srq->lock);
}

static int hns_roce_srqwq_overflow(struct hns_roce_srq *srq)
{
        struct hns_roce_idx_que *idx_que = &srq->idx_que;

        return idx_que->head - idx_que->tail >= srq->wqe_cnt;
}

static int check_post_srq_valid(struct hns_roce_srq *srq, u32 max_sge,
                                const struct ib_recv_wr *wr)
{
        struct ib_device *ib_dev = srq->ibsrq.device;

        if (unlikely(wr->num_sge > max_sge)) {
                ibdev_err(ib_dev,
                          "failed to check sge, wr->num_sge = %d, max_sge = %u.\n",
                          wr->num_sge, max_sge);
                return -EINVAL;
        }

        if (unlikely(hns_roce_srqwq_overflow(srq))) {
                ibdev_err(ib_dev,
                          "failed to check srqwq status, srqwq is full.\n");
                return -ENOMEM;
        }

        return 0;
}

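/*
 * SRQ WQEs can be consumed out of order, so free slots are tracked with a
 * bitmap rather than a simple ring: get_srq_wqe_idx() claims the first free
 * slot, and fill_wqe_idx() publishes that slot number to hardware through
 * the index queue at the current head.
 */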
static int get_srq_wqe_idx(struct hns_roce_srq *srq, u32 *wqe_idx)
{
        struct hns_roce_idx_que *idx_que = &srq->idx_que;
        u32 pos;

        pos = find_first_zero_bit(idx_que->bitmap, srq->wqe_cnt);
        if (unlikely(pos == srq->wqe_cnt))
                return -ENOSPC;

        bitmap_set(idx_que->bitmap, pos, 1);
        *wqe_idx = pos;
        return 0;
}

static void fill_wqe_idx(struct hns_roce_srq *srq, unsigned int wqe_idx)
{
        struct hns_roce_idx_que *idx_que = &srq->idx_que;
        unsigned int head;
        __le32 *buf;

        head = idx_que->head & (srq->wqe_cnt - 1);

        buf = get_idx_buf(idx_que, head);
        *buf = cpu_to_le32(wqe_idx);

        idx_que->head++;
}

static void update_srq_db(struct hns_roce_v2_db *db, struct hns_roce_srq *srq)
{
        hr_reg_write(db, DB_TAG, srq->srqn);
        hr_reg_write(db, DB_CMD, HNS_ROCE_V2_SRQ_DB);
        hr_reg_write(db, DB_PI, srq->idx_que.head);
}

static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
                                     const struct ib_recv_wr *wr,
                                     const struct ib_recv_wr **bad_wr)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
        struct hns_roce_srq *srq = to_hr_srq(ibsrq);
        struct hns_roce_v2_db srq_db;
        unsigned long flags;
        int ret = 0;
        u32 max_sge;
        u32 wqe_idx;
        void *wqe;
        u32 nreq;

        spin_lock_irqsave(&srq->lock, flags);

        max_sge = srq->max_gs - srq->rsv_sge;
        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                ret = check_post_srq_valid(srq, max_sge, wr);
                if (ret) {
                        *bad_wr = wr;
                        break;
                }

                ret = get_srq_wqe_idx(srq, &wqe_idx);
                if (unlikely(ret)) {
                        *bad_wr = wr;
                        break;
                }

                wqe = get_srq_wqe_buf(srq, wqe_idx);
                fill_recv_sge_to_wqe(wr, wqe, max_sge, srq->rsv_sge);
                fill_wqe_idx(srq, wqe_idx);
                srq->wrid[wqe_idx] = wr->wr_id;
        }

        if (likely(nreq)) {
                update_srq_db(&srq_db, srq);

                hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg);
        }

        spin_unlock_irqrestore(&srq->lock, flags);

        return ret;
}

static u32 hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
                                      unsigned long instance_stage,
                                      unsigned long reset_stage)
{
        /* When a hardware reset has completed at least once, we should stop
         * sending mailbox/cmq/doorbell operations to the hardware. If we are
         * currently in the .init_instance() function, or at the
         * HNAE3_INIT_CLIENT stage of the soft reset process, we should exit
         * with an error so that the HNAE3_INIT_CLIENT path can roll back its
         * work (such as notifying the hardware to free resources) and report
         * the failure, which makes the NIC driver reschedule the soft reset
         * process.
         */
        hr_dev->is_reset = true;
        hr_dev->dis_db = true;

        if (reset_stage == HNS_ROCE_STATE_RST_INIT ||
            instance_stage == HNS_ROCE_STATE_INIT)
                return CMD_RST_PRC_EBUSY;

        return CMD_RST_PRC_SUCCESS;
}

static u32 hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
                                        unsigned long instance_stage,
                                        unsigned long reset_stage)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        struct hnae3_handle *handle = priv->handle;
        const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

        /* When a hardware reset is detected, we should stop sending
         * mailbox/cmq/doorbell operations to the hardware. If we are
         * currently in the .init_instance() function, or at the
         * HNAE3_INIT_CLIENT stage of the soft reset process, we should exit
         * with an error so that the HNAE3_INIT_CLIENT path can roll back its
         * work (such as notifying the hardware to free resources) and report
         * the failure, which makes the NIC driver reschedule the soft reset
         * process.
         */
        hr_dev->dis_db = true;
        if (!ops->get_hw_reset_stat(handle))
                hr_dev->is_reset = true;

        if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
            instance_stage == HNS_ROCE_STATE_INIT)
                return CMD_RST_PRC_EBUSY;

        return CMD_RST_PRC_SUCCESS;
}

static u32 hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        struct hnae3_handle *handle = priv->handle;
        const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

        /* When a software reset is detected in the .init_instance() function,
         * we should stop sending mailbox/cmq/doorbell operations to the
         * hardware and exit with an error.
         */
        hr_dev->dis_db = true;
        if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt)
                hr_dev->is_reset = true;

        return CMD_RST_PRC_EBUSY;
}

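/*
 * Classify the current reset situation into one of three results:
 * CMD_RST_PRC_OTHERS (no reset in progress, proceed normally),
 * CMD_RST_PRC_SUCCESS (a reset has completed, treat the command as done),
 * or CMD_RST_PRC_EBUSY (a reset is in progress, the caller should retry).
 */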
static u32 check_aedev_reset_status(struct hns_roce_dev *hr_dev,
                                    struct hnae3_handle *handle)
{
        const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
        unsigned long instance_stage; /* the current instance stage */
        unsigned long reset_stage; /* the current reset stage */
        unsigned long reset_cnt;
        bool sw_resetting;
        bool hw_resetting;

        /* Get information about resets from the NIC driver or from the RoCE
         * driver itself. The meaning of the following variables provided by
         * the NIC driver is as follows:
         * reset_cnt -- the count of completed hardware resets.
         * hw_resetting -- whether the hardware device is resetting now.
         * sw_resetting -- whether the NIC's software reset process is
         * running now.
         */
        instance_stage = handle->rinfo.instance_state;
        reset_stage = handle->rinfo.reset_state;
        reset_cnt = ops->ae_dev_reset_cnt(handle);
        if (reset_cnt != hr_dev->reset_cnt)
                return hns_roce_v2_cmd_hw_reseted(hr_dev, instance_stage,
                                                  reset_stage);

        hw_resetting = ops->get_cmdq_stat(handle);
        if (hw_resetting)
                return hns_roce_v2_cmd_hw_resetting(hr_dev, instance_stage,
                                                    reset_stage);

        sw_resetting = ops->ae_dev_resetting(handle);
        if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT)
                return hns_roce_v2_cmd_sw_resetting(hr_dev);

        return CMD_RST_PRC_OTHERS;
}

static bool check_device_is_in_reset(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        struct hnae3_handle *handle = priv->handle;
        const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

        if (hr_dev->reset_cnt != ops->ae_dev_reset_cnt(handle))
                return true;

        if (ops->get_hw_reset_stat(handle))
                return true;

        if (ops->ae_dev_resetting(handle))
                return true;

        return false;
}

static bool v2_chk_mbox_is_avail(struct hns_roce_dev *hr_dev, bool *busy)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        u32 status;

        if (hr_dev->is_reset)
                status = CMD_RST_PRC_SUCCESS;
        else
                status = check_aedev_reset_status(hr_dev, priv->handle);

        *busy = (status == CMD_RST_PRC_EBUSY);

        return status == CMD_RST_PRC_OTHERS;
}

static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
                                   struct hns_roce_v2_cmq_ring *ring)
{
        int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);

        ring->desc = kzalloc(size, GFP_KERNEL);
        if (!ring->desc)
                return -ENOMEM;

        ring->desc_dma_addr = dma_map_single(hr_dev->dev, ring->desc, size,
                                             DMA_BIDIRECTIONAL);
        if (dma_mapping_error(hr_dev->dev, ring->desc_dma_addr)) {
                ring->desc_dma_addr = 0;
                kfree(ring->desc);
                ring->desc = NULL;

                return -ENOMEM;
        }

        return 0;
}

static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
                                   struct hns_roce_v2_cmq_ring *ring)
{
        dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
                         ring->desc_num * sizeof(struct hns_roce_cmq_desc),
                         DMA_BIDIRECTIONAL);

        ring->desc_dma_addr = 0;
        kfree(ring->desc);
}

static int init_csq(struct hns_roce_dev *hr_dev,
                    struct hns_roce_v2_cmq_ring *csq)
{
        dma_addr_t dma;
        int ret;

        csq->desc_num = CMD_CSQ_DESC_NUM;
        spin_lock_init(&csq->lock);
        csq->flag = TYPE_CSQ;
        csq->head = 0;

        ret = hns_roce_alloc_cmq_desc(hr_dev, csq);
        if (ret)
                return ret;

        dma = csq->desc_dma_addr;
        roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, lower_32_bits(dma));
        roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG, upper_32_bits(dma));
        roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
                   (u32)csq->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);

        /* Make sure to write CI first and then PI */
        roce_write(hr_dev, ROCEE_TX_CMQ_CI_REG, 0);
        roce_write(hr_dev, ROCEE_TX_CMQ_PI_REG, 0);

        return 0;
}

static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        int ret;

        priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;

        ret = init_csq(hr_dev, &priv->cmq.csq);
        if (ret)
                dev_err(hr_dev->dev, "failed to init CSQ, ret = %d.\n", ret);

        return ret;
}

static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;

        hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
}

static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
                                          enum hns_roce_opcode_type opcode,
                                          bool is_read)
{
        memset((void *)desc, 0, sizeof(struct hns_roce_cmq_desc));
        desc->opcode = cpu_to_le16(opcode);
        desc->flag =
                cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
        if (is_read)
                desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
        else
                desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
}

static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
{
        u32 tail = roce_read(hr_dev, ROCEE_TX_CMQ_CI_REG);
        struct hns_roce_v2_priv *priv = hr_dev->priv;

        return tail == priv->cmq.csq.head;
}

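/*
 * Push one or more command descriptors through the CSQ ring: copy them in
 * at the current head, bump the PI register so hardware starts fetching,
 * then poll until the hardware CI catches up with the head. On success the
 * per-descriptor retval fields are checked; on timeout the head is rewound
 * to the hardware CI and -EAGAIN is returned.
 */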
static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
                               struct hns_roce_cmq_desc *desc, int num)
{
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
        u32 timeout = 0;
        u16 desc_ret;
        u32 tail;
        int ret;
        int i;

        spin_lock_bh(&csq->lock);

        tail = csq->head;

        for (i = 0; i < num; i++) {
                csq->desc[csq->head++] = desc[i];
                if (csq->head == csq->desc_num)
                        csq->head = 0;
        }

        /* Write to hardware */
        roce_write(hr_dev, ROCEE_TX_CMQ_PI_REG, csq->head);

        /* If the command is sync, wait for the firmware to write back; if
         * multiple descriptors are sent, use the first one to check for
         * completion.
         */
        if (le16_to_cpu(desc->flag) & HNS_ROCE_CMD_FLAG_NO_INTR) {
                do {
                        if (hns_roce_cmq_csq_done(hr_dev))
                                break;
                        udelay(1);
                } while (++timeout < priv->cmq.tx_timeout);
        }

        if (hns_roce_cmq_csq_done(hr_dev)) {
                for (ret = 0, i = 0; i < num; i++) {
                        /* check the result of hardware write back */
                        desc[i] = csq->desc[tail++];
                        if (tail == csq->desc_num)
                                tail = 0;

                        desc_ret = le16_to_cpu(desc[i].retval);
                        if (likely(desc_ret == CMD_EXEC_SUCCESS))
                                continue;

                        dev_err_ratelimited(hr_dev->dev,
                                            "Cmdq IO error, opcode = %x, return = %x\n",
                                            desc->opcode, desc_ret);
                        ret = -EIO;
                }
        } else {
                /* FW/HW reset or incorrect number of desc */
                tail = roce_read(hr_dev, ROCEE_TX_CMQ_CI_REG);
                dev_warn(hr_dev->dev, "CMDQ move tail from %d to %d\n",
                         csq->head, tail);
                csq->head = tail;

                ret = -EAGAIN;
        }

        spin_unlock_bh(&csq->lock);

        return ret;
}

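/*
 * Reset-tolerant wrapper around __hns_roce_cmq_send(): if a reset is
 * detected before or after the send, the command is reported as -EBUSY
 * (retry later) or as success, presumably because the relevant state will
 * be rebuilt once the reset completes.
 */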
static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
                             struct hns_roce_cmq_desc *desc, int num)
{
        bool busy;
        int ret;

        if (!v2_chk_mbox_is_avail(hr_dev, &busy))
                return busy ? -EBUSY : 0;

        ret = __hns_roce_cmq_send(hr_dev, desc, num);
        if (ret) {
                if (!v2_chk_mbox_is_avail(hr_dev, &busy))
                        return busy ? -EBUSY : 0;
        }

        return ret;
}

static int config_hem_ba_to_hw(struct hns_roce_dev *hr_dev, unsigned long obj,
                               dma_addr_t base_addr, u16 op)
{
        struct hns_roce_cmd_mailbox *mbox = hns_roce_alloc_cmd_mailbox(hr_dev);
        int ret;

        if (IS_ERR(mbox))
                return PTR_ERR(mbox);

        ret = hns_roce_cmd_mbox(hr_dev, base_addr, mbox->dma, obj, 0, op,
                                HNS_ROCE_CMD_TIMEOUT_MSECS);
        hns_roce_free_cmd_mailbox(hr_dev, mbox);
        return ret;
}

static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_query_version *resp;
        struct hns_roce_cmq_desc desc;
        int ret;

        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true);
        ret = hns_roce_cmq_send(hr_dev, &desc, 1);
        if (ret)
                return ret;

        resp = (struct hns_roce_query_version *)desc.data;
        hr_dev->hw_rev = le16_to_cpu(resp->rocee_hw_version);
        hr_dev->vendor_id = hr_dev->pci_dev->vendor;

        return 0;
}

static void func_clr_hw_resetting_state(struct hns_roce_dev *hr_dev,
                                        struct hnae3_handle *handle)
{
        const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
        unsigned long end;

        hr_dev->dis_db = true;

        dev_warn(hr_dev->dev,
                 "Func clear is pending, device in resetting state.\n");
        end = HNS_ROCE_V2_HW_RST_TIMEOUT;
        while (end) {
                if (!ops->get_hw_reset_stat(handle)) {
                        hr_dev->is_reset = true;
                        dev_info(hr_dev->dev,
                                 "Func clear success after reset.\n");
                        return;
                }
                msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
                end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
        }

        dev_warn(hr_dev->dev, "Func clear failed.\n");
}

1409static void func_clr_sw_resetting_state(struct hns_roce_dev *hr_dev,
1410                                        struct hnae3_handle *handle)
1411{
1412        const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1413        unsigned long end;
1414
1415        hr_dev->dis_db = true;
1416
1417        dev_warn(hr_dev->dev,
1418                 "Func clear is pending, device in resetting state.\n");
1419        end = HNS_ROCE_V2_HW_RST_TIMEOUT;
1420        while (end) {
1421                if (ops->ae_dev_reset_cnt(handle) !=
1422                    hr_dev->reset_cnt) {
1423                        hr_dev->is_reset = true;
1424                        dev_info(hr_dev->dev,
1425                                 "Func clear success after sw reset\n");
1426                        return;
1427                }
1428                msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
1429                end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
1430        }
1431
1432        dev_warn(hr_dev->dev, "Func clear failed because of unfinished sw reset.\n");
1433}
1434
1435static void hns_roce_func_clr_rst_proc(struct hns_roce_dev *hr_dev, int retval,
1436                                       int flag)
1437{
1438        struct hns_roce_v2_priv *priv = hr_dev->priv;
1439        struct hnae3_handle *handle = priv->handle;
1440        const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1441
1442        if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt) {
1443                hr_dev->dis_db = true;
1444                hr_dev->is_reset = true;
1445                dev_info(hr_dev->dev, "Func clear success after reset.\n");
1446                return;
1447        }
1448
1449        if (ops->get_hw_reset_stat(handle)) {
1450                func_clr_hw_resetting_state(hr_dev, handle);
1451                return;
1452        }
1453
1454        if (ops->ae_dev_resetting(handle) &&
1455            handle->rinfo.instance_state == HNS_ROCE_STATE_INIT) {
1456                func_clr_sw_resetting_state(hr_dev, handle);
1457                return;
1458        }
1459
1460        if (retval && !flag)
1461                dev_warn(hr_dev->dev,
1462                         "Func clear read failed, ret = %d.\n", retval);
1463
1464        dev_warn(hr_dev->dev, "Func clear failed.\n");
1465}
1466
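/*
 * Function-clear handshake with firmware: write a FUNC_CLEAR command for
 * vf_id, then poll with read commands until firmware sets
 * FUNC_CLEAR_RST_FUN_DONE_S or the timeout budget runs out. Every exit
 * through the out label delegates to hns_roce_func_clr_rst_proc(), which
 * decides whether an in-flight reset makes the failure benign.
 */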
1467static void __hns_roce_function_clear(struct hns_roce_dev *hr_dev, int vf_id)
1468{
1469        bool fclr_write_fail_flag = false;
1470        struct hns_roce_func_clear *resp;
1471        struct hns_roce_cmq_desc desc;
1472        unsigned long end;
1473        int ret = 0;
1474
1475        if (check_device_is_in_reset(hr_dev))
1476                goto out;
1477
1478        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR, false);
1479        resp = (struct hns_roce_func_clear *)desc.data;
1480        resp->rst_funcid_en = cpu_to_le32(vf_id);
1481
1482        ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1483        if (ret) {
1484                fclr_write_fail_flag = true;
1485                dev_err(hr_dev->dev, "Func clear write failed, ret = %d.\n",
1486                        ret);
1487                goto out;
1488        }
1489
1490        msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL);
1491        end = HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS;
1492        while (end) {
1493                if (check_device_is_in_reset(hr_dev))
1494                        goto out;
1495                msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT);
1496                end -= HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT;
1497
1498                hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR,
1499                                              true);
1500
1501                resp->rst_funcid_en = cpu_to_le32(vf_id);
1502                ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1503                if (ret)
1504                        continue;
1505
1506                if (roce_get_bit(resp->func_done, FUNC_CLEAR_RST_FUN_DONE_S)) {
1507                        if (vf_id == 0)
1508                                hr_dev->is_reset = true;
1509                        return;
1510                }
1511        }
1512
1513out:
1514        hns_roce_func_clr_rst_proc(hr_dev, ret, fclr_write_fail_flag);
1515}
1516
1517static void hns_roce_free_vf_resource(struct hns_roce_dev *hr_dev, int vf_id)
1518{
1519        enum hns_roce_opcode_type opcode = HNS_ROCE_OPC_ALLOC_VF_RES;
1520        struct hns_roce_cmq_desc desc[2];
1521        struct hns_roce_cmq_req *req_a;
1522
1523        req_a = (struct hns_roce_cmq_req *)desc[0].data;
1524        hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
1525        desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1526        hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);
1527        hr_reg_write(req_a, FUNC_RES_A_VF_ID, vf_id);
1528        hns_roce_cmq_send(hr_dev, desc, 2);
1529}
1530
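/*
 * Tear functions down from the highest VF to the PF (function 0): each VF
 * is cleared and then has its HEM resources returned, while the PF itself
 * is cleared last of all.
 */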
1531static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
1532{
1533        int i;
1534
1535        for (i = hr_dev->func_num - 1; i >= 0; i--) {
1536                __hns_roce_function_clear(hr_dev, i);
1537                if (i != 0)
1538                        hns_roce_free_vf_resource(hr_dev, i);
1539        }
1540}
1541
1542static int hns_roce_clear_extdb_list_info(struct hns_roce_dev *hr_dev)
1543{
1544        struct hns_roce_cmq_desc desc;
1545        int ret;
1546
1547        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLEAR_EXTDB_LIST_INFO,
1548                                      false);
1549        ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1550        if (ret)
1551                ibdev_err(&hr_dev->ib_dev,
1552                          "failed to clear extended doorbell info, ret = %d.\n",
1553                          ret);
1554
1555        return ret;
1556}
1557
1558static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
1559{
1560        struct hns_roce_query_fw_info *resp;
1561        struct hns_roce_cmq_desc desc;
1562        int ret;
1563
1564        hns_roce_cmq_setup_basic_desc(&desc, HNS_QUERY_FW_VER, true);
1565        ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1566        if (ret)
1567                return ret;
1568
1569        resp = (struct hns_roce_query_fw_info *)desc.data;
1570        hr_dev->caps.fw_ver = (u64)(le32_to_cpu(resp->fw_ver));
1571
1572        return 0;
1573}
1574
1575static int hns_roce_query_func_info(struct hns_roce_dev *hr_dev)
1576{
1577        struct hns_roce_cmq_desc desc;
1578        int ret;
1579
1580        if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09) {
1581                hr_dev->func_num = 1;
1582                return 0;
1583        }
1584
1585        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_FUNC_INFO,
1586                                      true);
1587        ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1588        if (ret) {
1589                hr_dev->func_num = 1;
1590                return ret;
1591        }
1592
1593        hr_dev->func_num = le32_to_cpu(desc.func_info.own_func_num);
1594        hr_dev->cong_algo_tmpl_id = le32_to_cpu(desc.func_info.own_mac_id);
1595
1596        return 0;
1597}
1598
1599static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
1600{
1601        struct hns_roce_cmq_desc desc;
1602        struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
1603
1604        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
1605                                      false);
1606
1607        hr_reg_write(req, CFG_GLOBAL_PARAM_1US_CYCLES, 0x3e8);
1608        hr_reg_write(req, CFG_GLOBAL_PARAM_UDP_PORT, ROCE_V2_UDP_DPORT);
1609
1610        return hns_roce_cmq_send(hr_dev, &desc, 1);
1611}
1612
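/*
 * Firmware reports resource totals shared by all functions, so the PF
 * divides every count by func_num to get its per-function share (a VF
 * queries with func_num == 1 and keeps the numbers as-is). For example,
 * if firmware reported 1024 QPC BT entries and func_num were 4, each
 * function would end up with 256 entries (illustrative numbers only).
 */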
1613static int load_func_res_caps(struct hns_roce_dev *hr_dev, bool is_vf)
1614{
1615        struct hns_roce_cmq_desc desc[2];
1616        struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
1617        struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
1618        struct hns_roce_caps *caps = &hr_dev->caps;
1619        enum hns_roce_opcode_type opcode;
1620        u32 func_num;
1621        int ret;
1622
1623        if (is_vf) {
1624                opcode = HNS_ROCE_OPC_QUERY_VF_RES;
1625                func_num = 1;
1626        } else {
1627                opcode = HNS_ROCE_OPC_QUERY_PF_RES;
1628                func_num = hr_dev->func_num;
1629        }
1630
1631        hns_roce_cmq_setup_basic_desc(&desc[0], opcode, true);
1632        desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1633        hns_roce_cmq_setup_basic_desc(&desc[1], opcode, true);
1634
1635        ret = hns_roce_cmq_send(hr_dev, desc, 2);
1636        if (ret)
1637                return ret;
1638
1639        caps->qpc_bt_num = hr_reg_read(r_a, FUNC_RES_A_QPC_BT_NUM) / func_num;
1640        caps->srqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_SRQC_BT_NUM) / func_num;
1641        caps->cqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_CQC_BT_NUM) / func_num;
1642        caps->mpt_bt_num = hr_reg_read(r_a, FUNC_RES_A_MPT_BT_NUM) / func_num;
1643        caps->eqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_EQC_BT_NUM) / func_num;
1644        caps->smac_bt_num = hr_reg_read(r_b, FUNC_RES_B_SMAC_NUM) / func_num;
1645        caps->sgid_bt_num = hr_reg_read(r_b, FUNC_RES_B_SGID_NUM) / func_num;
1646        caps->sccc_bt_num = hr_reg_read(r_b, FUNC_RES_B_SCCC_BT_NUM) / func_num;
1647
1648        if (is_vf) {
1649                caps->sl_num = hr_reg_read(r_b, FUNC_RES_V_QID_NUM) / func_num;
1650                caps->gmv_bt_num = hr_reg_read(r_b, FUNC_RES_V_GMV_BT_NUM) /
1651                                               func_num;
1652        } else {
1653                caps->sl_num = hr_reg_read(r_b, FUNC_RES_B_QID_NUM) / func_num;
1654                caps->gmv_bt_num = hr_reg_read(r_b, FUNC_RES_B_GMV_BT_NUM) /
1655                                               func_num;
1656        }
1657
1658        return 0;
1659}
1660
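/*
 * QP counts from the extended-config query are divided per function and
 * then rounded down to a multiple of HNS_ROCE_QP_BANK_NUM so every QP
 * bank stays the same size; e.g. a per-function share of 1001 QPs would
 * be trimmed to 1000 if the bank count were 8 (illustrative numbers).
 */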
1661static int load_ext_cfg_caps(struct hns_roce_dev *hr_dev, bool is_vf)
1662{
1663        struct hns_roce_cmq_desc desc;
1664        struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
1665        struct hns_roce_caps *caps = &hr_dev->caps;
1666        u32 func_num, qp_num;
1667        int ret;
1668
1669        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_EXT_CFG, true);
1670        ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1671        if (ret)
1672                return ret;
1673
1674        func_num = is_vf ? 1 : max_t(u32, 1, hr_dev->func_num);
1675        qp_num = hr_reg_read(req, EXT_CFG_QP_PI_NUM) / func_num;
1676        caps->num_pi_qps = round_down(qp_num, HNS_ROCE_QP_BANK_NUM);
1677
1678        qp_num = hr_reg_read(req, EXT_CFG_QP_NUM) / func_num;
1679        caps->num_qps = round_down(qp_num, HNS_ROCE_QP_BANK_NUM);
1680
1681        return 0;
1682}
1683
1684static int load_pf_timer_res_caps(struct hns_roce_dev *hr_dev)
1685{
1686        struct hns_roce_cmq_desc desc;
1687        struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
1688        struct hns_roce_caps *caps = &hr_dev->caps;
1689        int ret;
1690
1691        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
1692                                      true);
1693
1694        ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1695        if (ret)
1696                return ret;
1697
1698        caps->qpc_timer_bt_num = hr_reg_read(req, PF_TIMER_RES_QPC_ITEM_NUM);
1699        caps->cqc_timer_bt_num = hr_reg_read(req, PF_TIMER_RES_CQC_ITEM_NUM);
1700
1701        return 0;
1702}
1703
1704static int query_func_resource_caps(struct hns_roce_dev *hr_dev, bool is_vf)
1705{
1706        struct device *dev = hr_dev->dev;
1707        int ret;
1708
1709        ret = load_func_res_caps(hr_dev, is_vf);
1710        if (ret) {
1711                dev_err(dev, "failed to load res caps, ret = %d (%s).\n", ret,
1712                        is_vf ? "vf" : "pf");
1713                return ret;
1714        }
1715
1716        if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
1717                ret = load_ext_cfg_caps(hr_dev, is_vf);
1718                if (ret)
1719                        dev_err(dev, "failed to load ext cfg, ret = %d (%s).\n",
1720                                ret, is_vf ? "vf" : "pf");
1721        }
1722
1723        return ret;
1724}
1725
1726static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
1727{
1728        struct device *dev = hr_dev->dev;
1729        int ret;
1730
1731        ret = query_func_resource_caps(hr_dev, false);
1732        if (ret)
1733                return ret;
1734
1735        ret = load_pf_timer_res_caps(hr_dev);
1736        if (ret)
1737                dev_err(dev, "failed to load pf timer resource, ret = %d.\n",
1738                        ret);
1739
1740        return ret;
1741}
1742
1743static int hns_roce_query_vf_resource(struct hns_roce_dev *hr_dev)
1744{
1745        return query_func_resource_caps(hr_dev, true);
1746}
1747
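/*
 * Two-step read-modify-write of the per-VF switch parameters: the first
 * CMQ transaction reads the current settings, then the descriptor flags
 * are rewritten (clearing the flag that marked it as a read) so the second
 * transaction writes the modified loopback/destination-override bits back.
 */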
1748static int __hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
1749                                          u32 vf_id)
1750{
1751        struct hns_roce_vf_switch *swt;
1752        struct hns_roce_cmq_desc desc;
1753        int ret;
1754
1755        swt = (struct hns_roce_vf_switch *)desc.data;
1756        hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true);
1757        swt->rocee_sel |= cpu_to_le32(HNS_ICL_SWITCH_CMD_ROCEE_SEL);
1758        roce_set_field(swt->fun_id, VF_SWITCH_DATA_FUN_ID_VF_ID_M,
1759                       VF_SWITCH_DATA_FUN_ID_VF_ID_S, vf_id);
1760        ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1761        if (ret)
1762                return ret;
1763
1764        desc.flag =
1765                cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
1766        desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
1767        roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LPBK_S, 1);
1768        roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S, 0);
1769        roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_DST_OVRD_S, 1);
1770
1771        return hns_roce_cmq_send(hr_dev, &desc, 1);
1772}
1773
1774static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev)
1775{
1776        u32 vf_id;
1777        int ret;
1778
1779        for (vf_id = 0; vf_id < hr_dev->func_num; vf_id++) {
1780                ret = __hns_roce_set_vf_switch_param(hr_dev, vf_id);
1781                if (ret)
1782                        return ret;
1783        }
1784        return 0;
1785}
1786
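/*
 * Hand each VF a contiguous slice of every HEM resource: VF n owns
 * indexes [n * per_func_num, (n + 1) * per_func_num) of the QPC, SRQC,
 * CQC, MPT and EQC base-address tables, using the per-function counts
 * computed by load_func_res_caps(). A sketch of the slicing rule
 * (hypothetical helper, not part of this driver):
 *
 *	static inline u32 vf_slice_start(u32 vf_id, u32 per_func_num)
 *	{
 *		return vf_id * per_func_num;
 *	}
 */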
1787static int config_vf_hem_resource(struct hns_roce_dev *hr_dev, int vf_id)
1788{
1789        struct hns_roce_cmq_desc desc[2];
1790        struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
1791        struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
1792        enum hns_roce_opcode_type opcode = HNS_ROCE_OPC_ALLOC_VF_RES;
1793        struct hns_roce_caps *caps = &hr_dev->caps;
1794
1795        hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
1796        desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1797        hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);
1798
1799        hr_reg_write(r_a, FUNC_RES_A_VF_ID, vf_id);
1800
1801        hr_reg_write(r_a, FUNC_RES_A_QPC_BT_NUM, caps->qpc_bt_num);
1802        hr_reg_write(r_a, FUNC_RES_A_QPC_BT_IDX, vf_id * caps->qpc_bt_num);
1803        hr_reg_write(r_a, FUNC_RES_A_SRQC_BT_NUM, caps->srqc_bt_num);
1804        hr_reg_write(r_a, FUNC_RES_A_SRQC_BT_IDX, vf_id * caps->srqc_bt_num);
1805        hr_reg_write(r_a, FUNC_RES_A_CQC_BT_NUM, caps->cqc_bt_num);
1806        hr_reg_write(r_a, FUNC_RES_A_CQC_BT_IDX, vf_id * caps->cqc_bt_num);
1807        hr_reg_write(r_a, FUNC_RES_A_MPT_BT_NUM, caps->mpt_bt_num);
1808        hr_reg_write(r_a, FUNC_RES_A_MPT_BT_IDX, vf_id * caps->mpt_bt_num);
1809        hr_reg_write(r_a, FUNC_RES_A_EQC_BT_NUM, caps->eqc_bt_num);
1810        hr_reg_write(r_a, FUNC_RES_A_EQC_BT_IDX, vf_id * caps->eqc_bt_num);
1811        hr_reg_write(r_b, FUNC_RES_V_QID_NUM, caps->sl_num);
1812        hr_reg_write(r_b, FUNC_RES_B_QID_IDX, vf_id * caps->sl_num);
1813        hr_reg_write(r_b, FUNC_RES_B_SCCC_BT_NUM, caps->sccc_bt_num);
1814        hr_reg_write(r_b, FUNC_RES_B_SCCC_BT_IDX, vf_id * caps->sccc_bt_num);
1815
1816        if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
1817                hr_reg_write(r_b, FUNC_RES_V_GMV_BT_NUM, caps->gmv_bt_num);
1818                hr_reg_write(r_b, FUNC_RES_B_GMV_BT_IDX,
1819                             vf_id * caps->gmv_bt_num);
1820        } else {
1821                hr_reg_write(r_b, FUNC_RES_B_SGID_NUM, caps->sgid_bt_num);
1822                hr_reg_write(r_b, FUNC_RES_B_SGID_IDX,
1823                             vf_id * caps->sgid_bt_num);
1824                hr_reg_write(r_b, FUNC_RES_B_SMAC_NUM, caps->smac_bt_num);
1825                hr_reg_write(r_b, FUNC_RES_B_SMAC_IDX,
1826                             vf_id * caps->smac_bt_num);
1827        }
1828
1829        return hns_roce_cmq_send(hr_dev, desc, 2);
1830}
1831
1832static int config_vf_ext_resource(struct hns_roce_dev *hr_dev, u32 vf_id)
1833{
1834        struct hns_roce_cmq_desc desc;
1835        struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
1836        struct hns_roce_caps *caps = &hr_dev->caps;
1837
1838        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_EXT_CFG, false);
1839
1840        hr_reg_write(req, EXT_CFG_VF_ID, vf_id);
1841
1842        hr_reg_write(req, EXT_CFG_QP_PI_NUM, caps->num_pi_qps);
1843        hr_reg_write(req, EXT_CFG_QP_PI_IDX, vf_id * caps->num_pi_qps);
1844        hr_reg_write(req, EXT_CFG_QP_NUM, caps->num_qps);
1845        hr_reg_write(req, EXT_CFG_QP_IDX, vf_id * caps->num_qps);
1846
1847        return hns_roce_cmq_send(hr_dev, &desc, 1);
1848}
1849
1850static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
1851{
1852        u32 func_num = max_t(u32, 1, hr_dev->func_num);
1853        u32 vf_id;
1854        int ret;
1855
1856        for (vf_id = 0; vf_id < func_num; vf_id++) {
1857                ret = config_vf_hem_resource(hr_dev, vf_id);
1858                if (ret) {
1859                        dev_err(hr_dev->dev,
1860                                "failed to config vf-%u hem res, ret = %d.\n",
1861                                vf_id, ret);
1862                        return ret;
1863                }
1864
1865                if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
1866                        ret = config_vf_ext_resource(hr_dev, vf_id);
1867                        if (ret) {
1868                                dev_err(hr_dev->dev,
1869                                        "failed to config vf-%u ext res, ret = %d.\n",
1870                                        vf_id, ret);
1871                                return ret;
1872                        }
1873                }
1874        }
1875
1876        return 0;
1877}
1878
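/*
 * Program base-address-table attributes. Page sizes are written as page
 * shifts biased by PG_SHIFT_OFFSET, and hop counts go through
 * to_hr_hem_hopnum() so the HNS_ROCE_HOP_NUM_0 encoding is translated
 * into what hardware expects.
 */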
1879static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
1880{
1881        struct hns_roce_cmq_desc desc;
1882        struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
1883        struct hns_roce_caps *caps = &hr_dev->caps;
1884
1885        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);
1886
1887        hr_reg_write(req, CFG_BT_ATTR_QPC_BA_PGSZ,
1888                     caps->qpc_ba_pg_sz + PG_SHIFT_OFFSET);
1889        hr_reg_write(req, CFG_BT_ATTR_QPC_BUF_PGSZ,
1890                     caps->qpc_buf_pg_sz + PG_SHIFT_OFFSET);
1891        hr_reg_write(req, CFG_BT_ATTR_QPC_HOPNUM,
1892                     to_hr_hem_hopnum(caps->qpc_hop_num, caps->num_qps));
1893
1894        hr_reg_write(req, CFG_BT_ATTR_SRQC_BA_PGSZ,
1895                     caps->srqc_ba_pg_sz + PG_SHIFT_OFFSET);
1896        hr_reg_write(req, CFG_BT_ATTR_SRQC_BUF_PGSZ,
1897                     caps->srqc_buf_pg_sz + PG_SHIFT_OFFSET);
1898        hr_reg_write(req, CFG_BT_ATTR_SRQC_HOPNUM,
1899                     to_hr_hem_hopnum(caps->srqc_hop_num, caps->num_srqs));
1900
1901        hr_reg_write(req, CFG_BT_ATTR_CQC_BA_PGSZ,
1902                     caps->cqc_ba_pg_sz + PG_SHIFT_OFFSET);
1903        hr_reg_write(req, CFG_BT_ATTR_CQC_BUF_PGSZ,
1904                     caps->cqc_buf_pg_sz + PG_SHIFT_OFFSET);
1905        hr_reg_write(req, CFG_BT_ATTR_CQC_HOPNUM,
1906                     to_hr_hem_hopnum(caps->cqc_hop_num, caps->num_cqs));
1907
1908        hr_reg_write(req, CFG_BT_ATTR_MPT_BA_PGSZ,
1909                     caps->mpt_ba_pg_sz + PG_SHIFT_OFFSET);
1910        hr_reg_write(req, CFG_BT_ATTR_MPT_BUF_PGSZ,
1911                     caps->mpt_buf_pg_sz + PG_SHIFT_OFFSET);
1912        hr_reg_write(req, CFG_BT_ATTR_MPT_HOPNUM,
1913                     to_hr_hem_hopnum(caps->mpt_hop_num, caps->num_mtpts));
1914
1915        hr_reg_write(req, CFG_BT_ATTR_SCCC_BA_PGSZ,
1916                     caps->sccc_ba_pg_sz + PG_SHIFT_OFFSET);
1917        hr_reg_write(req, CFG_BT_ATTR_SCCC_BUF_PGSZ,
1918                     caps->sccc_buf_pg_sz + PG_SHIFT_OFFSET);
1919        hr_reg_write(req, CFG_BT_ATTR_SCCC_HOPNUM,
1920                     to_hr_hem_hopnum(caps->sccc_hop_num, caps->num_qps));
1921
1922        return hns_roce_cmq_send(hr_dev, &desc, 1);
1923}
1924
1925/* Use default caps when hns_roce_query_pf_caps() fails or when initializing a VF profile */
1926static void set_default_caps(struct hns_roce_dev *hr_dev)
1927{
1928        struct hns_roce_caps *caps = &hr_dev->caps;
1929
1930        caps->num_qps           = HNS_ROCE_V2_MAX_QP_NUM;
1931        caps->max_wqes          = HNS_ROCE_V2_MAX_WQE_NUM;
1932        caps->num_cqs           = HNS_ROCE_V2_MAX_CQ_NUM;
1933        caps->num_srqs          = HNS_ROCE_V2_MAX_SRQ_NUM;
1934        caps->min_cqes          = HNS_ROCE_MIN_CQE_NUM;
1935        caps->max_cqes          = HNS_ROCE_V2_MAX_CQE_NUM;
1936        caps->max_sq_sg         = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
1937        caps->max_extend_sg     = HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
1938        caps->max_rq_sg         = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
1939
1940        caps->num_uars          = HNS_ROCE_V2_UAR_NUM;
1941        caps->phy_num_uars      = HNS_ROCE_V2_PHY_UAR_NUM;
1942        caps->num_aeq_vectors   = HNS_ROCE_V2_AEQE_VEC_NUM;
1943        caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM;
1944        caps->num_comp_vectors  = 0;
1945
1946        caps->num_mtpts         = HNS_ROCE_V2_MAX_MTPT_NUM;
1947        caps->num_pds           = HNS_ROCE_V2_MAX_PD_NUM;
1948        caps->num_qpc_timer     = HNS_ROCE_V2_MAX_QPC_TIMER_NUM;
1949        caps->num_cqc_timer     = HNS_ROCE_V2_MAX_CQC_TIMER_NUM;
1950
1951        caps->max_qp_init_rdma  = HNS_ROCE_V2_MAX_QP_INIT_RDMA;
1952        caps->max_qp_dest_rdma  = HNS_ROCE_V2_MAX_QP_DEST_RDMA;
1953        caps->max_sq_desc_sz    = HNS_ROCE_V2_MAX_SQ_DESC_SZ;
1954        caps->max_rq_desc_sz    = HNS_ROCE_V2_MAX_RQ_DESC_SZ;
1955        caps->max_srq_desc_sz   = HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
1956        caps->irrl_entry_sz     = HNS_ROCE_V2_IRRL_ENTRY_SZ;
1957        caps->trrl_entry_sz     = HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ;
1958        caps->cqc_entry_sz      = HNS_ROCE_V2_CQC_ENTRY_SZ;
1959        caps->srqc_entry_sz     = HNS_ROCE_V2_SRQC_ENTRY_SZ;
1960        caps->mtpt_entry_sz     = HNS_ROCE_V2_MTPT_ENTRY_SZ;
1961        caps->idx_entry_sz      = HNS_ROCE_V2_IDX_ENTRY_SZ;
1962        caps->page_size_cap     = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
1963        caps->reserved_lkey     = 0;
1964        caps->reserved_pds      = 0;
1965        caps->reserved_mrws     = 1;
1966        caps->reserved_uars     = 0;
1967        caps->reserved_cqs      = 0;
1968        caps->reserved_srqs     = 0;
1969        caps->reserved_qps      = HNS_ROCE_V2_RSV_QPS;
1970
1971        caps->qpc_hop_num       = HNS_ROCE_CONTEXT_HOP_NUM;
1972        caps->srqc_hop_num      = HNS_ROCE_CONTEXT_HOP_NUM;
1973        caps->cqc_hop_num       = HNS_ROCE_CONTEXT_HOP_NUM;
1974        caps->mpt_hop_num       = HNS_ROCE_CONTEXT_HOP_NUM;
1975        caps->sccc_hop_num      = HNS_ROCE_SCCC_HOP_NUM;
1976
1977        caps->mtt_hop_num       = HNS_ROCE_MTT_HOP_NUM;
1978        caps->wqe_sq_hop_num    = HNS_ROCE_SQWQE_HOP_NUM;
1979        caps->wqe_sge_hop_num   = HNS_ROCE_EXT_SGE_HOP_NUM;
1980        caps->wqe_rq_hop_num    = HNS_ROCE_RQWQE_HOP_NUM;
1981        caps->cqe_hop_num       = HNS_ROCE_CQE_HOP_NUM;
1982        caps->srqwqe_hop_num    = HNS_ROCE_SRQWQE_HOP_NUM;
1983        caps->idx_hop_num       = HNS_ROCE_IDX_HOP_NUM;
1984        caps->chunk_sz          = HNS_ROCE_V2_TABLE_CHUNK_SIZE;
1985
1986        caps->flags             = HNS_ROCE_CAP_FLAG_REREG_MR |
1987                                  HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
1988                                  HNS_ROCE_CAP_FLAG_CQ_RECORD_DB |
1989                                  HNS_ROCE_CAP_FLAG_QP_RECORD_DB;
1990
1991        caps->pkey_table_len[0] = 1;
1992        caps->ceqe_depth        = HNS_ROCE_V2_COMP_EQE_NUM;
1993        caps->aeqe_depth        = HNS_ROCE_V2_ASYNC_EQE_NUM;
1994        caps->local_ca_ack_delay = 0;
1995        caps->max_mtu = IB_MTU_4096;
1996
1997        caps->max_srq_wrs       = HNS_ROCE_V2_MAX_SRQ_WR;
1998        caps->max_srq_sges      = HNS_ROCE_V2_MAX_SRQ_SGE;
1999
2000        caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC | HNS_ROCE_CAP_FLAG_MW |
2001                       HNS_ROCE_CAP_FLAG_SRQ | HNS_ROCE_CAP_FLAG_FRMR |
2002                       HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL | HNS_ROCE_CAP_FLAG_XRC;
2003
2004        caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
2005
2006        if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
2007                caps->max_sq_inline = HNS_ROCE_V3_MAX_SQ_INLINE;
2008        } else {
2009                caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
2010
2011                /* The following configurations are only valid for HIP08 */
2012                caps->qpc_sz = HNS_ROCE_V2_QPC_SZ;
2013                caps->sccc_sz = HNS_ROCE_V2_SCCC_SZ;
2014                caps->cqe_sz = HNS_ROCE_V2_CQE_SIZE;
2015        }
2016}
2017
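/*
 * Derive the smallest buffer/BT page shifts able to cover obj_num objects.
 * Each extra hop multiplies the reach of one BT chunk by the number of
 * addresses per chunk (bt_chunk_size / BA_BYTE_LEN). Worked example,
 * assuming 4 KB chunks and 8-byte addresses: with hop_num == 2, one
 * context BT entry covers 512 * 512 * (4096 / obj_size) objects, and the
 * output is the extra page shift, ilog2() of the chunks still needed.
 */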
2018static void calc_pg_sz(u32 obj_num, u32 obj_size, u32 hop_num, u32 ctx_bt_num,
2019                       u32 *buf_page_size, u32 *bt_page_size, u32 hem_type)
2020{
2021        u64 obj_per_chunk;
2022        u64 bt_chunk_size = PAGE_SIZE;
2023        u64 buf_chunk_size = PAGE_SIZE;
2024        u64 obj_per_chunk_default = buf_chunk_size / obj_size;
2025
2026        *buf_page_size = 0;
2027        *bt_page_size = 0;
2028
2029        switch (hop_num) {
2030        case 3:
2031                obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
2032                                (bt_chunk_size / BA_BYTE_LEN) *
2033                                (bt_chunk_size / BA_BYTE_LEN) *
2034                                 obj_per_chunk_default;
2035                break;
2036        case 2:
2037                obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
2038                                (bt_chunk_size / BA_BYTE_LEN) *
2039                                 obj_per_chunk_default;
2040                break;
2041        case 1:
2042                obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
2043                                obj_per_chunk_default;
2044                break;
2045        case HNS_ROCE_HOP_NUM_0:
2046                obj_per_chunk = ctx_bt_num * obj_per_chunk_default;
2047                break;
2048        default:
2049                pr_err("table %u does not support hop_num = %u!\n",
2050                       hem_type, hop_num);
2051                return;
2052        }
2053
2054        if (hem_type >= HEM_TYPE_MTT)
2055                *bt_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
2056        else
2057                *buf_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
2058}
2059
2060static void set_hem_page_size(struct hns_roce_dev *hr_dev)
2061{
2062        struct hns_roce_caps *caps = &hr_dev->caps;
2063
2064        /* EQ */
2065        caps->eqe_ba_pg_sz = 0;
2066        caps->eqe_buf_pg_sz = 0;
2067
2068        /* Link Table */
2069        caps->llm_buf_pg_sz = 0;
2070
2071        /* MR */
2072        caps->mpt_ba_pg_sz = 0;
2073        caps->mpt_buf_pg_sz = 0;
2074        caps->pbl_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_16K;
2075        caps->pbl_buf_pg_sz = 0;
2076        calc_pg_sz(caps->num_mtpts, caps->mtpt_entry_sz, caps->mpt_hop_num,
2077                   caps->mpt_bt_num, &caps->mpt_buf_pg_sz, &caps->mpt_ba_pg_sz,
2078                   HEM_TYPE_MTPT);
2079
2080        /* QP */
2081        caps->qpc_ba_pg_sz = 0;
2082        caps->qpc_buf_pg_sz = 0;
2083        caps->qpc_timer_ba_pg_sz = 0;
2084        caps->qpc_timer_buf_pg_sz = 0;
2085        caps->sccc_ba_pg_sz = 0;
2086        caps->sccc_buf_pg_sz = 0;
2087        caps->mtt_ba_pg_sz = 0;
2088        caps->mtt_buf_pg_sz = 0;
2089        calc_pg_sz(caps->num_qps, caps->qpc_sz, caps->qpc_hop_num,
2090                   caps->qpc_bt_num, &caps->qpc_buf_pg_sz, &caps->qpc_ba_pg_sz,
2091                   HEM_TYPE_QPC);
2092
2093        if (caps->flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL)
2094                calc_pg_sz(caps->num_qps, caps->sccc_sz, caps->sccc_hop_num,
2095                           caps->sccc_bt_num, &caps->sccc_buf_pg_sz,
2096                           &caps->sccc_ba_pg_sz, HEM_TYPE_SCCC);
2097
2098        /* CQ */
2099        caps->cqc_ba_pg_sz = 0;
2100        caps->cqc_buf_pg_sz = 0;
2101        caps->cqc_timer_ba_pg_sz = 0;
2102        caps->cqc_timer_buf_pg_sz = 0;
2103        caps->cqe_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_256K;
2104        caps->cqe_buf_pg_sz = 0;
2105        calc_pg_sz(caps->num_cqs, caps->cqc_entry_sz, caps->cqc_hop_num,
2106                   caps->cqc_bt_num, &caps->cqc_buf_pg_sz, &caps->cqc_ba_pg_sz,
2107                   HEM_TYPE_CQC);
2108        calc_pg_sz(caps->max_cqes, caps->cqe_sz, caps->cqe_hop_num,
2109                   1, &caps->cqe_buf_pg_sz, &caps->cqe_ba_pg_sz, HEM_TYPE_CQE);
2110
2111        /* SRQ */
2112        if (caps->flags & HNS_ROCE_CAP_FLAG_SRQ) {
2113                caps->srqc_ba_pg_sz = 0;
2114                caps->srqc_buf_pg_sz = 0;
2115                caps->srqwqe_ba_pg_sz = 0;
2116                caps->srqwqe_buf_pg_sz = 0;
2117                caps->idx_ba_pg_sz = 0;
2118                caps->idx_buf_pg_sz = 0;
2119                calc_pg_sz(caps->num_srqs, caps->srqc_entry_sz,
2120                           caps->srqc_hop_num, caps->srqc_bt_num,
2121                           &caps->srqc_buf_pg_sz, &caps->srqc_ba_pg_sz,
2122                           HEM_TYPE_SRQC);
2123                calc_pg_sz(caps->num_srqwqe_segs, caps->mtt_entry_sz,
2124                           caps->srqwqe_hop_num, 1, &caps->srqwqe_buf_pg_sz,
2125                           &caps->srqwqe_ba_pg_sz, HEM_TYPE_SRQWQE);
2126                calc_pg_sz(caps->num_idx_segs, caps->idx_entry_sz,
2127                           caps->idx_hop_num, 1, &caps->idx_buf_pg_sz,
2128                           &caps->idx_ba_pg_sz, HEM_TYPE_IDX);
2129        }
2130
2131        /* GMV */
2132        caps->gmv_ba_pg_sz = 0;
2133        caps->gmv_buf_pg_sz = 0;
2134}
2135
2136/* Apply all loaded caps before setting to hardware */
2137static void apply_func_caps(struct hns_roce_dev *hr_dev)
2138{
2139        struct hns_roce_caps *caps = &hr_dev->caps;
2140        struct hns_roce_v2_priv *priv = hr_dev->priv;
2141
2142        /* The following configurations don't need to be queried from firmware. */
2143        caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
2144        caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
2145        caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
2146
2147        caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM;
2148        caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
2149        caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
2150        caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
2151
2152        caps->num_xrcds = HNS_ROCE_V2_MAX_XRCD_NUM;
2153        caps->reserved_xrcds = HNS_ROCE_V2_RSV_XRCD_NUM;
2154
2155        caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
2156        caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
2157        caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
2158
2159        if (!caps->num_comp_vectors)
2160                caps->num_comp_vectors = min_t(u32, caps->eqc_bt_num - 1,
2161                                  (u32)priv->handle->rinfo.num_vectors - 2);
2162
2163        if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
2164                caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
2165                caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
2166
2167                /* The following configurations override values queried from firmware */
2168                caps->qpc_sz = HNS_ROCE_V3_QPC_SZ;
2169                caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE;
2170                caps->sccc_sz = HNS_ROCE_V3_SCCC_SZ;
2171
2172                /* The following configurations are not queried from firmware */
2173                caps->gmv_entry_sz = HNS_ROCE_V3_GMV_ENTRY_SZ;
2174
2175                caps->gmv_hop_num = HNS_ROCE_HOP_NUM_0;
2176                caps->gid_table_len[0] = caps->gmv_bt_num *
2177                                        (HNS_HW_PAGE_SIZE / caps->gmv_entry_sz);
2178
2179                caps->gmv_entry_num = caps->gmv_bt_num * (PAGE_SIZE /
2180                                                          caps->gmv_entry_sz);
2181        } else {
2182                u32 func_num = max_t(u32, 1, hr_dev->func_num);
2183
2184                caps->ceqe_size = HNS_ROCE_CEQE_SIZE;
2185                caps->aeqe_size = HNS_ROCE_AEQE_SIZE;
2186                caps->gid_table_len[0] /= func_num;
2187        }
2188
2189        if (hr_dev->is_vf) {
2190                caps->default_aeq_arm_st = 0x3;
2191                caps->default_ceq_arm_st = 0x3;
2192                caps->default_ceq_max_cnt = 0x1;
2193                caps->default_ceq_period = 0x10;
2194                caps->default_aeq_max_cnt = 0x1;
2195                caps->default_aeq_period = 0x10;
2196        }
2197
2198        set_hem_page_size(hr_dev);
2199}
2200
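/*
 * Query the PF capability set in one CMQ transaction made of
 * HNS_ROCE_QUERY_PF_CAPS_CMD_NUM chained descriptors: every descriptor
 * except the last carries HNS_ROCE_CMD_FLAG_NEXT, and the responses are
 * decoded as the a/b/c/d/e capability pages below.
 */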
2201static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
2202{
2203        struct hns_roce_cmq_desc desc[HNS_ROCE_QUERY_PF_CAPS_CMD_NUM];
2204        struct hns_roce_caps *caps = &hr_dev->caps;
2205        struct hns_roce_query_pf_caps_a *resp_a;
2206        struct hns_roce_query_pf_caps_b *resp_b;
2207        struct hns_roce_query_pf_caps_c *resp_c;
2208        struct hns_roce_query_pf_caps_d *resp_d;
2209        struct hns_roce_query_pf_caps_e *resp_e;
2210        int ctx_hop_num;
2211        int pbl_hop_num;
2212        int ret;
2213        int i;
2214
2215        for (i = 0; i < HNS_ROCE_QUERY_PF_CAPS_CMD_NUM; i++) {
2216                hns_roce_cmq_setup_basic_desc(&desc[i],
2217                                              HNS_ROCE_OPC_QUERY_PF_CAPS_NUM,
2218                                              true);
2219                if (i < (HNS_ROCE_QUERY_PF_CAPS_CMD_NUM - 1))
2220                        desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
2221                else
2222                        desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
2223        }
2224
2225        ret = hns_roce_cmq_send(hr_dev, desc, HNS_ROCE_QUERY_PF_CAPS_CMD_NUM);
2226        if (ret)
2227                return ret;
2228
2229        resp_a = (struct hns_roce_query_pf_caps_a *)desc[0].data;
2230        resp_b = (struct hns_roce_query_pf_caps_b *)desc[1].data;
2231        resp_c = (struct hns_roce_query_pf_caps_c *)desc[2].data;
2232        resp_d = (struct hns_roce_query_pf_caps_d *)desc[3].data;
2233        resp_e = (struct hns_roce_query_pf_caps_e *)desc[4].data;
2234
2235        caps->local_ca_ack_delay     = resp_a->local_ca_ack_delay;
2236        caps->max_sq_sg              = le16_to_cpu(resp_a->max_sq_sg);
2237        caps->max_sq_inline          = le16_to_cpu(resp_a->max_sq_inline);
2238        caps->max_rq_sg              = le16_to_cpu(resp_a->max_rq_sg);
2239        caps->max_rq_sg = roundup_pow_of_two(caps->max_rq_sg);
2240        caps->max_extend_sg          = le32_to_cpu(resp_a->max_extend_sg);
2241        caps->num_qpc_timer          = le16_to_cpu(resp_a->num_qpc_timer);
2242        caps->num_cqc_timer          = le16_to_cpu(resp_a->num_cqc_timer);
2243        caps->max_srq_sges           = le16_to_cpu(resp_a->max_srq_sges);
2244        caps->max_srq_sges = roundup_pow_of_two(caps->max_srq_sges);
2245        caps->num_aeq_vectors        = resp_a->num_aeq_vectors;
2246        caps->num_other_vectors      = resp_a->num_other_vectors;
2247        caps->max_sq_desc_sz         = resp_a->max_sq_desc_sz;
2248        caps->max_rq_desc_sz         = resp_a->max_rq_desc_sz;
2249        caps->max_srq_desc_sz        = resp_a->max_srq_desc_sz;
2250        caps->cqe_sz                 = resp_a->cqe_sz;
2251
2252        caps->mtpt_entry_sz          = resp_b->mtpt_entry_sz;
2253        caps->irrl_entry_sz          = resp_b->irrl_entry_sz;
2254        caps->trrl_entry_sz          = resp_b->trrl_entry_sz;
2255        caps->cqc_entry_sz           = resp_b->cqc_entry_sz;
2256        caps->srqc_entry_sz          = resp_b->srqc_entry_sz;
2257        caps->idx_entry_sz           = resp_b->idx_entry_sz;
2258        caps->sccc_sz                = resp_b->sccc_sz;
2259        caps->max_mtu                = resp_b->max_mtu;
2260        caps->qpc_sz                 = le16_to_cpu(resp_b->qpc_sz);
2261        caps->min_cqes               = resp_b->min_cqes;
2262        caps->min_wqes               = resp_b->min_wqes;
2263        caps->page_size_cap          = le32_to_cpu(resp_b->page_size_cap);
2264        caps->pkey_table_len[0]      = resp_b->pkey_table_len;
2265        caps->phy_num_uars           = resp_b->phy_num_uars;
2266        ctx_hop_num                  = resp_b->ctx_hop_num;
2267        pbl_hop_num                  = resp_b->pbl_hop_num;
2268
2269        caps->num_pds = 1 << roce_get_field(resp_c->cap_flags_num_pds,
2270                                            V2_QUERY_PF_CAPS_C_NUM_PDS_M,
2271                                            V2_QUERY_PF_CAPS_C_NUM_PDS_S);
2272        caps->flags = roce_get_field(resp_c->cap_flags_num_pds,
2273                                     V2_QUERY_PF_CAPS_C_CAP_FLAGS_M,
2274                                     V2_QUERY_PF_CAPS_C_CAP_FLAGS_S);
2275        caps->flags |= le16_to_cpu(resp_d->cap_flags_ex) <<
2276                       HNS_ROCE_CAP_FLAGS_EX_SHIFT;
2277
2278        caps->num_cqs = 1 << roce_get_field(resp_c->max_gid_num_cqs,
2279                                            V2_QUERY_PF_CAPS_C_NUM_CQS_M,
2280                                            V2_QUERY_PF_CAPS_C_NUM_CQS_S);
2281        caps->gid_table_len[0] = roce_get_field(resp_c->max_gid_num_cqs,
2282                                                V2_QUERY_PF_CAPS_C_MAX_GID_M,
2283                                                V2_QUERY_PF_CAPS_C_MAX_GID_S);
2284
2285        caps->max_cqes = 1 << roce_get_field(resp_c->cq_depth,
2286                                             V2_QUERY_PF_CAPS_C_CQ_DEPTH_M,
2287                                             V2_QUERY_PF_CAPS_C_CQ_DEPTH_S);
2288        caps->num_mtpts = 1 << roce_get_field(resp_c->num_mrws,
2289                                              V2_QUERY_PF_CAPS_C_NUM_MRWS_M,
2290                                              V2_QUERY_PF_CAPS_C_NUM_MRWS_S);
2291        caps->num_qps = 1 << roce_get_field(resp_c->ord_num_qps,
2292                                            V2_QUERY_PF_CAPS_C_NUM_QPS_M,
2293                                            V2_QUERY_PF_CAPS_C_NUM_QPS_S);
2294        caps->max_qp_init_rdma = roce_get_field(resp_c->ord_num_qps,
2295                                                V2_QUERY_PF_CAPS_C_MAX_ORD_M,
2296                                                V2_QUERY_PF_CAPS_C_MAX_ORD_S);
2297        caps->max_qp_dest_rdma = caps->max_qp_init_rdma;
2298        caps->max_wqes = 1 << le16_to_cpu(resp_c->sq_depth);
2299        caps->num_srqs = 1 << roce_get_field(resp_d->wq_hop_num_max_srqs,
2300                                             V2_QUERY_PF_CAPS_D_NUM_SRQS_M,
2301                                             V2_QUERY_PF_CAPS_D_NUM_SRQS_S);
2302        caps->cong_type = roce_get_field(resp_d->wq_hop_num_max_srqs,
2303                                         V2_QUERY_PF_CAPS_D_CONG_TYPE_M,
2304                                         V2_QUERY_PF_CAPS_D_CONG_TYPE_S);
2305        caps->max_srq_wrs = 1 << le16_to_cpu(resp_d->srq_depth);
2306
2307        caps->ceqe_depth = 1 << roce_get_field(resp_d->num_ceqs_ceq_depth,
2308                                               V2_QUERY_PF_CAPS_D_CEQ_DEPTH_M,
2309                                               V2_QUERY_PF_CAPS_D_CEQ_DEPTH_S);
2310        caps->num_comp_vectors = roce_get_field(resp_d->num_ceqs_ceq_depth,
2311                                                V2_QUERY_PF_CAPS_D_NUM_CEQS_M,
2312                                                V2_QUERY_PF_CAPS_D_NUM_CEQS_S);
2313
2314        caps->aeqe_depth = 1 << roce_get_field(resp_d->arm_st_aeq_depth,
2315                                               V2_QUERY_PF_CAPS_D_AEQ_DEPTH_M,
2316                                               V2_QUERY_PF_CAPS_D_AEQ_DEPTH_S);
2317        caps->default_aeq_arm_st = roce_get_field(resp_d->arm_st_aeq_depth,
2318                                            V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_M,
2319                                            V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_S);
2320        caps->default_ceq_arm_st = roce_get_field(resp_d->arm_st_aeq_depth,
2321                                            V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_M,
2322                                            V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_S);
2323        caps->reserved_pds = roce_get_field(resp_d->num_uars_rsv_pds,
2324                                            V2_QUERY_PF_CAPS_D_RSV_PDS_M,
2325                                            V2_QUERY_PF_CAPS_D_RSV_PDS_S);
2326        caps->num_uars = 1 << roce_get_field(resp_d->num_uars_rsv_pds,
2327                                             V2_QUERY_PF_CAPS_D_NUM_UARS_M,
2328                                             V2_QUERY_PF_CAPS_D_NUM_UARS_S);
2329        caps->reserved_qps = roce_get_field(resp_d->rsv_uars_rsv_qps,
2330                                            V2_QUERY_PF_CAPS_D_RSV_QPS_M,
2331                                            V2_QUERY_PF_CAPS_D_RSV_QPS_S);
2332        caps->reserved_uars = roce_get_field(resp_d->rsv_uars_rsv_qps,
2333                                             V2_QUERY_PF_CAPS_D_RSV_UARS_M,
2334                                             V2_QUERY_PF_CAPS_D_RSV_UARS_S);
2335        caps->reserved_mrws = roce_get_field(resp_e->chunk_size_shift_rsv_mrws,
2336                                             V2_QUERY_PF_CAPS_E_RSV_MRWS_M,
2337                                             V2_QUERY_PF_CAPS_E_RSV_MRWS_S);
2338        caps->chunk_sz = 1 << roce_get_field(resp_e->chunk_size_shift_rsv_mrws,
2339                                         V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_M,
2340                                         V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_S);
2341        caps->reserved_cqs = roce_get_field(resp_e->rsv_cqs,
2342                                            V2_QUERY_PF_CAPS_E_RSV_CQS_M,
2343                                            V2_QUERY_PF_CAPS_E_RSV_CQS_S);
2344        caps->reserved_srqs = roce_get_field(resp_e->rsv_srqs,
2345                                             V2_QUERY_PF_CAPS_E_RSV_SRQS_M,
2346                                             V2_QUERY_PF_CAPS_E_RSV_SRQS_S);
2347        caps->reserved_lkey = roce_get_field(resp_e->rsv_lkey,
2348                                             V2_QUERY_PF_CAPS_E_RSV_LKEYS_M,
2349                                             V2_QUERY_PF_CAPS_E_RSV_LKEYS_S);
2350        caps->default_ceq_max_cnt = le16_to_cpu(resp_e->ceq_max_cnt);
2351        caps->default_ceq_period = le16_to_cpu(resp_e->ceq_period);
2352        caps->default_aeq_max_cnt = le16_to_cpu(resp_e->aeq_max_cnt);
2353        caps->default_aeq_period = le16_to_cpu(resp_e->aeq_period);
2354
2355        caps->qpc_hop_num = ctx_hop_num;
2356        caps->sccc_hop_num = ctx_hop_num;
2357        caps->srqc_hop_num = ctx_hop_num;
2358        caps->cqc_hop_num = ctx_hop_num;
2359        caps->mpt_hop_num = ctx_hop_num;
2360        caps->mtt_hop_num = pbl_hop_num;
2361        caps->cqe_hop_num = pbl_hop_num;
2362        caps->srqwqe_hop_num = pbl_hop_num;
2363        caps->idx_hop_num = pbl_hop_num;
2364        caps->wqe_sq_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs,
2365                                          V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_M,
2366                                          V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_S);
2367        caps->wqe_sge_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs,
2368                                          V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_M,
2369                                          V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_S);
2370        caps->wqe_rq_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs,
2371                                          V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_M,
2372                                          V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_S);
2373
2374        return 0;
2375}
2376
2377static int config_hem_entry_size(struct hns_roce_dev *hr_dev, u32 type, u32 val)
2378{
2379        struct hns_roce_cmq_desc desc;
2380        struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
2381
2382        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_ENTRY_SIZE,
2383                                      false);
2384
2385        hr_reg_write(req, CFG_HEM_ENTRY_SIZE_TYPE, type);
2386        hr_reg_write(req, CFG_HEM_ENTRY_SIZE_VALUE, val);
2387
2388        return hns_roce_cmq_send(hr_dev, &desc, 1);
2389}
2390
2391static int hns_roce_config_entry_size(struct hns_roce_dev *hr_dev)
2392{
2393        struct hns_roce_caps *caps = &hr_dev->caps;
2394        int ret;
2395
2396        if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09)
2397                return 0;
2398
2399        ret = config_hem_entry_size(hr_dev, HNS_ROCE_CFG_QPC_SIZE,
2400                                    caps->qpc_sz);
2401        if (ret) {
2402                dev_err(hr_dev->dev, "failed to cfg qpc sz, ret = %d.\n", ret);
2403                return ret;
2404        }
2405
2406        ret = config_hem_entry_size(hr_dev, HNS_ROCE_CFG_SCCC_SIZE,
2407                                    caps->sccc_sz);
2408        if (ret)
2409                dev_err(hr_dev->dev, "failed to cfg sccc sz, ret = %d.\n", ret);
2410
2411        return ret;
2412}
2413
2414static int hns_roce_v2_vf_profile(struct hns_roce_dev *hr_dev)
2415{
2416        struct device *dev = hr_dev->dev;
2417        int ret;
2418
2419        hr_dev->func_num = 1;
2420
2421        set_default_caps(hr_dev);
2422
2423        ret = hns_roce_query_vf_resource(hr_dev);
2424        if (ret) {
2425                dev_err(dev, "failed to query VF resource, ret = %d.\n", ret);
2426                return ret;
2427        }
2428
2429        apply_func_caps(hr_dev);
2430
2431        ret = hns_roce_v2_set_bt(hr_dev);
2432        if (ret)
2433                dev_err(dev, "failed to config VF BA table, ret = %d.\n", ret);
2434
2435        return ret;
2436}
2437
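/*
 * PF bring-up order: query function info, set global parameters and
 * per-VF switch parameters, query capabilities (falling back to the
 * default caps on failure), query PF resources, apply the combined caps,
 * carve out VF resources, program the BA tables and finally configure
 * the HEM entry sizes.
 */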
2438static int hns_roce_v2_pf_profile(struct hns_roce_dev *hr_dev)
2439{
2440        struct device *dev = hr_dev->dev;
2441        int ret;
2442
2443        ret = hns_roce_query_func_info(hr_dev);
2444        if (ret) {
2445                dev_err(dev, "failed to query func info, ret = %d.\n", ret);
2446                return ret;
2447        }
2448
2449        ret = hns_roce_config_global_param(hr_dev);
2450        if (ret) {
2451                dev_err(dev, "failed to config global param, ret = %d.\n", ret);
2452                return ret;
2453        }
2454
2455        ret = hns_roce_set_vf_switch_param(hr_dev);
2456        if (ret) {
2457                dev_err(dev, "failed to set switch param, ret = %d.\n", ret);
2458                return ret;
2459        }
2460
2461        ret = hns_roce_query_pf_caps(hr_dev);
2462        if (ret)
2463                set_default_caps(hr_dev);
2464
2465        ret = hns_roce_query_pf_resource(hr_dev);
2466        if (ret) {
2467                dev_err(dev, "failed to query pf resource, ret = %d.\n", ret);
2468                return ret;
2469        }
2470
2471        apply_func_caps(hr_dev);
2472
2473        ret = hns_roce_alloc_vf_resource(hr_dev);
2474        if (ret) {
2475                dev_err(dev, "failed to alloc vf resource, ret = %d.\n", ret);
2476                return ret;
2477        }
2478
2479        ret = hns_roce_v2_set_bt(hr_dev);
2480        if (ret) {
2481                dev_err(dev, "failed to config BA table, ret = %d.\n", ret);
2482                return ret;
2483        }
2484
2485        /* Configure the size of QPC, SCCC, etc. */
2486        return hns_roce_config_entry_size(hr_dev);
2487}
2488
2489static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
2490{
2491        struct device *dev = hr_dev->dev;
2492        int ret;
2493
2494        ret = hns_roce_cmq_query_hw_info(hr_dev);
2495        if (ret) {
2496                dev_err(dev, "failed to query hardware info, ret = %d.\n", ret);
2497                return ret;
2498        }
2499
2500        ret = hns_roce_query_fw_ver(hr_dev);
2501        if (ret) {
2502                dev_err(dev, "failed to query firmware info, ret = %d.\n", ret);
2503                return ret;
2504        }
2505
2506        hr_dev->vendor_part_id = hr_dev->pci_dev->device;
2507        hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);
2508
2509        if (hr_dev->is_vf)
2510                return hns_roce_v2_vf_profile(hr_dev);
2511        else
2512                return hns_roce_v2_pf_profile(hr_dev);
2513}
2514
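/*
 * Build the extended link-list-mode (LLM) configuration table: one __le64
 * entry per data page, each packing the page DMA address together with
 * the index of the next page, the final entry pointing back to 0.
 * Sketch of the resulting chain for a 3-page buffer (illustrative):
 *
 *	entry[0] = addr(page 0) | next 1
 *	entry[1] = addr(page 1) | next 2
 *	entry[2] = addr(page 2) | next 0
 */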
2515static void config_llm_table(struct hns_roce_buf *data_buf, void *cfg_buf)
2516{
2517        u32 i, next_ptr, page_num;
2518        __le64 *entry = cfg_buf;
2519        dma_addr_t addr;
2520        u64 val;
2521
2522        page_num = data_buf->npages;
2523        for (i = 0; i < page_num; i++) {
2524                addr = hns_roce_buf_page(data_buf, i);
2525                if (i == (page_num - 1))
2526                        next_ptr = 0;
2527                else
2528                        next_ptr = i + 1;
2529
2530                val = HNS_ROCE_EXT_LLM_ENTRY(addr, (u64)next_ptr);
2531                entry[i] = cpu_to_le64(val);
2532        }
2533}
2534
2535static int set_llm_cfg_to_hw(struct hns_roce_dev *hr_dev,
2536                             struct hns_roce_link_table *table)
2537{
2538        struct hns_roce_cmq_desc desc[2];
2539        struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
2540        struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
2541        struct hns_roce_buf *buf = table->buf;
2542        enum hns_roce_opcode_type opcode;
2543        dma_addr_t addr;
2544
2545        opcode = HNS_ROCE_OPC_CFG_EXT_LLM;
2546        hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
2547        desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
2548        hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);
2549
2550        hr_reg_write(r_a, CFG_LLM_A_BA_L, lower_32_bits(table->table.map));
2551        hr_reg_write(r_a, CFG_LLM_A_BA_H, upper_32_bits(table->table.map));
2552        hr_reg_write(r_a, CFG_LLM_A_DEPTH, buf->npages);
2553        hr_reg_write(r_a, CFG_LLM_A_PGSZ, to_hr_hw_page_shift(buf->page_shift));
2554        hr_reg_enable(r_a, CFG_LLM_A_INIT_EN);
2555
2556        addr = to_hr_hw_page_addr(hns_roce_buf_page(buf, 0));
2557        hr_reg_write(r_a, CFG_LLM_A_HEAD_BA_L, lower_32_bits(addr));
2558        hr_reg_write(r_a, CFG_LLM_A_HEAD_BA_H, upper_32_bits(addr));
2559        hr_reg_write(r_a, CFG_LLM_A_HEAD_NXTPTR, 1);
2560        hr_reg_write(r_a, CFG_LLM_A_HEAD_PTR, 0);
2561
2562        addr = to_hr_hw_page_addr(hns_roce_buf_page(buf, buf->npages - 1));
2563        hr_reg_write(r_b, CFG_LLM_B_TAIL_BA_L, lower_32_bits(addr));
2564        hr_reg_write(r_b, CFG_LLM_B_TAIL_BA_H, upper_32_bits(addr));
2565        hr_reg_write(r_b, CFG_LLM_B_TAIL_PTR, buf->npages - 1);
2566
2567        return hns_roce_cmq_send(hr_dev, desc, 2);
2568}
2569
2570static struct hns_roce_link_table *
2571alloc_link_table_buf(struct hns_roce_dev *hr_dev)
2572{
2573        struct hns_roce_v2_priv *priv = hr_dev->priv;
2574        struct hns_roce_link_table *link_tbl;
2575        u32 pg_shift, size, min_size;
2576
2577        link_tbl = &priv->ext_llm;
2578        pg_shift = hr_dev->caps.llm_buf_pg_sz + PAGE_SHIFT;
2579        size = hr_dev->caps.num_qps * HNS_ROCE_V2_EXT_LLM_ENTRY_SZ;
2580        min_size = HNS_ROCE_EXT_LLM_MIN_PAGES(hr_dev->caps.sl_num) << pg_shift;
2581
2582        /* Alloc data table */
2583        size = max(size, min_size);
2584        link_tbl->buf = hns_roce_buf_alloc(hr_dev, size, pg_shift, 0);
2585        if (IS_ERR(link_tbl->buf))
2586                return ERR_PTR(-ENOMEM);
2587
2588        /* Alloc config table */
2589        size = link_tbl->buf->npages * sizeof(u64);
2590        link_tbl->table.buf = dma_alloc_coherent(hr_dev->dev, size,
2591                                                 &link_tbl->table.map,
2592                                                 GFP_KERNEL);
2593        if (!link_tbl->table.buf) {
2594                hns_roce_buf_free(hr_dev, link_tbl->buf);
2595                return ERR_PTR(-ENOMEM);
2596        }
2597
2598        return link_tbl;
2599}
2600
2601static void free_link_table_buf(struct hns_roce_dev *hr_dev,
2602                                struct hns_roce_link_table *tbl)
2603{
2604        if (tbl->buf) {
2605                u32 size = tbl->buf->npages * sizeof(u64);
2606
2607                dma_free_coherent(hr_dev->dev, size, tbl->table.buf,
2608                                  tbl->table.map);
2609        }
2610
2611        hns_roce_buf_free(hr_dev, tbl->buf);
2612}
2613
2614static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev)
2615{
2616        struct hns_roce_link_table *link_tbl;
2617        int ret;
2618
2619        link_tbl = alloc_link_table_buf(hr_dev);
2620        if (IS_ERR(link_tbl))
2621                return -ENOMEM;
2622
2623        if (WARN_ON(link_tbl->buf->npages > HNS_ROCE_V2_EXT_LLM_MAX_DEPTH)) {
2624                ret = -EINVAL;
2625                goto err_alloc;
2626        }
2627
2628        config_llm_table(link_tbl->buf, link_tbl->table.buf);
2629        ret = set_llm_cfg_to_hw(hr_dev, link_tbl);
2630        if (ret)
2631                goto err_alloc;
2632
2633        return 0;
2634
2635err_alloc:
2636        free_link_table_buf(hr_dev, link_tbl);
2637        return ret;
2638}
2639
2640static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev)
2641{
2642        struct hns_roce_v2_priv *priv = hr_dev->priv;
2643
2644        free_link_table_buf(hr_dev, &priv->ext_llm);
2645}
2646
2647static void free_dip_list(struct hns_roce_dev *hr_dev)
2648{
2649        struct hns_roce_dip *hr_dip;
2650        struct hns_roce_dip *tmp;
2651        unsigned long flags;
2652
2653        spin_lock_irqsave(&hr_dev->dip_list_lock, flags);
2654
2655        list_for_each_entry_safe(hr_dip, tmp, &hr_dev->dip_list, node) {
2656                list_del(&hr_dip->node);
2657                kfree(hr_dip);
2658        }
2659
2660        spin_unlock_irqrestore(&hr_dev->dip_list_lock, flags);
2661}
2662
2663static int get_hem_table(struct hns_roce_dev *hr_dev)
2664{
2665        unsigned int qpc_count;
2666        unsigned int cqc_count;
2667        unsigned int gmv_count;
2668        int ret;
2669        int i;
2670
2671        /* Alloc memory for source address table buffer space chunk */
2672        for (gmv_count = 0; gmv_count < hr_dev->caps.gmv_entry_num;
2673             gmv_count++) {
2674                ret = hns_roce_table_get(hr_dev, &hr_dev->gmv_table, gmv_count);
2675                if (ret)
2676                        goto err_gmv_failed;
2677        }
2678
2679        if (hr_dev->is_vf)
2680                return 0;
2681
2682        /* Alloc memory for QPC Timer buffer space chunk */
2683        for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num;
2684             qpc_count++) {
2685                ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table,
2686                                         qpc_count);
2687                if (ret) {
2688                        dev_err(hr_dev->dev, "QPC Timer get failed\n");
2689                        goto err_qpc_timer_failed;
2690                }
2691        }
2692
2693        /* Alloc memory for CQC Timer buffer space chunk */
2694        for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num;
2695             cqc_count++) {
2696                ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table,
2697                                         cqc_count);
2698                if (ret) {
2699                        dev_err(hr_dev->dev, "CQC Timer get failed\n");
2700                        goto err_cqc_timer_failed;
2701                }
2702        }
2703
2704        return 0;
2705
2706err_cqc_timer_failed:
2707        for (i = 0; i < cqc_count; i++)
2708                hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);
2709
2710err_qpc_timer_failed:
2711        for (i = 0; i < qpc_count; i++)
2712                hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);
2713
2714err_gmv_failed:
2715        for (i = 0; i < gmv_count; i++)
2716                hns_roce_table_put(hr_dev, &hr_dev->gmv_table, i);
2717
2718        return ret;
2719}
2720
2721static void put_hem_table(struct hns_roce_dev *hr_dev)
2722{
2723        int i;
2724
2725        for (i = 0; i < hr_dev->caps.gmv_entry_num; i++)
2726                hns_roce_table_put(hr_dev, &hr_dev->gmv_table, i);
2727
2728        if (hr_dev->is_vf)
2729                return;
2730
2731        for (i = 0; i < hr_dev->caps.qpc_timer_bt_num; i++)
2732                hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);
2733
2734        for (i = 0; i < hr_dev->caps.cqc_timer_bt_num; i++)
2735                hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);
2736}
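
/*
 * get_hem_table() and put_hem_table() above follow the common kernel
 * acquire-N-or-unwind pattern: on failure at entry i, only the i
 * entries already acquired are released, in reverse table order. A
 * minimal sketch of the same pattern, using hypothetical
 * get_entry()/put_entry() helpers that are not part of this driver:
 *
 *	for (i = 0; i < n; i++) {
 *		ret = get_entry(tbl, i);
 *		if (ret)
 *			goto unwind;
 *	}
 *	return 0;
 *
 * unwind:
 *	while (i--)
 *		put_entry(tbl, i);
 *	return ret;
 */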
2737
2738static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
2739{
2740        int ret;
2741
2742        /* The hns ROCEE requires the extdb info to be cleared before use */
2743        ret = hns_roce_clear_extdb_list_info(hr_dev);
2744        if (ret)
2745                return ret;
2746
2747        ret = get_hem_table(hr_dev);
2748        if (ret)
2749                return ret;
2750
2751        if (hr_dev->is_vf)
2752                return 0;
2753
2754        ret = hns_roce_init_link_table(hr_dev);
2755        if (ret) {
2756                dev_err(hr_dev->dev, "failed to init llm, ret = %d.\n", ret);
2757                goto err_llm_init_failed;
2758        }
2759
2760        return 0;
2761
2762err_llm_init_failed:
2763        put_hem_table(hr_dev);
2764
2765        return ret;
2766}
2767
2768static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
2769{
2770        hns_roce_function_clear(hr_dev);
2771
2772        if (!hr_dev->is_vf)
2773                hns_roce_free_link_table(hr_dev);
2774
2775        if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP09)
2776                free_dip_list(hr_dev);
2777}
2778
2779static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, u64 in_param,
2780                              u64 out_param, u32 in_modifier, u8 op_modifier,
2781                              u16 op, u16 token, int event)
2782{
2783        struct hns_roce_cmq_desc desc;
2784        struct hns_roce_post_mbox *mb = (struct hns_roce_post_mbox *)desc.data;
2785
2786        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_POST_MB, false);
2787
2788        mb->in_param_l = cpu_to_le32(in_param);
2789        mb->in_param_h = cpu_to_le32(in_param >> 32);
2790        mb->out_param_l = cpu_to_le32(out_param);
2791        mb->out_param_h = cpu_to_le32(out_param >> 32);
2792        mb->cmd_tag = cpu_to_le32(in_modifier << 8 | op);
2793        mb->token_event_en = cpu_to_le32(event << 16 | token);
2794
2795        return hns_roce_cmq_send(hr_dev, &desc, 1);
2796}
2797
2798static int v2_wait_mbox_complete(struct hns_roce_dev *hr_dev, u32 timeout,
2799                                 u8 *complete_status)
2800{
2801        struct hns_roce_mbox_status *mb_st;
2802        struct hns_roce_cmq_desc desc;
2803        unsigned long end;
2804        int ret = -EBUSY;
2805        u32 status;
2806        bool busy;
2807
2808        mb_st = (struct hns_roce_mbox_status *)desc.data;
2809        end = msecs_to_jiffies(timeout) + jiffies;
2810        while (v2_chk_mbox_is_avail(hr_dev, &busy)) {
2811                status = 0;
2812                hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST,
2813                                              true);
2814                ret = __hns_roce_cmq_send(hr_dev, &desc, 1);
2815                if (!ret) {
2816                        status = le32_to_cpu(mb_st->mb_status_hw_run);
2817                        /* No pending message exists in ROCEE mbox. */
2818                        if (!(status & MB_ST_HW_RUN_M))
2819                                break;
2820                } else if (!v2_chk_mbox_is_avail(hr_dev, &busy)) {
2821                        break;
2822                }
2823
2824                if (time_after(jiffies, end)) {
2825                        dev_err_ratelimited(hr_dev->dev,
2826                                            "failed to wait mbox status 0x%x\n",
2827                                            status);
2828                        return -ETIMEDOUT;
2829                }
2830
2831                cond_resched();
2832                ret = -EBUSY;
2833        }
2834
2835        if (!ret) {
2836                *complete_status = (u8)(status & MB_ST_COMPLETE_M);
2837        } else if (!v2_chk_mbox_is_avail(hr_dev, &busy)) {
2838                /* Ignore all errors if the mbox is unavailable. */
2839                ret = 0;
2840                *complete_status = MB_ST_COMPLETE_M;
2841        }
2842
2843        return ret;
2844}
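
/*
 * v2_wait_mbox_complete() is a jiffies-bounded busy-wait: the
 * millisecond budget is converted to a deadline once, then the
 * hardware is polled until it reports idle or the deadline passes,
 * yielding the CPU between iterations. A minimal sketch of the
 * pattern, assuming a hypothetical hw_is_busy() predicate:
 *
 *	unsigned long end = jiffies + msecs_to_jiffies(timeout);
 *
 *	while (hw_is_busy()) {
 *		if (time_after(jiffies, end))
 *			return -ETIMEDOUT;
 *		cond_resched();
 *	}
 *	return 0;
 */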
2845
2846static int v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
2847                        u64 out_param, u32 in_modifier, u8 op_modifier,
2848                        u16 op, u16 token, int event)
2849{
2850        u8 status = 0;
2851        int ret;
2852
2853        /* Wait for the mbox to become idle */
2854        ret = v2_wait_mbox_complete(hr_dev, HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS,
2855                                    &status);
2856        if (unlikely(ret)) {
2857                dev_err_ratelimited(hr_dev->dev,
2858                                    "failed to check post mbox status = 0x%x, ret = %d.\n",
2859                                    status, ret);
2860                return ret;
2861        }
2862
2863        /* Post new message to mbox */
2864        ret = hns_roce_mbox_post(hr_dev, in_param, out_param, in_modifier,
2865                                 op_modifier, op, token, event);
2866        if (ret)
2867                dev_err_ratelimited(hr_dev->dev,
2868                                    "failed to post mailbox, ret = %d.\n", ret);
2869
2870        return ret;
2871}
2872
2873static int v2_poll_mbox_done(struct hns_roce_dev *hr_dev, unsigned int timeout)
2874{
2875        u8 status = 0;
2876        int ret;
2877
2878        ret = v2_wait_mbox_complete(hr_dev, timeout, &status);
2879        if (!ret) {
2880                if (status != MB_ST_COMPLETE_SUCC)
2881                        return -EBUSY;
2882        } else {
2883                dev_err_ratelimited(hr_dev->dev,
2884                                    "failed to check mbox status = 0x%x, ret = %d.\n",
2885                                    status, ret);
2886        }
2887
2888        return ret;
2889}
2890
2891static void copy_gid(void *dest, const union ib_gid *gid)
2892{
2893#define GID_SIZE 4
2894        const union ib_gid *src = gid;
2895        __le32 (*p)[GID_SIZE] = dest;
2896        int i;
2897
2898        if (!gid)
2899                src = &zgid;
2900
2901        for (i = 0; i < GID_SIZE; i++)
2902                (*p)[i] = cpu_to_le32(*(u32 *)&src->raw[i * sizeof(u32)]);
2903}
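
/*
 * copy_gid() loads the 16-byte raw GID as four host-order u32 words
 * and stores them as __le32. On a little-endian host (the platforms
 * this device ships on) cpu_to_le32() is a no-op, so the destination
 * bytes match the raw GID byte-for-byte. Illustrative first word for
 * a GID starting with bytes fe 80 00 00 on a little-endian host:
 *
 *	*(u32 *)&src->raw[0] == 0x000080fe
 *	(*p)[0] = cpu_to_le32(0x000080fe);  stored bytes: fe 80 00 00
 */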
2904
2905static int config_sgid_table(struct hns_roce_dev *hr_dev,
2906                             int gid_index, const union ib_gid *gid,
2907                             enum hns_roce_sgid_type sgid_type)
2908{
2909        struct hns_roce_cmq_desc desc;
2910        struct hns_roce_cfg_sgid_tb *sgid_tb =
2911                                    (struct hns_roce_cfg_sgid_tb *)desc.data;
2912
2913        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SGID_TB, false);
2914
2915        roce_set_field(sgid_tb->table_idx_rsv, CFG_SGID_TB_TABLE_IDX_M,
2916                       CFG_SGID_TB_TABLE_IDX_S, gid_index);
2917        roce_set_field(sgid_tb->vf_sgid_type_rsv, CFG_SGID_TB_VF_SGID_TYPE_M,
2918                       CFG_SGID_TB_VF_SGID_TYPE_S, sgid_type);
2919
2920        copy_gid(&sgid_tb->vf_sgid_l, gid);
2921
2922        return hns_roce_cmq_send(hr_dev, &desc, 1);
2923}
2924
2925static int config_gmv_table(struct hns_roce_dev *hr_dev,
2926                            int gid_index, const union ib_gid *gid,
2927                            enum hns_roce_sgid_type sgid_type,
2928                            const struct ib_gid_attr *attr)
2929{
2930        struct hns_roce_cmq_desc desc[2];
2931        struct hns_roce_cfg_gmv_tb_a *tb_a =
2932                                (struct hns_roce_cfg_gmv_tb_a *)desc[0].data;
2933        struct hns_roce_cfg_gmv_tb_b *tb_b =
2934                                (struct hns_roce_cfg_gmv_tb_b *)desc[1].data;
2935
2936        u16 vlan_id = VLAN_CFI_MASK;
2937        u8 mac[ETH_ALEN] = {};
2938        int ret;
2939
2940        if (gid) {
2941                ret = rdma_read_gid_l2_fields(attr, &vlan_id, mac);
2942                if (ret)
2943                        return ret;
2944        }
2945
2946        hns_roce_cmq_setup_basic_desc(&desc[0], HNS_ROCE_OPC_CFG_GMV_TBL, false);
2947        desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
2948
2949        hns_roce_cmq_setup_basic_desc(&desc[1], HNS_ROCE_OPC_CFG_GMV_TBL, false);
2950
2951        copy_gid(&tb_a->vf_sgid_l, gid);
2952
2953        roce_set_field(tb_a->vf_sgid_type_vlan, CFG_GMV_TB_VF_SGID_TYPE_M,
2954                       CFG_GMV_TB_VF_SGID_TYPE_S, sgid_type);
2955        roce_set_bit(tb_a->vf_sgid_type_vlan, CFG_GMV_TB_VF_VLAN_EN_S,
2956                     vlan_id < VLAN_CFI_MASK);
2957        roce_set_field(tb_a->vf_sgid_type_vlan, CFG_GMV_TB_VF_VLAN_ID_M,
2958                       CFG_GMV_TB_VF_VLAN_ID_S, vlan_id);
2959
2960        tb_b->vf_smac_l = cpu_to_le32(*(u32 *)mac);
2961        roce_set_field(tb_b->vf_smac_h, CFG_GMV_TB_SMAC_H_M,
2962                       CFG_GMV_TB_SMAC_H_S, *(u16 *)&mac[4]);
2963
2964        roce_set_field(tb_b->table_idx_rsv, CFG_GMV_TB_SGID_IDX_M,
2965                       CFG_GMV_TB_SGID_IDX_S, gid_index);
2966
2967        return hns_roce_cmq_send(hr_dev, desc, 2);
2968}
2969
2970static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u32 port,
2971                               int gid_index, const union ib_gid *gid,
2972                               const struct ib_gid_attr *attr)
2973{
2974        enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
2975        int ret;
2976
2977        if (gid) {
2978                if (attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
2979                        if (ipv6_addr_v4mapped((void *)gid))
2980                                sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV4;
2981                        else
2982                                sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV6;
2983                } else if (attr->gid_type == IB_GID_TYPE_ROCE) {
2984                        sgid_type = GID_TYPE_FLAG_ROCE_V1;
2985                }
2986        }
2987
2988        if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
2989                ret = config_gmv_table(hr_dev, gid_index, gid, sgid_type, attr);
2990        else
2991                ret = config_sgid_table(hr_dev, gid_index, gid, sgid_type);
2992
2993        if (ret)
2994                ibdev_err(&hr_dev->ib_dev, "failed to set gid, ret = %d!\n",
2995                          ret);
2996
2997        return ret;
2998}
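
/*
 * In short, the SGID type chosen above is: RoCEv2 GIDs holding an
 * IPv4-mapped address (::ffff:a.b.c.d) are programmed as
 * GID_TYPE_FLAG_ROCE_V2_IPV4, all other RoCEv2 GIDs as
 * GID_TYPE_FLAG_ROCE_V2_IPV6, and RoCEv1 GIDs as
 * GID_TYPE_FLAG_ROCE_V1. E.g. an illustrative GID ::ffff:192.0.2.1
 * selects GID_TYPE_FLAG_ROCE_V2_IPV4.
 */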
2999
3000static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
3001                               u8 *addr)
3002{
3003        struct hns_roce_cmq_desc desc;
3004        struct hns_roce_cfg_smac_tb *smac_tb =
3005                                    (struct hns_roce_cfg_smac_tb *)desc.data;
3006        u16 reg_smac_h;
3007        u32 reg_smac_l;
3008
3009        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SMAC_TB, false);
3010
3011        reg_smac_l = *(u32 *)(&addr[0]);
3012        reg_smac_h = *(u16 *)(&addr[4]);
3013
3014        roce_set_field(smac_tb->tb_idx_rsv, CFG_SMAC_TB_IDX_M,
3015                       CFG_SMAC_TB_IDX_S, phy_port);
3016        roce_set_field(smac_tb->vf_smac_h_rsv, CFG_SMAC_TB_VF_SMAC_H_M,
3017                       CFG_SMAC_TB_VF_SMAC_H_S, reg_smac_h);
3018        smac_tb->vf_smac_l = cpu_to_le32(reg_smac_l);
3019
3020        return hns_roce_cmq_send(hr_dev, &desc, 1);
3021}
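
/*
 * The 6-byte MAC is split to match the SMAC_L/SMAC_H register layout:
 * a 32-bit low word covering addr[0..3] and a 16-bit high half
 * covering addr[4..5]. For an illustrative address 00:11:22:33:44:55:
 *
 *	reg_smac_l = *(u32 *)&addr[0];	covers bytes 00 11 22 33
 *	reg_smac_h = *(u16 *)&addr[4];	covers bytes 44 55
 */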
3022
3023static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
3024                        struct hns_roce_v2_mpt_entry *mpt_entry,
3025                        struct hns_roce_mr *mr)
3026{
3027        u64 pages[HNS_ROCE_V2_MAX_INNER_MTPT_NUM] = { 0 };
3028        struct ib_device *ibdev = &hr_dev->ib_dev;
3029        dma_addr_t pbl_ba;
3030        int i, count;
3031
3032        count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
3033                                  ARRAY_SIZE(pages), &pbl_ba);
3034        if (count < 1) {
3035                ibdev_err(ibdev, "failed to find PBL mtr, count = %d.\n",
3036                          count);
3037                return -ENOBUFS;
3038        }
3039
3040        /* Align to the 64-byte hardware address access unit */
3041        for (i = 0; i < count; i++)
3042                pages[i] >>= 6;
3043
3044        mpt_entry->pbl_size = cpu_to_le32(mr->npages);
3045        mpt_entry->pbl_ba_l = cpu_to_le32(pbl_ba >> 3);
3046        roce_set_field(mpt_entry->byte_48_mode_ba,
3047                       V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S,
3048                       upper_32_bits(pbl_ba >> 3));
3049
3050        mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
3051        roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
3052                       V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0]));
3053
3054        mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
3055        roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
3056                       V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
3057        roce_set_field(mpt_entry->byte_64_buf_pa1,
3058                       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
3059                       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
3060                       to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
3061
3062        return 0;
3063}
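
/*
 * The hardware consumes PBL page addresses in 64-byte units and the
 * PBL base address in 8-byte units, hence ">> 6" on each page and
 * ">> 3" on pbl_ba. Worked example with an illustrative DMA address:
 *
 *	page = 0x123450000;
 *	page >> 6 == 0x48d1400;		value written per page entry
 *	page >> 3 == 0x2468a000;	value written for a base address
 */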
3064
3065static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
3066                                  void *mb_buf, struct hns_roce_mr *mr,
3067                                  unsigned long mtpt_idx)
3068{
3069        struct hns_roce_v2_mpt_entry *mpt_entry;
3070        int ret;
3071
3072        mpt_entry = mb_buf;
3073        memset(mpt_entry, 0, sizeof(*mpt_entry));
3074
3075        hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_VALID);
3076        hr_reg_write(mpt_entry, MPT_PD, mr->pd);
3077        hr_reg_enable(mpt_entry, MPT_L_INV_EN);
3078
3079        hr_reg_write_bool(mpt_entry, MPT_BIND_EN,
3080                          mr->access & IB_ACCESS_MW_BIND);
3081        hr_reg_write_bool(mpt_entry, MPT_ATOMIC_EN,
3082                          mr->access & IB_ACCESS_REMOTE_ATOMIC);
3083        hr_reg_write_bool(mpt_entry, MPT_RR_EN,
3084                          mr->access & IB_ACCESS_REMOTE_READ);
3085        hr_reg_write_bool(mpt_entry, MPT_RW_EN,
3086                          mr->access & IB_ACCESS_REMOTE_WRITE);
3087        hr_reg_write_bool(mpt_entry, MPT_LW_EN,
3088                          mr->access & IB_ACCESS_LOCAL_WRITE);
3089
3090        mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
3091        mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
3092        mpt_entry->lkey = cpu_to_le32(mr->key);
3093        mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
3094        mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
3095
3096        if (mr->type != MR_TYPE_MR)
3097                hr_reg_enable(mpt_entry, MPT_PA);
3098
3099        if (mr->type == MR_TYPE_DMA)
3100                return 0;
3101
3102        if (mr->pbl_hop_num != HNS_ROCE_HOP_NUM_0)
3103                hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM, mr->pbl_hop_num);
3104
3105        hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ,
3106                     to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
3107        hr_reg_enable(mpt_entry, MPT_INNER_PA_VLD);
3108
3109        ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
3110
3111        return ret;
3112}
3113
3114static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
3115                                        struct hns_roce_mr *mr, int flags,
3116                                        void *mb_buf)
3117{
3118        struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
3119        u32 mr_access_flags = mr->access;
3120        int ret = 0;
3121
3122        roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
3123                       V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
3124
3125        roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
3126                       V2_MPT_BYTE_4_PD_S, mr->pd);
3127
3128        if (flags & IB_MR_REREG_ACCESS) {
3129                roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
3130                             V2_MPT_BYTE_8_BIND_EN_S,
3131                             (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
3132                roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
3133                             V2_MPT_BYTE_8_ATOMIC_EN_S,
3134                             mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
3135                roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
3136                             mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0);
3137                roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
3138                             mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
3139                roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
3140                             mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
3141        }
3142
3143        if (flags & IB_MR_REREG_TRANS) {
3144                mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
3145                mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
3146                mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
3147                mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
3148
3149                ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
3150        }
3151
3152        return ret;
3153}
3154
3155static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev,
3156                                       void *mb_buf, struct hns_roce_mr *mr)
3157{
3158        struct ib_device *ibdev = &hr_dev->ib_dev;
3159        struct hns_roce_v2_mpt_entry *mpt_entry;
3160        dma_addr_t pbl_ba = 0;
3161
3162        mpt_entry = mb_buf;
3163        memset(mpt_entry, 0, sizeof(*mpt_entry));
3164
3165        if (hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, NULL, 0, &pbl_ba) < 0) {
3166                ibdev_err(ibdev, "failed to find frmr mtr.\n");
3167                return -ENOBUFS;
3168        }
3169
3170        roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
3171                       V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
3172        roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
3173                       V2_MPT_BYTE_4_PBL_HOP_NUM_S, 1);
3174        roce_set_field(mpt_entry->byte_4_pd_hop_st,
3175                       V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
3176                       V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
3177                       to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
3178        roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
3179                       V2_MPT_BYTE_4_PD_S, mr->pd);
3180
3181        roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 1);
3182        roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
3183        roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
3184
3185        roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_FRE_S, 1);
3186        roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
3187        roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 0);
3188        roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
3189
3190        mpt_entry->pbl_size = cpu_to_le32(mr->npages);
3191
3192        mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(pbl_ba >> 3));
3193        roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
3194                       V2_MPT_BYTE_48_PBL_BA_H_S,
3195                       upper_32_bits(pbl_ba >> 3));
3196
3197        roce_set_field(mpt_entry->byte_64_buf_pa1,
3198                       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
3199                       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
3200                       to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
3201
3202        return 0;
3203}
3204
3205static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
3206{
3207        struct hns_roce_v2_mpt_entry *mpt_entry;
3208
3209        mpt_entry = mb_buf;
3210        memset(mpt_entry, 0, sizeof(*mpt_entry));
3211
3212        roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
3213                       V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
3214        roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
3215                       V2_MPT_BYTE_4_PD_S, mw->pdn);
3216        roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
3217                       V2_MPT_BYTE_4_PBL_HOP_NUM_S,
3218                       mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
3219                                                               mw->pbl_hop_num);
3220        roce_set_field(mpt_entry->byte_4_pd_hop_st,
3221                       V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
3222                       V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
3223                       mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
3224
3225        roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
3226        roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
3227        roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S, 1);
3228
3229        roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
3230        roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1);
3231        roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
3232        roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BQP_S,
3233                     mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1);
3234
3235        roce_set_field(mpt_entry->byte_64_buf_pa1,
3236                       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
3237                       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
3238                       mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
3239
3240        mpt_entry->lkey = cpu_to_le32(mw->rkey);
3241
3242        return 0;
3243}
3244
3245static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
3246{
3247        return hns_roce_buf_offset(hr_cq->mtr.kmem, n * hr_cq->cqe_size);
3248}
3249
3250static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, unsigned int n)
3251{
3252        struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);
3253
3254        /* Get the CQE while its owner bit differs from the MSB of cons_index */
3255        return (hr_reg_read(cqe, CQE_OWNER) ^ !!(n & hr_cq->cq_depth)) ? cqe :
3256                                                                         NULL;
3257}
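
/*
 * Ownership handshake: hardware toggles CQE_OWNER each lap around the
 * ring, and bit log2(cq_depth) of the consumer index toggles at the
 * same rate, so a CQE belongs to software exactly while the two bits
 * disagree. Worked example with cq_depth == 256:
 *
 *	n = 5    ->  !!(n & 256) == 0  ->  CQE valid while OWNER == 1
 *	n = 261  ->  !!(n & 256) == 1  ->  CQE valid while OWNER == 0
 */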
3258
3259static inline void update_cq_db(struct hns_roce_dev *hr_dev,
3260                                struct hns_roce_cq *hr_cq)
3261{
3262        if (likely(hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB)) {
3263                *hr_cq->set_ci_db = hr_cq->cons_index & V2_CQ_DB_CONS_IDX_M;
3264        } else {
3265                struct hns_roce_v2_db cq_db = {};
3266
3267                hr_reg_write(&cq_db, DB_TAG, hr_cq->cqn);
3268                hr_reg_write(&cq_db, DB_CMD, HNS_ROCE_V2_CQ_DB);
3269                hr_reg_write(&cq_db, DB_CQ_CI, hr_cq->cons_index);
3270                hr_reg_write(&cq_db, DB_CQ_CMD_SN, 1);
3271
3272                hns_roce_write64(hr_dev, (__le32 *)&cq_db, hr_cq->db_reg);
3273        }
3274}
3275
3276static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
3277                                   struct hns_roce_srq *srq)
3278{
3279        struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
3280        struct hns_roce_v2_cqe *cqe, *dest;
3281        u32 prod_index;
3282        int nfreed = 0;
3283        int wqe_index;
3284        u8 owner_bit;
3285
3286        for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index);
3287             ++prod_index) {
3288                if (prod_index > hr_cq->cons_index + hr_cq->ib_cq.cqe)
3289                        break;
3290        }
3291
3292        /*
3293         * Now walk backwards through the CQ, removing CQ entries
3294         * that match our QP by overwriting them with subsequent entries.
3295         */
3296        while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
3297                cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe);
3298                if (hr_reg_read(cqe, CQE_LCL_QPN) == qpn) {
3299                        if (srq && hr_reg_read(cqe, CQE_S_R)) {
3300                                wqe_index = hr_reg_read(cqe, CQE_WQE_IDX);
3301                                hns_roce_free_srq_wqe(srq, wqe_index);
3302                        }
3303                        ++nfreed;
3304                } else if (nfreed) {
3305                        dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
3306                                          hr_cq->ib_cq.cqe);
3307                        owner_bit = hr_reg_read(dest, CQE_OWNER);
3308                        memcpy(dest, cqe, sizeof(*cqe));
3309                        hr_reg_write(dest, CQE_OWNER, owner_bit);
3310                }
3311        }
3312
3313        if (nfreed) {
3314                hr_cq->cons_index += nfreed;
3315                update_cq_db(hr_dev, hr_cq);
3316        }
3317}
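
/*
 * The clean pass above is an in-place compaction: walking backwards
 * from the last hardware-produced CQE, each entry of the dying QP
 * bumps nfreed, and each survivor is copied nfreed slots towards the
 * producer with the destination's owner bit preserved, so the
 * ownership protocol is undisturbed. Illustrative window, where Q is
 * the dying QP, X any other QP and c == cons_index:
 *
 *	before:  [c]X0 [c+1]Q1 [c+2]X2 [c+3]Q3 [c+4]X4
 *	after:   [c+2]X0 [c+3]X2 [c+4]X4, cons_index += 2
 */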
3318
3319static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
3320                                 struct hns_roce_srq *srq)
3321{
3322        spin_lock_irq(&hr_cq->lock);
3323        __hns_roce_v2_cq_clean(hr_cq, qpn, srq);
3324        spin_unlock_irq(&hr_cq->lock);
3325}
3326
3327static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
3328                                  struct hns_roce_cq *hr_cq, void *mb_buf,
3329                                  u64 *mtts, dma_addr_t dma_handle)
3330{
3331        struct hns_roce_v2_cq_context *cq_context;
3332
3333        cq_context = mb_buf;
3334        memset(cq_context, 0, sizeof(*cq_context));
3335
3336        hr_reg_write(cq_context, CQC_CQ_ST, V2_CQ_STATE_VALID);
3337        hr_reg_write(cq_context, CQC_ARM_ST, REG_NXT_CEQE);
3338        hr_reg_write(cq_context, CQC_SHIFT, ilog2(hr_cq->cq_depth));
3339        hr_reg_write(cq_context, CQC_CEQN, hr_cq->vector);
3340        hr_reg_write(cq_context, CQC_CQN, hr_cq->cqn);
3341
3342        if (hr_cq->cqe_size == HNS_ROCE_V3_CQE_SIZE)
3343                hr_reg_write(cq_context, CQC_CQE_SIZE, CQE_SIZE_64B);
3344
3345        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_STASH)
3346                hr_reg_enable(cq_context, CQC_STASH);
3347
3348        hr_reg_write(cq_context, CQC_CQE_CUR_BLK_ADDR_L,
3349                     to_hr_hw_page_addr(mtts[0]));
3350        hr_reg_write(cq_context, CQC_CQE_CUR_BLK_ADDR_H,
3351                     upper_32_bits(to_hr_hw_page_addr(mtts[0])));
3352        hr_reg_write(cq_context, CQC_CQE_HOP_NUM, hr_dev->caps.cqe_hop_num ==
3353                     HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);
3354        hr_reg_write(cq_context, CQC_CQE_NEX_BLK_ADDR_L,
3355                     to_hr_hw_page_addr(mtts[1]));
3356        hr_reg_write(cq_context, CQC_CQE_NEX_BLK_ADDR_H,
3357                     upper_32_bits(to_hr_hw_page_addr(mtts[1])));
3358        hr_reg_write(cq_context, CQC_CQE_BAR_PG_SZ,
3359                     to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.ba_pg_shift));
3360        hr_reg_write(cq_context, CQC_CQE_BUF_PG_SZ,
3361                     to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.buf_pg_shift));
3362        hr_reg_write(cq_context, CQC_CQE_BA_L, dma_handle >> 3);
3363        hr_reg_write(cq_context, CQC_CQE_BA_H, (dma_handle >> (32 + 3)));
3364        hr_reg_write_bool(cq_context, CQC_DB_RECORD_EN,
3365                          hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB);
3366        hr_reg_write(cq_context, CQC_CQE_DB_RECORD_ADDR_L,
3367                     ((u32)hr_cq->db.dma) >> 1);
3368        hr_reg_write(cq_context, CQC_CQE_DB_RECORD_ADDR_H,
3369                     hr_cq->db.dma >> 32);
3370        hr_reg_write(cq_context, CQC_CQ_MAX_CNT,
3371                     HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
3372        hr_reg_write(cq_context, CQC_CQ_PERIOD,
3373                     HNS_ROCE_V2_CQ_DEFAULT_INTERVAL);
3374}
3375
3376static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
3377                                     enum ib_cq_notify_flags flags)
3378{
3379        struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
3380        struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
3381        struct hns_roce_v2_db cq_db = {};
3382        u32 notify_flag;
3383
3384        /*
3385         * flags = 0, then notify_flag : next
3386         * flags = 1, then notify_flag : solicited
3387         */
3388        notify_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
3389                      V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
3390
3391        hr_reg_write(&cq_db, DB_TAG, hr_cq->cqn);
3392        hr_reg_write(&cq_db, DB_CMD, HNS_ROCE_V2_CQ_DB_NOTIFY);
3393        hr_reg_write(&cq_db, DB_CQ_CI, hr_cq->cons_index);
3394        hr_reg_write(&cq_db, DB_CQ_CMD_SN, hr_cq->arm_sn);
3395        hr_reg_write(&cq_db, DB_CQ_NOTIFY, notify_flag);
3396
3397        hns_roce_write64(hr_dev, (__le32 *)&cq_db, hr_cq->db_reg);
3398
3399        return 0;
3400}
3401
3402static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
3403                                        struct hns_roce_qp *qp,
3404                                        struct ib_wc *wc)
3405{
3406        struct hns_roce_rinl_sge *sge_list;
3407        u32 wr_num, wr_cnt, sge_num;
3408        u32 sge_cnt, data_len, size;
3409        void *wqe_buf;
3410
3411        wr_num = hr_reg_read(cqe, CQE_WQE_IDX);
3412        wr_cnt = wr_num & (qp->rq.wqe_cnt - 1);
3413
3414        sge_list = qp->rq_inl_buf.wqe_list[wr_cnt].sg_list;
3415        sge_num = qp->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
3416        wqe_buf = hns_roce_get_recv_wqe(qp, wr_cnt);
3417        data_len = wc->byte_len;
3418
3419        for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
3420                size = min(sge_list[sge_cnt].len, data_len);
3421                memcpy((void *)sge_list[sge_cnt].addr, wqe_buf, size);
3422
3423                data_len -= size;
3424                wqe_buf += size;
3425        }
3426
3427        if (unlikely(data_len)) {
3428                wc->status = IB_WC_LOC_LEN_ERR;
3429                return -EAGAIN;
3430        }
3431
3432        return 0;
3433}
3434
3435static int sw_comp(struct hns_roce_qp *hr_qp, struct hns_roce_wq *wq,
3436                   int num_entries, struct ib_wc *wc)
3437{
3438        unsigned int left;
3439        int npolled = 0;
3440
3441        left = wq->head - wq->tail;
3442        if (left == 0)
3443                return 0;
3444
3445        left = min_t(unsigned int, (unsigned int)num_entries, left);
3446        while (npolled < left) {
3447                wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
3448                wc->status = IB_WC_WR_FLUSH_ERR;
3449                wc->vendor_err = 0;
3450                wc->qp = &hr_qp->ibqp;
3451
3452                wq->tail++;
3453                wc++;
3454                npolled++;
3455        }
3456
3457        return npolled;
3458}
3459
3460static int hns_roce_v2_sw_poll_cq(struct hns_roce_cq *hr_cq, int num_entries,
3461                                  struct ib_wc *wc)
3462{
3463        struct hns_roce_qp *hr_qp;
3464        int npolled = 0;
3465
3466        list_for_each_entry(hr_qp, &hr_cq->sq_list, sq_node) {
3467                npolled += sw_comp(hr_qp, &hr_qp->sq,
3468                                   num_entries - npolled, wc + npolled);
3469                if (npolled >= num_entries)
3470                        goto out;
3471        }
3472
3473        list_for_each_entry(hr_qp, &hr_cq->rq_list, rq_node) {
3474                npolled += sw_comp(hr_qp, &hr_qp->rq,
3475                                   num_entries - npolled, wc + npolled);
3476                if (npolled >= num_entries)
3477                        goto out;
3478        }
3479
3480out:
3481        return npolled;
3482}
3483
3484static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
3485                           struct hns_roce_cq *cq, struct hns_roce_v2_cqe *cqe,
3486                           struct ib_wc *wc)
3487{
3488        static const struct {
3489                u32 cqe_status;
3490                enum ib_wc_status wc_status;
3491        } map[] = {
3492                { HNS_ROCE_CQE_V2_SUCCESS, IB_WC_SUCCESS },
3493                { HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR, IB_WC_LOC_LEN_ERR },
3494                { HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR, IB_WC_LOC_QP_OP_ERR },
3495                { HNS_ROCE_CQE_V2_LOCAL_PROT_ERR, IB_WC_LOC_PROT_ERR },
3496                { HNS_ROCE_CQE_V2_WR_FLUSH_ERR, IB_WC_WR_FLUSH_ERR },
3497                { HNS_ROCE_CQE_V2_MW_BIND_ERR, IB_WC_MW_BIND_ERR },
3498                { HNS_ROCE_CQE_V2_BAD_RESP_ERR, IB_WC_BAD_RESP_ERR },
3499                { HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR, IB_WC_LOC_ACCESS_ERR },
3500                { HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR, IB_WC_REM_INV_REQ_ERR },
3501                { HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR, IB_WC_REM_ACCESS_ERR },
3502                { HNS_ROCE_CQE_V2_REMOTE_OP_ERR, IB_WC_REM_OP_ERR },
3503                { HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR,
3504                  IB_WC_RETRY_EXC_ERR },
3505                { HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR, IB_WC_RNR_RETRY_EXC_ERR },
3506                { HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR, IB_WC_REM_ABORT_ERR },
3507                { HNS_ROCE_CQE_V2_GENERAL_ERR, IB_WC_GENERAL_ERR}
3508        };
3509
3510        u32 cqe_status = hr_reg_read(cqe, CQE_STATUS);
3511        int i;
3512
3513        wc->status = IB_WC_GENERAL_ERR;
3514        for (i = 0; i < ARRAY_SIZE(map); i++)
3515                if (cqe_status == map[i].cqe_status) {
3516                        wc->status = map[i].wc_status;
3517                        break;
3518                }
3519
3520        if (likely(wc->status == IB_WC_SUCCESS ||
3521                   wc->status == IB_WC_WR_FLUSH_ERR))
3522                return;
3523
3524        ibdev_err(&hr_dev->ib_dev, "error cqe status 0x%x:\n", cqe_status);
3525        print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 4, cqe,
3526                       cq->cqe_size, false);
3527        wc->vendor_err = hr_reg_read(cqe, CQE_SUB_STATUS);
3528
3529        /*
3530         * For hns ROCEE, GENERAL_ERR is an error type that is not defined in
3531         * the standard protocol; the driver must ignore it and need not set
3532         * the QP to an error state.
3533         */
3534        if (cqe_status == HNS_ROCE_CQE_V2_GENERAL_ERR)
3535                return;
3536
3537        flush_cqe(hr_dev, qp);
3538}
3539
3540static int get_cur_qp(struct hns_roce_cq *hr_cq, struct hns_roce_v2_cqe *cqe,
3541                      struct hns_roce_qp **cur_qp)
3542{
3543        struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
3544        struct hns_roce_qp *hr_qp = *cur_qp;
3545        u32 qpn;
3546
3547        qpn = hr_reg_read(cqe, CQE_LCL_QPN);
3548
3549        if (!hr_qp || qpn != hr_qp->qpn) {
3550                hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
3551                if (unlikely(!hr_qp)) {
3552                        ibdev_err(&hr_dev->ib_dev,
3553                                  "CQ %06lx with entry for unknown QPN %06x\n",
3554                                  hr_cq->cqn, qpn);
3555                        return -EINVAL;
3556                }
3557                *cur_qp = hr_qp;
3558        }
3559
3560        return 0;
3561}
3562
3563/*
3564 * mapped-value = 1 + real-value
3565 * The IB WC opcode's real value starts from 0. In order to distinguish
3566 * between initialized and uninitialized map values, we add 1 to the actual
3567 * value when defining the mapping, so that validity can be identified by
3568 * checking whether the mapped value is greater than 0.
3569 */
3570#define HR_WC_OP_MAP(hr_key, ib_key) \
3571                [HNS_ROCE_V2_WQE_OP_ ## hr_key] = 1 + IB_WC_ ## ib_key
3572
3573static const u32 wc_send_op_map[] = {
3574        HR_WC_OP_MAP(SEND,                      SEND),
3575        HR_WC_OP_MAP(SEND_WITH_INV,             SEND),
3576        HR_WC_OP_MAP(SEND_WITH_IMM,             SEND),
3577        HR_WC_OP_MAP(RDMA_READ,                 RDMA_READ),
3578        HR_WC_OP_MAP(RDMA_WRITE,                RDMA_WRITE),
3579        HR_WC_OP_MAP(RDMA_WRITE_WITH_IMM,       RDMA_WRITE),
3580        HR_WC_OP_MAP(LOCAL_INV,                 LOCAL_INV),
3581        HR_WC_OP_MAP(ATOM_CMP_AND_SWAP,         COMP_SWAP),
3582        HR_WC_OP_MAP(ATOM_FETCH_AND_ADD,        FETCH_ADD),
3583        HR_WC_OP_MAP(ATOM_MSK_CMP_AND_SWAP,     MASKED_COMP_SWAP),
3584        HR_WC_OP_MAP(ATOM_MSK_FETCH_AND_ADD,    MASKED_FETCH_ADD),
3585        HR_WC_OP_MAP(FAST_REG_PMR,              REG_MR),
3586        HR_WC_OP_MAP(BIND_MW,                   REG_MR),
3587};
3588
3589static int to_ib_wc_send_op(u32 hr_opcode)
3590{
3591        if (hr_opcode >= ARRAY_SIZE(wc_send_op_map))
3592                return -EINVAL;
3593
3594        return wc_send_op_map[hr_opcode] ? wc_send_op_map[hr_opcode] - 1 :
3595                                           -EINVAL;
3596}
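
/*
 * Because each initialized entry of wc_send_op_map stores the IB
 * opcode plus one, a zero entry unambiguously means "no mapping" even
 * though valid IB opcodes start at 0. For example:
 *
 *	wc_send_op_map[HNS_ROCE_V2_WQE_OP_SEND] == 1 + IB_WC_SEND
 *	to_ib_wc_send_op(HNS_ROCE_V2_WQE_OP_SEND) == IB_WC_SEND
 *	to_ib_wc_send_op(an unmapped opcode)      == -EINVAL
 */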
3597
3598static const u32 wc_recv_op_map[] = {
3599        HR_WC_OP_MAP(RDMA_WRITE_WITH_IMM,               WITH_IMM),
3600        HR_WC_OP_MAP(SEND,                              RECV),
3601        HR_WC_OP_MAP(SEND_WITH_IMM,                     WITH_IMM),
3602        HR_WC_OP_MAP(SEND_WITH_INV,                     RECV),
3603};
3604
3605static int to_ib_wc_recv_op(u32 hr_opcode)
3606{
3607        if (hr_opcode >= ARRAY_SIZE(wc_recv_op_map))
3608                return -EINVAL;
3609
3610        return wc_recv_op_map[hr_opcode] ? wc_recv_op_map[hr_opcode] - 1 :
3611                                           -EINVAL;
3612}
3613
3614static void fill_send_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
3615{
3616        u32 hr_opcode;
3617        int ib_opcode;
3618
3619        wc->wc_flags = 0;
3620
3621        hr_opcode = hr_reg_read(cqe, CQE_OPCODE);
3622        switch (hr_opcode) {
3623        case HNS_ROCE_V2_WQE_OP_RDMA_READ:
3624                wc->byte_len = le32_to_cpu(cqe->byte_cnt);
3625                break;
3626        case HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM:
3627        case HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM:
3628                wc->wc_flags |= IB_WC_WITH_IMM;
3629                break;
3630        case HNS_ROCE_V2_WQE_OP_LOCAL_INV:
3631                wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3632                break;
3633        case HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP:
3634        case HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD:
3635        case HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP:
3636        case HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD:
3637                wc->byte_len  = 8;
3638                break;
3639        default:
3640                break;
3641        }
3642
3643        ib_opcode = to_ib_wc_send_op(hr_opcode);
3644        if (ib_opcode < 0)
3645                wc->status = IB_WC_GENERAL_ERR;
3646        else
3647                wc->opcode = ib_opcode;
3648}
3649
3650static inline bool is_rq_inl_enabled(struct ib_wc *wc, u32 hr_opcode,
3651                                     struct hns_roce_v2_cqe *cqe)
3652{
3653        return wc->qp->qp_type != IB_QPT_UD && wc->qp->qp_type != IB_QPT_GSI &&
3654               (hr_opcode == HNS_ROCE_V2_OPCODE_SEND ||
3655                hr_opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
3656                hr_opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
3657               hr_reg_read(cqe, CQE_RQ_INLINE);
3658}
3659
3660static int fill_recv_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
3661{
3662        struct hns_roce_qp *qp = to_hr_qp(wc->qp);
3663        u32 hr_opcode;
3664        int ib_opcode;
3665        int ret;
3666
3667        wc->byte_len = le32_to_cpu(cqe->byte_cnt);
3668
3669        hr_opcode = hr_reg_read(cqe, CQE_OPCODE);
3670        switch (hr_opcode) {
3671        case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
3672        case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
3673                wc->wc_flags = IB_WC_WITH_IMM;
3674                wc->ex.imm_data = cpu_to_be32(le32_to_cpu(cqe->immtdata));
3675                break;
3676        case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
3677                wc->wc_flags = IB_WC_WITH_INVALIDATE;
3678                wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
3679                break;
3680        default:
3681                wc->wc_flags = 0;
3682        }
3683
3684        ib_opcode = to_ib_wc_recv_op(hr_opcode);
3685        if (ib_opcode < 0)
3686                wc->status = IB_WC_GENERAL_ERR;
3687        else
3688                wc->opcode = ib_opcode;
3689
3690        if (is_rq_inl_enabled(wc, hr_opcode, cqe)) {
3691                ret = hns_roce_handle_recv_inl_wqe(cqe, qp, wc);
3692                if (unlikely(ret))
3693                        return ret;
3694        }
3695
3696        wc->sl = hr_reg_read(cqe, CQE_SL);
3697        wc->src_qp = hr_reg_read(cqe, CQE_RMT_QPN);
3698        wc->slid = 0;
3699        wc->wc_flags |= hr_reg_read(cqe, CQE_GRH) ? IB_WC_GRH : 0;
3700        wc->port_num = hr_reg_read(cqe, CQE_PORTN);
3701        wc->pkey_index = 0;
3702
3703        if (hr_reg_read(cqe, CQE_VID_VLD)) {
3704                wc->vlan_id = hr_reg_read(cqe, CQE_VID);
3705                wc->wc_flags |= IB_WC_WITH_VLAN;
3706        } else {
3707                wc->vlan_id = 0xffff;
3708        }
3709
3710        wc->network_hdr_type = hr_reg_read(cqe, CQE_PORT_TYPE);
3711
3712        return 0;
3713}
3714
3715static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
3716                                struct hns_roce_qp **cur_qp, struct ib_wc *wc)
3717{
3718        struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
3719        struct hns_roce_qp *qp = *cur_qp;
3720        struct hns_roce_srq *srq = NULL;
3721        struct hns_roce_v2_cqe *cqe;
3722        struct hns_roce_wq *wq;
3723        int is_send;
3724        u16 wqe_idx;
3725        int ret;
3726
3727        cqe = get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
3728        if (!cqe)
3729                return -EAGAIN;
3730
3731        ++hr_cq->cons_index;
3732        /* Memory barrier: read the CQE contents only after the owner bit check */
3733        rmb();
3734
3735        ret = get_cur_qp(hr_cq, cqe, &qp);
3736        if (ret)
3737                return ret;
3738
3739        wc->qp = &qp->ibqp;
3740        wc->vendor_err = 0;
3741
3742        wqe_idx = hr_reg_read(cqe, CQE_WQE_IDX);
3743
3744        is_send = !hr_reg_read(cqe, CQE_S_R);
3745        if (is_send) {
3746                wq = &qp->sq;
3747
3748                /* If sq_signal_bits is set, the tail pointer is updated to
3749                 * the WQE corresponding to the current CQE.
3750                 */
3751                if (qp->sq_signal_bits)
3752                        wq->tail += (wqe_idx - (u16)wq->tail) &
3753                                    (wq->wqe_cnt - 1);
3754
3755                wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
3756                ++wq->tail;
3757
3758                fill_send_wc(wc, cqe);
3759        } else {
3760                if (qp->ibqp.srq) {
3761                        srq = to_hr_srq(qp->ibqp.srq);
3762                        wc->wr_id = srq->wrid[wqe_idx];
3763                        hns_roce_free_srq_wqe(srq, wqe_idx);
3764                } else {
3765                        wq = &qp->rq;
3766                        wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
3767                        ++wq->tail;
3768                }
3769
3770                ret = fill_recv_wc(wc, cqe);
3771        }
3772
3773        get_cqe_status(hr_dev, qp, hr_cq, cqe, wc);
3774        if (unlikely(wc->status != IB_WC_SUCCESS))
3775                return 0;
3776
3777        return ret;
3778}
3779
3780static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
3781                               struct ib_wc *wc)
3782{
3783        struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
3784        struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
3785        struct hns_roce_qp *cur_qp = NULL;
3786        unsigned long flags;
3787        int npolled;
3788
3789        spin_lock_irqsave(&hr_cq->lock, flags);
3790
3791        /*
3792         * When the device starts to reset, the state is RST_DOWN. At this time,
3793         * there may still be some valid CQEs in the hardware that are not
3794         * polled. Therefore, it is not allowed to switch to the software mode
3795         * immediately. When the state changes to UNINIT, CQE no longer exists
3796         * in the hardware, and then switch to software mode.
3797         */
3798        if (hr_dev->state == HNS_ROCE_DEVICE_STATE_UNINIT) {
3799                npolled = hns_roce_v2_sw_poll_cq(hr_cq, num_entries, wc);
3800                goto out;
3801        }
3802
3803        for (npolled = 0; npolled < num_entries; ++npolled) {
3804                if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled))
3805                        break;
3806        }
3807
3808        if (npolled)
3809                update_cq_db(hr_dev, hr_cq);
3810
3811out:
3812        spin_unlock_irqrestore(&hr_cq->lock, flags);
3813
3814        return npolled;
3815}
3816
3817static int get_op_for_set_hem(struct hns_roce_dev *hr_dev, u32 type,
3818                              int step_idx, u16 *mbox_op)
3819{
3820        u16 op;
3821
3822        switch (type) {
3823        case HEM_TYPE_QPC:
3824                op = HNS_ROCE_CMD_WRITE_QPC_BT0;
3825                break;
3826        case HEM_TYPE_MTPT:
3827                op = HNS_ROCE_CMD_WRITE_MPT_BT0;
3828                break;
3829        case HEM_TYPE_CQC:
3830                op = HNS_ROCE_CMD_WRITE_CQC_BT0;
3831                break;
3832        case HEM_TYPE_SRQC:
3833                op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
3834                break;
3835        case HEM_TYPE_SCCC:
3836                op = HNS_ROCE_CMD_WRITE_SCCC_BT0;
3837                break;
3838        case HEM_TYPE_QPC_TIMER:
3839                op = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0;
3840                break;
3841        case HEM_TYPE_CQC_TIMER:
3842                op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0;
3843                break;
3844        default:
3845                dev_warn(hr_dev->dev, "failed to check hem type %u.\n", type);
3846                return -EINVAL;
3847        }
3848
3849        *mbox_op = op + step_idx;
3850
3851        return 0;
3852}
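
/*
 * The mailbox opcodes for each context type are allocated as
 * consecutive values, one per base-address-table level, so the level
 * index is simply added to the BT0 opcode. Under that numbering, for
 * QPC:
 *
 *	step_idx 0  ->  HNS_ROCE_CMD_WRITE_QPC_BT0
 *	step_idx 1  ->  HNS_ROCE_CMD_WRITE_QPC_BT0 + 1
 *	step_idx 2  ->  HNS_ROCE_CMD_WRITE_QPC_BT0 + 2
 */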
3853
3854static int config_gmv_ba_to_hw(struct hns_roce_dev *hr_dev, unsigned long obj,
3855                               dma_addr_t base_addr)
3856{
3857        struct hns_roce_cmq_desc desc;
3858        struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
3859        u32 idx = obj / (HNS_HW_PAGE_SIZE / hr_dev->caps.gmv_entry_sz);
3860        u64 addr = to_hr_hw_page_addr(base_addr);
3861
3862        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GMV_BT, false);
3863
3864        hr_reg_write(req, CFG_GMV_BT_BA_L, lower_32_bits(addr));
3865        hr_reg_write(req, CFG_GMV_BT_BA_H, upper_32_bits(addr));
3866        hr_reg_write(req, CFG_GMV_BT_IDX, idx);
3867
3868        return hns_roce_cmq_send(hr_dev, &desc, 1);
3869}
3870
3871static int set_hem_to_hw(struct hns_roce_dev *hr_dev, int obj,
3872                         dma_addr_t base_addr, u32 hem_type, int step_idx)
3873{
3874        int ret;
3875        u16 op;
3876
3877        if (unlikely(hem_type == HEM_TYPE_GMV))
3878                return config_gmv_ba_to_hw(hr_dev, obj, base_addr);
3879
3880        if (unlikely(hem_type == HEM_TYPE_SCCC && step_idx))
3881                return 0;
3882
3883        ret = get_op_for_set_hem(hr_dev, hem_type, step_idx, &op);
3884        if (ret < 0)
3885                return ret;
3886
3887        return config_hem_ba_to_hw(hr_dev, obj, base_addr, op);
3888}
3889
3890static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
3891                               struct hns_roce_hem_table *table, int obj,
3892                               int step_idx)
3893{
3894        struct hns_roce_hem_iter iter;
3895        struct hns_roce_hem_mhop mhop;
3896        struct hns_roce_hem *hem;
3897        unsigned long mhop_obj = obj;
3898        int i, j, k;
3899        int ret = 0;
3900        u64 hem_idx = 0;
3901        u64 l1_idx = 0;
3902        u64 bt_ba = 0;
3903        u32 chunk_ba_num;
3904        u32 hop_num;
3905
3906        if (!hns_roce_check_whether_mhop(hr_dev, table->type))
3907                return 0;
3908
3909        hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
3910        i = mhop.l0_idx;
3911        j = mhop.l1_idx;
3912        k = mhop.l2_idx;
3913        hop_num = mhop.hop_num;
3914        chunk_ba_num = mhop.bt_chunk_size / 8;
3915
3916        if (hop_num == 2) {
3917                hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num +
3918                          k;
3919                l1_idx = i * chunk_ba_num + j;
3920        } else if (hop_num == 1) {
3921                hem_idx = i * chunk_ba_num + j;
3922        } else if (hop_num == HNS_ROCE_HOP_NUM_0) {
3923                hem_idx = i;
3924        }
3925
3926        if (table->type == HEM_TYPE_SCCC)
3927                obj = mhop.l0_idx;
3928
3929        if (check_whether_last_step(hop_num, step_idx)) {
3930                hem = table->hem[hem_idx];
3931                for (hns_roce_hem_first(hem, &iter);
3932                     !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
3933                        bt_ba = hns_roce_hem_addr(&iter);
3934                        ret = set_hem_to_hw(hr_dev, obj, bt_ba, table->type,
3935                                            step_idx);
3936                }
3937        } else {
3938                if (step_idx == 0)
3939                        bt_ba = table->bt_l0_dma_addr[i];
3940                else if (step_idx == 1 && hop_num == 2)
3941                        bt_ba = table->bt_l1_dma_addr[l1_idx];
3942
3943                ret = set_hem_to_hw(hr_dev, obj, bt_ba, table->type, step_idx);
3944        }
3945
3946        return ret;
3947}
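
/*
 * For a multi-hop table the flat HEM index computed above is the
 * mixed-radix expansion of (l0_idx, l1_idx, l2_idx) with radix
 * chunk_ba_num. Worked example with an illustrative chunk_ba_num of
 * 256 and hop_num == 2:
 *
 *	i = 1, j = 2, k = 3
 *	hem_idx = 1 * 256 * 256 + 2 * 256 + 3 = 66051
 *	l1_idx  = 1 * 256 + 2 = 258
 */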
3948
3949static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
3950                                 struct hns_roce_hem_table *table, int obj,
3951                                 int step_idx)
3952{
3953        struct device *dev = hr_dev->dev;
3954        struct hns_roce_cmd_mailbox *mailbox;
3955        int ret;
3956        u16 op = 0xff;
3957
3958        if (!hns_roce_check_whether_mhop(hr_dev, table->type))
3959                return 0;
3960
3961        switch (table->type) {
3962        case HEM_TYPE_QPC:
3963                op = HNS_ROCE_CMD_DESTROY_QPC_BT0;
3964                break;
3965        case HEM_TYPE_MTPT:
3966                op = HNS_ROCE_CMD_DESTROY_MPT_BT0;
3967                break;
3968        case HEM_TYPE_CQC:
3969                op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
3970                break;
3971        case HEM_TYPE_SRQC:
3972                op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
3973                break;
3974        case HEM_TYPE_SCCC:
3975        case HEM_TYPE_QPC_TIMER:
3976        case HEM_TYPE_CQC_TIMER:
3977        case HEM_TYPE_GMV:
3978                return 0;
3979        default:
3980                dev_warn(dev, "table %u not to be destroyed by mailbox!\n",
3981                         table->type);
3982                return 0;
3983        }
3984
3985        op += step_idx;
3986
3987        mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3988        if (IS_ERR(mailbox))
3989                return PTR_ERR(mailbox);
3990
3991        /* configure the tag and op */
3992        ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, obj, 0, op,
3993                                HNS_ROCE_CMD_TIMEOUT_MSECS);
3994
3995        hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3996        return ret;
3997}
3998
3999static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
4000                                 struct hns_roce_v2_qp_context *context,
4001                                 struct hns_roce_v2_qp_context *qpc_mask,
4002                                 struct hns_roce_qp *hr_qp)
4003{
4004        struct hns_roce_cmd_mailbox *mailbox;
4005        int qpc_size;
4006        int ret;
4007
4008        mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
4009        if (IS_ERR(mailbox))
4010                return PTR_ERR(mailbox);
4011
4012        /* The qpc size of HIP08 is only 256B, which is half that of HIP09 */
4013        qpc_size = hr_dev->caps.qpc_sz;
4014        memcpy(mailbox->buf, context, qpc_size);
4015        memcpy(mailbox->buf + qpc_size, qpc_mask, qpc_size);
4016
4017        ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
4018                                HNS_ROCE_CMD_MODIFY_QPC,
4019                                HNS_ROCE_CMD_TIMEOUT_MSECS);
4020
4021        hns_roce_free_cmd_mailbox(hr_dev, mailbox);
4022
4023        return ret;
4024}
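
/*
 * The modify-QPC mailbox carries two back-to-back QPC images: the new
 * context at offset 0 and the mask at offset qpc_sz. With the 256-byte
 * HIP08 QPC noted above:
 *
 *	mailbox->buf + 0x000: context (new field values)
 *	mailbox->buf + 0x100: qpc_mask (cleared bits select the fields
 *	                      to update)
 */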
4025
4026static void set_access_flags(struct hns_roce_qp *hr_qp,
4027                             struct hns_roce_v2_qp_context *context,
4028                             struct hns_roce_v2_qp_context *qpc_mask,
4029                             const struct ib_qp_attr *attr, int attr_mask)
4030{
4031        u8 dest_rd_atomic;
4032        u32 access_flags;
4033
4034        dest_rd_atomic = (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ?
4035                         attr->max_dest_rd_atomic : hr_qp->resp_depth;
4036
4037        access_flags = (attr_mask & IB_QP_ACCESS_FLAGS) ?
4038                       attr->qp_access_flags : hr_qp->atomic_rd_en;
4039
4040        if (!dest_rd_atomic)
4041                access_flags &= IB_ACCESS_REMOTE_WRITE;
4042
4043        hr_reg_write_bool(context, QPC_RRE,
4044                          access_flags & IB_ACCESS_REMOTE_READ);
4045        hr_reg_clear(qpc_mask, QPC_RRE);
4046
4047        hr_reg_write_bool(context, QPC_RWE,
4048                          access_flags & IB_ACCESS_REMOTE_WRITE);
4049        hr_reg_clear(qpc_mask, QPC_RWE);
4050
4051        hr_reg_write_bool(context, QPC_ATE,
4052                          access_flags & IB_ACCESS_REMOTE_ATOMIC);
4053        hr_reg_clear(qpc_mask, QPC_ATE);
4054        hr_reg_write_bool(context, QPC_EXT_ATE,
4055                          access_flags & IB_ACCESS_REMOTE_ATOMIC);
4056        hr_reg_clear(qpc_mask, QPC_EXT_ATE);
4057}
4058
4059static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp,
4060                            struct hns_roce_v2_qp_context *context,
4061                            struct hns_roce_v2_qp_context *qpc_mask)
4062{
4063        hr_reg_write(context, QPC_SGE_SHIFT,
4064                     to_hr_hem_entries_shift(hr_qp->sge.sge_cnt,
4065                                             hr_qp->sge.sge_shift));
4066
4067        hr_reg_write(context, QPC_SQ_SHIFT, ilog2(hr_qp->sq.wqe_cnt));
4068
4069        hr_reg_write(context, QPC_RQ_SHIFT, ilog2(hr_qp->rq.wqe_cnt));
4070}
4071
4072static inline int get_cqn(struct ib_cq *ib_cq)
4073{
4074        return ib_cq ? to_hr_cq(ib_cq)->cqn : 0;
4075}
4076
4077static inline int get_pdn(struct ib_pd *ib_pd)
4078{
4079        return ib_pd ? to_hr_pd(ib_pd)->pdn : 0;
4080}
4081
4082static void modify_qp_reset_to_init(struct ib_qp *ibqp,
4083                                    const struct ib_qp_attr *attr,
4084                                    int attr_mask,
4085                                    struct hns_roce_v2_qp_context *context,
4086                                    struct hns_roce_v2_qp_context *qpc_mask)
4087{
4088        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4089        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4090
4091        /*
4092         * In the v2 engine, software passes the context and a context mask
4093         * to hardware when modifying a QP. To modify some fields in the
4094         * context, all bits of those fields in the context mask must be
4095         * cleared to 0 at the same time; the remaining bits are set to 1.
4096         */
4097        hr_reg_write(context, QPC_TST, to_hr_qp_type(ibqp->qp_type));
4098
4099        hr_reg_write(context, QPC_PD, get_pdn(ibqp->pd));
4100
4101        hr_reg_write(context, QPC_RQWS, ilog2(hr_qp->rq.max_gs));
4102
4103        set_qpc_wqe_cnt(hr_qp, context, qpc_mask);
4104
4105        /* When there is no VLAN, the VLAN ID must be set to 0xFFF */
4106        hr_reg_write(context, QPC_VLAN_ID, 0xfff);
4107
4108        if (ibqp->qp_type == IB_QPT_XRC_TGT) {
4109                context->qkey_xrcd = cpu_to_le32(hr_qp->xrcdn);
4110
4111                hr_reg_enable(context, QPC_XRC_QP_TYPE);
4112        }
4113
4114        if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
4115                hr_reg_enable(context, QPC_RQ_RECORD_EN);
4116
4117        hr_reg_write(context, QPC_RQ_DB_RECORD_ADDR_L,
4118                     lower_32_bits(hr_qp->rdb.dma) >> 1);
4119        hr_reg_write(context, QPC_RQ_DB_RECORD_ADDR_H,
4120                     upper_32_bits(hr_qp->rdb.dma));
4121
4122        if (ibqp->qp_type != IB_QPT_UD && ibqp->qp_type != IB_QPT_GSI)
4123                hr_reg_write_bool(context, QPC_RQIE,
4124                             hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE);
4125
4126        hr_reg_write(context, QPC_RX_CQN, get_cqn(ibqp->recv_cq));
4127
4128        if (ibqp->srq) {
4129                hr_reg_enable(context, QPC_SRQ_EN);
4130                hr_reg_write(context, QPC_SRQN, to_hr_srq(ibqp->srq)->srqn);
4131        }
4132
4133        hr_reg_enable(context, QPC_FRE);
4134
4135        hr_reg_write(context, QPC_TX_CQN, get_cqn(ibqp->send_cq));
4136
4137        if (hr_dev->caps.qpc_sz < HNS_ROCE_V3_QPC_SZ)
4138                return;
4139
4140        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_STASH)
4141                hr_reg_enable(&context->ext, QPCEX_STASH);
4142}
4143
4144static void modify_qp_init_to_init(struct ib_qp *ibqp,
4145                                   const struct ib_qp_attr *attr, int attr_mask,
4146                                   struct hns_roce_v2_qp_context *context,
4147                                   struct hns_roce_v2_qp_context *qpc_mask)
4148{
4149        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4150
4151        /*
4152         * In the v2 engine, software passes a context and a context mask to
4153         * the hardware when modifying a QP. To modify some fields in the
4154         * context, software must set all bits of those fields in the context
4155         * mask to 0; the bits of unmodified fields remain 0x1.
4156         */
4157        hr_reg_write(context, QPC_TST, to_hr_qp_type(ibqp->qp_type));
4158        hr_reg_clear(qpc_mask, QPC_TST);
4159
4160        hr_reg_write(context, QPC_PD, get_pdn(ibqp->pd));
4161        hr_reg_clear(qpc_mask, QPC_PD);
4162
4163        hr_reg_write(context, QPC_RX_CQN, get_cqn(ibqp->recv_cq));
4164        hr_reg_clear(qpc_mask, QPC_RX_CQN);
4165
4166        hr_reg_write(context, QPC_TX_CQN, get_cqn(ibqp->send_cq));
4167        hr_reg_clear(qpc_mask, QPC_TX_CQN);
4168
4169        if (ibqp->srq) {
4170                hr_reg_enable(context, QPC_SRQ_EN);
4171                hr_reg_clear(qpc_mask, QPC_SRQ_EN);
4172                hr_reg_write(context, QPC_SRQN, to_hr_srq(ibqp->srq)->srqn);
4173                hr_reg_clear(qpc_mask, QPC_SRQN);
4174        }
4175
4176        if (attr_mask & IB_QP_DEST_QPN) {
4177                hr_reg_write(context, QPC_DQPN, hr_qp->qpn);
4178                hr_reg_clear(qpc_mask, QPC_DQPN);
4179        }
4180}
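
/*
 * For illustration only, a minimal sketch of the write-plus-clear pattern
 * used above (the PD number 5 is hypothetical):
 *
 *     struct hns_roce_v2_qp_context ctx[2] = {};
 *     struct hns_roce_v2_qp_context *c = &ctx[0], *m = &ctx[1];
 *
 *     memset(m, 0xff, sizeof(*m));    <- all-ones mask: change nothing
 *     hr_reg_write(c, QPC_PD, 5);     <- stage the new value
 *     hr_reg_clear(m, QPC_PD);        <- zeroed mask bits select the field
 *
 * The hardware applies a context bit only where the matching mask bit is
 * 0, so all untouched QPC fields are preserved across the mailbox command.
 */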
4181
4182static int config_qp_rq_buf(struct hns_roce_dev *hr_dev,
4183                            struct hns_roce_qp *hr_qp,
4184                            struct hns_roce_v2_qp_context *context,
4185                            struct hns_roce_v2_qp_context *qpc_mask)
4186{
4187        u64 mtts[MTT_MIN_COUNT] = { 0 };
4188        u64 wqe_sge_ba;
4189        int count;
4190
4191        /* Search qp buf's mtts */
4192        count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, mtts,
4193                                  MTT_MIN_COUNT, &wqe_sge_ba);
4194        if (hr_qp->rq.wqe_cnt && count < 1) {
4195                ibdev_err(&hr_dev->ib_dev,
4196                          "failed to find RQ WQE, QPN = 0x%lx.\n", hr_qp->qpn);
4197                return -EINVAL;
4198        }
4199
4200        context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3);
4201        qpc_mask->wqe_sge_ba = 0;
4202
4203        /*
4204         * In the v2 engine, software passes a context and a context mask to
4205         * the hardware when modifying a QP. To modify some fields in the
4206         * context, software must set all bits of those fields in the context
4207         * mask to 0; the bits of unmodified fields remain 0x1.
4208         */
4209        hr_reg_write(context, QPC_WQE_SGE_BA_H, wqe_sge_ba >> (32 + 3));
4210        hr_reg_clear(qpc_mask, QPC_WQE_SGE_BA_H);
4211
4212        hr_reg_write(context, QPC_SQ_HOP_NUM,
4213                     to_hr_hem_hopnum(hr_dev->caps.wqe_sq_hop_num,
4214                                      hr_qp->sq.wqe_cnt));
4215        hr_reg_clear(qpc_mask, QPC_SQ_HOP_NUM);
4216
4217        hr_reg_write(context, QPC_SGE_HOP_NUM,
4218                     to_hr_hem_hopnum(hr_dev->caps.wqe_sge_hop_num,
4219                                      hr_qp->sge.sge_cnt));
4220        hr_reg_clear(qpc_mask, QPC_SGE_HOP_NUM);
4221
4222        hr_reg_write(context, QPC_RQ_HOP_NUM,
4223                     to_hr_hem_hopnum(hr_dev->caps.wqe_rq_hop_num,
4224                                      hr_qp->rq.wqe_cnt));
4225
4226        hr_reg_clear(qpc_mask, QPC_RQ_HOP_NUM);
4227
4228        hr_reg_write(context, QPC_WQE_SGE_BA_PG_SZ,
4229                     to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.ba_pg_shift));
4230        hr_reg_clear(qpc_mask, QPC_WQE_SGE_BA_PG_SZ);
4231
4232        hr_reg_write(context, QPC_WQE_SGE_BUF_PG_SZ,
4233                     to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.buf_pg_shift));
4234        hr_reg_clear(qpc_mask, QPC_WQE_SGE_BUF_PG_SZ);
4235
4236        context->rq_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0]));
4237        qpc_mask->rq_cur_blk_addr = 0;
4238
4239        hr_reg_write(context, QPC_RQ_CUR_BLK_ADDR_H,
4240                     upper_32_bits(to_hr_hw_page_addr(mtts[0])));
4241        hr_reg_clear(qpc_mask, QPC_RQ_CUR_BLK_ADDR_H);
4242
4243        context->rq_nxt_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[1]));
4244        qpc_mask->rq_nxt_blk_addr = 0;
4245
4246        hr_reg_write(context, QPC_RQ_NXT_BLK_ADDR_H,
4247                     upper_32_bits(to_hr_hw_page_addr(mtts[1])));
4248        hr_reg_clear(qpc_mask, QPC_RQ_NXT_BLK_ADDR_H);
4249
4250        return 0;
4251}
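
/*
 * Worked example for the base-address split above, assuming a hypothetical
 * 8-byte-aligned wqe_sge_ba of 0x1234567890:
 *
 *     wqe_sge_ba >> 3        = 0x2468ACF12
 *     low 32 bits            = 0x468ACF12  -> context->wqe_sge_ba
 *     wqe_sge_ba >> (32 + 3) = 0x2         -> QPC_WQE_SGE_BA_H
 *
 * The low 3 bits may be dropped because the address is 8-byte aligned.
 */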
4252
4253static int config_qp_sq_buf(struct hns_roce_dev *hr_dev,
4254                            struct hns_roce_qp *hr_qp,
4255                            struct hns_roce_v2_qp_context *context,
4256                            struct hns_roce_v2_qp_context *qpc_mask)
4257{
4258        struct ib_device *ibdev = &hr_dev->ib_dev;
4259        u64 sge_cur_blk = 0;
4260        u64 sq_cur_blk = 0;
4261        int count;
4262
4263        /* Search qp buf's mtts */
4264        count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL);
4265        if (count < 1) {
4266                ibdev_err(ibdev, "failed to find QP(0x%lx) SQ buf.\n",
4267                          hr_qp->qpn);
4268                return -EINVAL;
4269        }
4270        if (hr_qp->sge.sge_cnt > 0) {
4271                count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
4272                                          hr_qp->sge.offset,
4273                                          &sge_cur_blk, 1, NULL);
4274                if (count < 1) {
4275                        ibdev_err(ibdev, "failed to find QP(0x%lx) SGE buf.\n",
4276                                  hr_qp->qpn);
4277                        return -EINVAL;
4278                }
4279        }
4280
4281        /*
4282         * In the v2 engine, software passes a context and a context mask to
4283         * the hardware when modifying a QP. To modify some fields in the
4284         * context, software must set all bits of those fields in the context
4285         * mask to 0; the bits of unmodified fields remain 0x1.
4286         */
4287        hr_reg_write(context, QPC_SQ_CUR_BLK_ADDR_L,
4288                     lower_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
4289        hr_reg_write(context, QPC_SQ_CUR_BLK_ADDR_H,
4290                     upper_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
4291        hr_reg_clear(qpc_mask, QPC_SQ_CUR_BLK_ADDR_L);
4292        hr_reg_clear(qpc_mask, QPC_SQ_CUR_BLK_ADDR_H);
4293
4294        hr_reg_write(context, QPC_SQ_CUR_SGE_BLK_ADDR_L,
4295                     lower_32_bits(to_hr_hw_page_addr(sge_cur_blk)));
4296        hr_reg_write(context, QPC_SQ_CUR_SGE_BLK_ADDR_H,
4297                     upper_32_bits(to_hr_hw_page_addr(sge_cur_blk)));
4298        hr_reg_clear(qpc_mask, QPC_SQ_CUR_SGE_BLK_ADDR_L);
4299        hr_reg_clear(qpc_mask, QPC_SQ_CUR_SGE_BLK_ADDR_H);
4300
4301        hr_reg_write(context, QPC_RX_SQ_CUR_BLK_ADDR_L,
4302                     lower_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
4303        hr_reg_write(context, QPC_RX_SQ_CUR_BLK_ADDR_H,
4304                     upper_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
4305        hr_reg_clear(qpc_mask, QPC_RX_SQ_CUR_BLK_ADDR_L);
4306        hr_reg_clear(qpc_mask, QPC_RX_SQ_CUR_BLK_ADDR_H);
4307
4308        return 0;
4309}
4310
4311static inline enum ib_mtu get_mtu(struct ib_qp *ibqp,
4312                                  const struct ib_qp_attr *attr)
4313{
4314        if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
4315                return IB_MTU_4096;
4316
4317        return attr->path_mtu;
4318}
4319
4320static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
4321                                 const struct ib_qp_attr *attr, int attr_mask,
4322                                 struct hns_roce_v2_qp_context *context,
4323                                 struct hns_roce_v2_qp_context *qpc_mask)
4324{
4325        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4326        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4327        struct ib_device *ibdev = &hr_dev->ib_dev;
4328        dma_addr_t trrl_ba;
4329        dma_addr_t irrl_ba;
4330        enum ib_mtu ib_mtu;
4331        u8 lp_pktn_ini;
4332        u64 *mtts;
4333        u8 *dmac;
4334        u8 *smac;
4335        u32 port;
4336        int mtu;
4337        int ret;
4338
4339        ret = config_qp_rq_buf(hr_dev, hr_qp, context, qpc_mask);
4340        if (ret) {
4341                ibdev_err(ibdev, "failed to config rq buf, ret = %d.\n", ret);
4342                return ret;
4343        }
4344
4345        /* Search IRRL's mtts */
4346        mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
4347                                   hr_qp->qpn, &irrl_ba);
4348        if (!mtts) {
4349                ibdev_err(ibdev, "failed to find qp irrl_table.\n");
4350                return -EINVAL;
4351        }
4352
4353        /* Search TRRL's mtts */
4354        mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
4355                                   hr_qp->qpn, &trrl_ba);
4356        if (!mtts) {
4357                ibdev_err(ibdev, "failed to find qp trrl_table.\n");
4358                return -EINVAL;
4359        }
4360
4361        if (attr_mask & IB_QP_ALT_PATH) {
4362                ibdev_err(ibdev, "INIT2RTR attr_mask (0x%x) error.\n",
4363                          attr_mask);
4364                return -EINVAL;
4365        }
4366
4367        hr_reg_write(context, QPC_TRRL_BA_L, trrl_ba >> 4);
4368        hr_reg_clear(qpc_mask, QPC_TRRL_BA_L);
4369        context->trrl_ba = cpu_to_le32(trrl_ba >> (16 + 4));
4370        qpc_mask->trrl_ba = 0;
4371        hr_reg_write(context, QPC_TRRL_BA_H, trrl_ba >> (32 + 16 + 4));
4372        hr_reg_clear(qpc_mask, QPC_TRRL_BA_H);
4373
4374        context->irrl_ba = cpu_to_le32(irrl_ba >> 6);
4375        qpc_mask->irrl_ba = 0;
4376        hr_reg_write(context, QPC_IRRL_BA_H, irrl_ba >> (32 + 6));
4377        hr_reg_clear(qpc_mask, QPC_IRRL_BA_H);
4378
4379        hr_reg_enable(context, QPC_RMT_E2E);
4380        hr_reg_clear(qpc_mask, QPC_RMT_E2E);
4381
4382        hr_reg_write(context, QPC_SIG_TYPE, hr_qp->sq_signal_bits);
4383        hr_reg_clear(qpc_mask, QPC_SIG_TYPE);
4384
4385        port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;
4386
4387        smac = (u8 *)hr_dev->dev_addr[port];
4388        dmac = (u8 *)attr->ah_attr.roce.dmac;
4389        /* When dmac equals smac or loop_idc is 1, loopback is enabled */
4390        if (ether_addr_equal_unaligned(dmac, smac) ||
4391            hr_dev->loop_idc == 0x1) {
4392                hr_reg_write(context, QPC_LBI, hr_dev->loop_idc);
4393                hr_reg_clear(qpc_mask, QPC_LBI);
4394        }
4395
4396        if (attr_mask & IB_QP_DEST_QPN) {
4397                hr_reg_write(context, QPC_DQPN, attr->dest_qp_num);
4398                hr_reg_clear(qpc_mask, QPC_DQPN);
4399        }
4400
4401        memcpy(&(context->dmac), dmac, sizeof(u32));
4402        hr_reg_write(context, QPC_DMAC_H, *((u16 *)(&dmac[4])));
4403        qpc_mask->dmac = 0;
4404        hr_reg_clear(qpc_mask, QPC_DMAC_H);
4405
4406        ib_mtu = get_mtu(ibqp, attr);
4407        hr_qp->path_mtu = ib_mtu;
4408
4409        mtu = ib_mtu_enum_to_int(ib_mtu);
4410        if (WARN_ON(mtu < 0))
4411                return -EINVAL;
4412
4413        if (attr_mask & IB_QP_PATH_MTU) {
4414                hr_reg_write(context, QPC_MTU, ib_mtu);
4415                hr_reg_clear(qpc_mask, QPC_MTU);
4416        }
4417
4418#define MAX_LP_MSG_LEN 65536
4419        /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 64KB */
4420        lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / mtu);
4421
4422        hr_reg_write(context, QPC_LP_PKTN_INI, lp_pktn_ini);
4423        hr_reg_clear(qpc_mask, QPC_LP_PKTN_INI);
4424
4425        /* ACK_REQ_FREQ should be larger than or equal to LP_PKTN_INI */
4426        hr_reg_write(context, QPC_ACK_REQ_FREQ, lp_pktn_ini);
4427        hr_reg_clear(qpc_mask, QPC_ACK_REQ_FREQ);
4428
4429        hr_reg_clear(qpc_mask, QPC_RX_REQ_PSN_ERR);
4430        hr_reg_clear(qpc_mask, QPC_RX_REQ_MSN);
4431        hr_reg_clear(qpc_mask, QPC_RX_REQ_LAST_OPTYPE);
4432
4433        context->rq_rnr_timer = 0;
4434        qpc_mask->rq_rnr_timer = 0;
4435
4436        hr_reg_clear(qpc_mask, QPC_TRRL_HEAD_MAX);
4437        hr_reg_clear(qpc_mask, QPC_TRRL_TAIL_MAX);
4438
4439        /* RoCEE sends 2^lp_sgen_ini segments each time */
4440        hr_reg_write(context, QPC_LP_SGEN_INI, 3);
4441        hr_reg_clear(qpc_mask, QPC_LP_SGEN_INI);
4442
4443        return 0;
4444}
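
/*
 * Worked example for LP_PKTN_INI above, assuming a path MTU of 4096:
 * MAX_LP_MSG_LEN / mtu = 65536 / 4096 = 16, so lp_pktn_ini = ilog2(16) = 4.
 * A message is thus segmented into at most 2^4 = 16 MTU-sized packets, and
 * ACK_REQ_FREQ is programmed with the same value to satisfy the constraint
 * noted above.
 */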
4445
4446static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
4447                                const struct ib_qp_attr *attr, int attr_mask,
4448                                struct hns_roce_v2_qp_context *context,
4449                                struct hns_roce_v2_qp_context *qpc_mask)
4450{
4451        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4452        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4453        struct ib_device *ibdev = &hr_dev->ib_dev;
4454        int ret;
4455
4456        /* Alternate path and path migration are not supported */
4457        if (attr_mask & (IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE)) {
4458                ibdev_err(ibdev, "RTR2RTS attr_mask (0x%x) error\n", attr_mask);
4459                return -EINVAL;
4460        }
4461
4462        ret = config_qp_sq_buf(hr_dev, hr_qp, context, qpc_mask);
4463        if (ret) {
4464                ibdev_err(ibdev, "failed to config sq buf, ret = %d.\n", ret);
4465                return ret;
4466        }
4467
4468        /*
4469         * Clear some fields in the context. Because the default values of
4470         * all fields in the context are zero, we need not set them to 0
4471         * again, but we must clear the relevant fields in the context mask.
4472         */
4473        hr_reg_clear(qpc_mask, QPC_IRRL_SGE_IDX);
4474
4475        hr_reg_clear(qpc_mask, QPC_RX_ACK_MSN);
4476
4477        hr_reg_clear(qpc_mask, QPC_ACK_LAST_OPTYPE);
4478        hr_reg_clear(qpc_mask, QPC_IRRL_PSN_VLD);
4479        hr_reg_clear(qpc_mask, QPC_IRRL_PSN);
4480
4481        hr_reg_clear(qpc_mask, QPC_IRRL_TAIL_REAL);
4482
4483        hr_reg_clear(qpc_mask, QPC_RETRY_MSG_MSN);
4484
4485        hr_reg_clear(qpc_mask, QPC_RNR_RETRY_FLAG);
4486
4487        hr_reg_clear(qpc_mask, QPC_CHECK_FLG);
4488
4489        hr_reg_write(context, QPC_LSN, 0x100);
4490        hr_reg_clear(qpc_mask, QPC_LSN);
4491
4492        hr_reg_clear(qpc_mask, QPC_V2_IRRL_HEAD);
4493
4494        return 0;
4495}
4496
4497static inline u16 get_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
4498{
4499        if (!fl)
4500                fl = rdma_calc_flow_label(lqpn, rqpn);
4501
4502        return rdma_flow_label_to_udp_sport(fl);
4503}
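
/*
 * For illustration: when fl == 0 the flow label is first derived from the
 * local and remote QPNs, so e.g. (hypothetical QPNs):
 *
 *     u16 sport = get_udp_sport(0, 0x8, 0x9);
 *
 * equals rdma_flow_label_to_udp_sport(rdma_calc_flow_label(0x8, 0x9)),
 * giving the connection a stable source port in the RoCEv2 UDP range.
 */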
4504
4505static int get_dip_ctx_idx(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
4506                           u32 *dip_idx)
4507{
4508        const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
4509        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4510        struct hns_roce_dip *hr_dip;
4511        unsigned long flags;
4512        int ret = 0;
4513
4514        spin_lock_irqsave(&hr_dev->dip_list_lock, flags);
4515
4516        list_for_each_entry(hr_dip, &hr_dev->dip_list, node) {
4517                if (!memcmp(grh->dgid.raw, hr_dip->dgid, 16)) {
4518                        *dip_idx = hr_dip->dip_idx;
4519                        goto out;
4520                }
4521        }
4520
4521        /* If no dgid is found, a new dip and a mapping between dgid and
4522         * dip_idx will be created.
4523         */
4524        hr_dip = kzalloc(sizeof(*hr_dip), GFP_ATOMIC);
4525        if (!hr_dip) {
4526                ret = -ENOMEM;
4527                goto out;
4528        }
4529
4530        memcpy(hr_dip->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
4531        hr_dip->dip_idx = *dip_idx = ibqp->qp_num;
4532        list_add_tail(&hr_dip->node, &hr_dev->dip_list);
4533
4534out:
4535        spin_unlock_irqrestore(&hr_dev->dip_list_lock, flags);
4536        return ret;
4537}
4538
4539enum {
4540        CONG_DCQCN,
4541        CONG_WINDOW,
4542};
4543
4544enum {
4545        UNSUPPORT_CONG_LEVEL,
4546        SUPPORT_CONG_LEVEL,
4547};
4548
4549enum {
4550        CONG_LDCP,
4551        CONG_HC3,
4552};
4553
4554enum {
4555        DIP_INVALID,
4556        DIP_VALID,
4557};
4558
4559enum {
4560        WND_LIMIT,
4561        WND_UNLIMIT,
4562};
4563
4564static int check_cong_type(struct ib_qp *ibqp,
4565                           struct hns_roce_congestion_algorithm *cong_alg)
4566{
4567        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4568
4569        /* different congestion types match different configurations */
4570        switch (hr_dev->caps.cong_type) {
4571        case CONG_TYPE_DCQCN:
4572                cong_alg->alg_sel = CONG_DCQCN;
4573                cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
4574                cong_alg->dip_vld = DIP_INVALID;
4575                cong_alg->wnd_mode_sel = WND_LIMIT;
4576                break;
4577        case CONG_TYPE_LDCP:
4578                cong_alg->alg_sel = CONG_WINDOW;
4579                cong_alg->alg_sub_sel = CONG_LDCP;
4580                cong_alg->dip_vld = DIP_INVALID;
4581                cong_alg->wnd_mode_sel = WND_UNLIMIT;
4582                break;
4583        case CONG_TYPE_HC3:
4584                cong_alg->alg_sel = CONG_WINDOW;
4585                cong_alg->alg_sub_sel = CONG_HC3;
4586                cong_alg->dip_vld = DIP_INVALID;
4587                cong_alg->wnd_mode_sel = WND_LIMIT;
4588                break;
4589        case CONG_TYPE_DIP:
4590                cong_alg->alg_sel = CONG_DCQCN;
4591                cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
4592                cong_alg->dip_vld = DIP_VALID;
4593                cong_alg->wnd_mode_sel = WND_LIMIT;
4594                break;
4595        default:
4596                ibdev_err(&hr_dev->ib_dev,
4597                          "error type(%u) for congestion selection.\n",
4598                          hr_dev->caps.cong_type);
4599                return -EINVAL;
4600        }
4601
4602        return 0;
4603}
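
/*
 * Summary of the mapping implemented above:
 *
 *     cong_type  alg_sel      alg_sub_sel           dip_vld      wnd_mode_sel
 *     DCQCN      CONG_DCQCN   UNSUPPORT_CONG_LEVEL  DIP_INVALID  WND_LIMIT
 *     LDCP       CONG_WINDOW  CONG_LDCP             DIP_INVALID  WND_UNLIMIT
 *     HC3        CONG_WINDOW  CONG_HC3              DIP_INVALID  WND_LIMIT
 *     DIP        CONG_DCQCN   UNSUPPORT_CONG_LEVEL  DIP_VALID    WND_LIMIT
 */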
4604
4605static int fill_cong_field(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
4606                           struct hns_roce_v2_qp_context *context,
4607                           struct hns_roce_v2_qp_context *qpc_mask)
4608{
4609        const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
4610        struct hns_roce_congestion_algorithm cong_field;
4611        struct ib_device *ibdev = ibqp->device;
4612        struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
4613        u32 dip_idx = 0;
4614        int ret;
4615
4616        if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 ||
4617            grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE)
4618                return 0;
4619
4620        ret = check_cong_type(ibqp, &cong_field);
4621        if (ret)
4622                return ret;
4623
4624        hr_reg_write(context, QPC_CONG_ALGO_TMPL_ID, hr_dev->cong_algo_tmpl_id +
4625                     hr_dev->caps.cong_type * HNS_ROCE_CONG_SIZE);
4626        hr_reg_clear(qpc_mask, QPC_CONG_ALGO_TMPL_ID);
4627        hr_reg_write(&context->ext, QPCEX_CONG_ALG_SEL, cong_field.alg_sel);
4628        hr_reg_clear(&qpc_mask->ext, QPCEX_CONG_ALG_SEL);
4629        hr_reg_write(&context->ext, QPCEX_CONG_ALG_SUB_SEL,
4630                     cong_field.alg_sub_sel);
4631        hr_reg_clear(&qpc_mask->ext, QPCEX_CONG_ALG_SUB_SEL);
4632        hr_reg_write(&context->ext, QPCEX_DIP_CTX_IDX_VLD, cong_field.dip_vld);
4633        hr_reg_clear(&qpc_mask->ext, QPCEX_DIP_CTX_IDX_VLD);
4634        hr_reg_write(&context->ext, QPCEX_SQ_RQ_NOT_FORBID_EN,
4635                     cong_field.wnd_mode_sel);
4636        hr_reg_clear(&qpc_mask->ext, QPCEX_SQ_RQ_NOT_FORBID_EN);
4637
4638        /* if dip is disabled, there is no need to set dip idx */
4639        if (cong_field.dip_vld == 0)
4640                return 0;
4641
4642        ret = get_dip_ctx_idx(ibqp, attr, &dip_idx);
4643        if (ret) {
4644                ibdev_err(ibdev, "failed to fill cong field, ret = %d.\n", ret);
4645                return ret;
4646        }
4647
4648        hr_reg_write(&context->ext, QPCEX_DIP_CTX_IDX, dip_idx);
4649        hr_reg_write(&qpc_mask->ext, QPCEX_DIP_CTX_IDX, 0);
4650
4651        return 0;
4652}
4653
4654static int hns_roce_v2_set_path(struct ib_qp *ibqp,
4655                                const struct ib_qp_attr *attr,
4656                                int attr_mask,
4657                                struct hns_roce_v2_qp_context *context,
4658                                struct hns_roce_v2_qp_context *qpc_mask)
4659{
4660        const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
4661        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4662        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4663        struct ib_device *ibdev = &hr_dev->ib_dev;
4664        const struct ib_gid_attr *gid_attr = NULL;
4665        int is_roce_protocol;
4666        u16 vlan_id = 0xffff;
4667        bool is_udp = false;
4668        u8 ib_port;
4669        u8 hr_port;
4670        int ret;
4671
4672        ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num : hr_qp->port + 1;
4673        hr_port = ib_port - 1;
4674        is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) &&
4675                           rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;
4676
4677        if (is_roce_protocol) {
4678                gid_attr = attr->ah_attr.grh.sgid_attr;
4679                ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
4680                if (ret)
4681                        return ret;
4682
4683                if (gid_attr)
4684                        is_udp = (gid_attr->gid_type ==
4685                                 IB_GID_TYPE_ROCE_UDP_ENCAP);
4686        }
4687
4688        /* Only HIP08 needs to set the vlan_en bits in QPC */
4689        if (vlan_id < VLAN_N_VID &&
4690            hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
4691                hr_reg_enable(context, QPC_RQ_VLAN_EN);
4692                hr_reg_clear(qpc_mask, QPC_RQ_VLAN_EN);
4693                hr_reg_enable(context, QPC_SQ_VLAN_EN);
4694                hr_reg_clear(qpc_mask, QPC_SQ_VLAN_EN);
4695        }
4696
4697        hr_reg_write(context, QPC_VLAN_ID, vlan_id);
4698        hr_reg_clear(qpc_mask, QPC_VLAN_ID);
4699
4700        if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) {
4701                ibdev_err(ibdev, "sgid_index(%u) too large. max is %d\n",
4702                          grh->sgid_index, hr_dev->caps.gid_table_len[hr_port]);
4703                return -EINVAL;
4704        }
4705
4706        if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) {
4707                ibdev_err(ibdev, "ah attr is not RDMA roce type\n");
4708                return -EINVAL;
4709        }
4710
4711        hr_reg_write(context, QPC_UDPSPN,
4712                     is_udp ? get_udp_sport(grh->flow_label, ibqp->qp_num,
4713                                            attr->dest_qp_num) : 0);
4714
4715        hr_reg_clear(qpc_mask, QPC_UDPSPN);
4716
4717        hr_reg_write(context, QPC_GMV_IDX, grh->sgid_index);
4718
4719        hr_reg_clear(qpc_mask, QPC_GMV_IDX);
4720
4721        hr_reg_write(context, QPC_HOPLIMIT, grh->hop_limit);
4722        hr_reg_clear(qpc_mask, QPC_HOPLIMIT);
4723
4724        ret = fill_cong_field(ibqp, attr, context, qpc_mask);
4725        if (ret)
4726                return ret;
4727
4728        hr_reg_write(context, QPC_TC, get_tclass(&attr->ah_attr.grh));
4729        hr_reg_clear(qpc_mask, QPC_TC);
4730
4731        hr_reg_write(context, QPC_FL, grh->flow_label);
4732        hr_reg_clear(qpc_mask, QPC_FL);
4733        memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
4734        memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
4735
4736        hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
4737        if (unlikely(hr_qp->sl > MAX_SERVICE_LEVEL)) {
4738                ibdev_err(ibdev,
4739                          "failed to fill QPC, sl (%d) shouldn't be larger than %d.\n",
4740                          hr_qp->sl, MAX_SERVICE_LEVEL);
4741                return -EINVAL;
4742        }
4743
4744        hr_reg_write(context, QPC_SL, hr_qp->sl);
4745        hr_reg_clear(qpc_mask, QPC_SL);
4746
4747        return 0;
4748}
4749
4750static bool check_qp_state(enum ib_qp_state cur_state,
4751                           enum ib_qp_state new_state)
4752{
4753        static const bool sm[][IB_QPS_ERR + 1] = {
4754                [IB_QPS_RESET] = { [IB_QPS_RESET] = true,
4755                                   [IB_QPS_INIT] = true },
4756                [IB_QPS_INIT] = { [IB_QPS_RESET] = true,
4757                                  [IB_QPS_INIT] = true,
4758                                  [IB_QPS_RTR] = true,
4759                                  [IB_QPS_ERR] = true },
4760                [IB_QPS_RTR] = { [IB_QPS_RESET] = true,
4761                                 [IB_QPS_RTS] = true,
4762                                 [IB_QPS_ERR] = true },
4763                [IB_QPS_RTS] = { [IB_QPS_RESET] = true,
4764                                 [IB_QPS_RTS] = true,
4765                                 [IB_QPS_ERR] = true },
4766                [IB_QPS_SQD] = {},
4767                [IB_QPS_SQE] = {},
4768                [IB_QPS_ERR] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true }
4769        };
4770
4771        return sm[cur_state][new_state];
4772}
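
/*
 * Example: check_qp_state(IB_QPS_INIT, IB_QPS_RTR) is true, while
 * check_qp_state(IB_QPS_RESET, IB_QPS_RTS) is false, so a QP in RESET must
 * pass through INIT and RTR before it can reach RTS.
 */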
4773
4774static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
4775                                      const struct ib_qp_attr *attr,
4776                                      int attr_mask,
4777                                      enum ib_qp_state cur_state,
4778                                      enum ib_qp_state new_state,
4779                                      struct hns_roce_v2_qp_context *context,
4780                                      struct hns_roce_v2_qp_context *qpc_mask)
4781{
4782        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4783        int ret = 0;
4784
4785        if (!check_qp_state(cur_state, new_state)) {
4786                ibdev_err(&hr_dev->ib_dev, "Illegal state for QP!\n");
4787                return -EINVAL;
4788        }
4789
4790        if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
4791                memset(qpc_mask, 0, hr_dev->caps.qpc_sz);
4792                modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
4793                                        qpc_mask);
4794        } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
4795                modify_qp_init_to_init(ibqp, attr, attr_mask, context,
4796                                       qpc_mask);
4797        } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
4798                ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
4799                                            qpc_mask);
4800        } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
4801                ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
4802                                           qpc_mask);
4803        }
4804
4805        return ret;
4806}
4807
4808static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
4809                                      const struct ib_qp_attr *attr,
4810                                      int attr_mask,
4811                                      struct hns_roce_v2_qp_context *context,
4812                                      struct hns_roce_v2_qp_context *qpc_mask)
4813{
4814        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4815        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4816        int ret = 0;
4817
4818        if (attr_mask & IB_QP_AV) {
4819                ret = hns_roce_v2_set_path(ibqp, attr, attr_mask, context,
4820                                           qpc_mask);
4821                if (ret)
4822                        return ret;
4823        }
4824
4825        if (attr_mask & IB_QP_TIMEOUT) {
4826                if (attr->timeout < 31) {
4827                        hr_reg_write(context, QPC_AT, attr->timeout);
4828                        hr_reg_clear(qpc_mask, QPC_AT);
4829                } else {
4830                        ibdev_warn(&hr_dev->ib_dev,
4831                                   "Local ACK timeout shall be 0 to 30.\n");
4832                }
4833        }
4834
4835        if (attr_mask & IB_QP_RETRY_CNT) {
4836                hr_reg_write(context, QPC_RETRY_NUM_INIT, attr->retry_cnt);
4837                hr_reg_clear(qpc_mask, QPC_RETRY_NUM_INIT);
4838
4839                hr_reg_write(context, QPC_RETRY_CNT, attr->retry_cnt);
4840                hr_reg_clear(qpc_mask, QPC_RETRY_CNT);
4841        }
4842
4843        if (attr_mask & IB_QP_RNR_RETRY) {
4844                hr_reg_write(context, QPC_RNR_NUM_INIT, attr->rnr_retry);
4845                hr_reg_clear(qpc_mask, QPC_RNR_NUM_INIT);
4846
4847                hr_reg_write(context, QPC_RNR_CNT, attr->rnr_retry);
4848                hr_reg_clear(qpc_mask, QPC_RNR_CNT);
4849        }
4850
4851        if (attr_mask & IB_QP_SQ_PSN) {
4852                hr_reg_write(context, QPC_SQ_CUR_PSN, attr->sq_psn);
4853                hr_reg_clear(qpc_mask, QPC_SQ_CUR_PSN);
4854
4855                hr_reg_write(context, QPC_SQ_MAX_PSN, attr->sq_psn);
4856                hr_reg_clear(qpc_mask, QPC_SQ_MAX_PSN);
4857
4858                hr_reg_write(context, QPC_RETRY_MSG_PSN_L, attr->sq_psn);
4859                hr_reg_clear(qpc_mask, QPC_RETRY_MSG_PSN_L);
4860
4861                hr_reg_write(context, QPC_RETRY_MSG_PSN_H,
4862                             attr->sq_psn >> RETRY_MSG_PSN_SHIFT);
4863                hr_reg_clear(qpc_mask, QPC_RETRY_MSG_PSN_H);
4864
4865                hr_reg_write(context, QPC_RETRY_MSG_FPKT_PSN, attr->sq_psn);
4866                hr_reg_clear(qpc_mask, QPC_RETRY_MSG_FPKT_PSN);
4867
4868                hr_reg_write(context, QPC_RX_ACK_EPSN, attr->sq_psn);
4869                hr_reg_clear(qpc_mask, QPC_RX_ACK_EPSN);
4870        }
4871
4872        if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
4873             attr->max_dest_rd_atomic) {
4874                hr_reg_write(context, QPC_RR_MAX,
4875                             fls(attr->max_dest_rd_atomic - 1));
4876                hr_reg_clear(qpc_mask, QPC_RR_MAX);
4877        }
4878
4879        if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
4880                hr_reg_write(context, QPC_SR_MAX, fls(attr->max_rd_atomic - 1));
4881                hr_reg_clear(qpc_mask, QPC_SR_MAX);
4882        }
4883
4884        if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
4885                set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
4886
4887        if (attr_mask & IB_QP_MIN_RNR_TIMER) {
4888                hr_reg_write(context, QPC_MIN_RNR_TIME, attr->min_rnr_timer);
4889                hr_reg_clear(qpc_mask, QPC_MIN_RNR_TIME);
4890        }
4891
4892        if (attr_mask & IB_QP_RQ_PSN) {
4893                hr_reg_write(context, QPC_RX_REQ_EPSN, attr->rq_psn);
4894                hr_reg_clear(qpc_mask, QPC_RX_REQ_EPSN);
4895
4896                hr_reg_write(context, QPC_RAQ_PSN, attr->rq_psn - 1);
4897                hr_reg_clear(qpc_mask, QPC_RAQ_PSN);
4898        }
4899
4900        if (attr_mask & IB_QP_QKEY) {
4901                context->qkey_xrcd = cpu_to_le32(attr->qkey);
4902                qpc_mask->qkey_xrcd = 0;
4903                hr_qp->qkey = attr->qkey;
4904        }
4905
4906        return ret;
4907}
4908
4909static void hns_roce_v2_record_opt_fields(struct ib_qp *ibqp,
4910                                          const struct ib_qp_attr *attr,
4911                                          int attr_mask)
4912{
4913        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4914        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4915
4916        if (attr_mask & IB_QP_ACCESS_FLAGS)
4917                hr_qp->atomic_rd_en = attr->qp_access_flags;
4918
4919        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
4920                hr_qp->resp_depth = attr->max_dest_rd_atomic;
4921        if (attr_mask & IB_QP_PORT) {
4922                hr_qp->port = attr->port_num - 1;
4923                hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
4924        }
4925}
4926
4927static void clear_qp(struct hns_roce_qp *hr_qp)
4928{
4929        struct ib_qp *ibqp = &hr_qp->ibqp;
4930
4931        if (ibqp->send_cq)
4932                hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
4933                                     hr_qp->qpn, NULL);
4934
4935        if (ibqp->recv_cq && ibqp->recv_cq != ibqp->send_cq)
4936                hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq),
4937                                     hr_qp->qpn, ibqp->srq ?
4938                                     to_hr_srq(ibqp->srq) : NULL);
4939
4940        if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
4941                *hr_qp->rdb.db_record = 0;
4942
4943        hr_qp->rq.head = 0;
4944        hr_qp->rq.tail = 0;
4945        hr_qp->sq.head = 0;
4946        hr_qp->sq.tail = 0;
4947        hr_qp->next_sge = 0;
4948}
4949
4950static void v2_set_flushed_fields(struct ib_qp *ibqp,
4951                                  struct hns_roce_v2_qp_context *context,
4952                                  struct hns_roce_v2_qp_context *qpc_mask)
4953{
4954        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4955        unsigned long sq_flag = 0;
4956        unsigned long rq_flag = 0;
4957
4958        if (ibqp->qp_type == IB_QPT_XRC_TGT)
4959                return;
4960
4961        spin_lock_irqsave(&hr_qp->sq.lock, sq_flag);
4962        hr_reg_write(context, QPC_SQ_PRODUCER_IDX, hr_qp->sq.head);
4963        hr_reg_clear(qpc_mask, QPC_SQ_PRODUCER_IDX);
4964        hr_qp->state = IB_QPS_ERR;
4965        spin_unlock_irqrestore(&hr_qp->sq.lock, sq_flag);
4966
4967        if (ibqp->srq || ibqp->qp_type == IB_QPT_XRC_INI) /* no RQ */
4968                return;
4969
4970        spin_lock_irqsave(&hr_qp->rq.lock, rq_flag);
4971        hr_reg_write(context, QPC_RQ_PRODUCER_IDX, hr_qp->rq.head);
4972        hr_reg_clear(qpc_mask, QPC_RQ_PRODUCER_IDX);
4973        spin_unlock_irqrestore(&hr_qp->rq.lock, rq_flag);
4974}
4975
4976static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
4977                                 const struct ib_qp_attr *attr,
4978                                 int attr_mask, enum ib_qp_state cur_state,
4979                                 enum ib_qp_state new_state)
4980{
4981        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4982        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4983        struct hns_roce_v2_qp_context ctx[2];
4984        struct hns_roce_v2_qp_context *context = ctx;
4985        struct hns_roce_v2_qp_context *qpc_mask = ctx + 1;
4986        struct ib_device *ibdev = &hr_dev->ib_dev;
4987        int ret;
4988
4989        if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
4990                return -EOPNOTSUPP;
4991
4992        /*
4993         * In the v2 engine, software passes a context and a context mask to
4994         * the hardware when modifying a QP. To modify some fields in the
4995         * context, software must set all bits of those fields in the context
4996         * mask to 0; the bits of unmodified fields remain 0x1.
4997         */
4998        memset(context, 0, hr_dev->caps.qpc_sz);
4999        memset(qpc_mask, 0xff, hr_dev->caps.qpc_sz);
5000
5001        ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state,
5002                                         new_state, context, qpc_mask);
5003        if (ret)
5004                goto out;
5005
5006        /* When QP state is err, SQ and RQ WQE should be flushed */
5007        if (new_state == IB_QPS_ERR)
5008                v2_set_flushed_fields(ibqp, context, qpc_mask);
5009
5010        /* Configure the optional fields */
5011        ret = hns_roce_v2_set_opt_fields(ibqp, attr, attr_mask, context,
5012                                         qpc_mask);
5013        if (ret)
5014                goto out;
5015
5016        hr_reg_write_bool(context, QPC_INV_CREDIT,
5017                          to_hr_qp_type(hr_qp->ibqp.qp_type) == SERV_TYPE_XRC ||
5018                          ibqp->srq);
5019        hr_reg_clear(qpc_mask, QPC_INV_CREDIT);
5020
5021        /* Every state transition must change the QP state field */
5022        hr_reg_write(context, QPC_QP_ST, new_state);
5023        hr_reg_clear(qpc_mask, QPC_QP_ST);
5024
5025        /* SW passes the context to HW */
5026        ret = hns_roce_v2_qp_modify(hr_dev, context, qpc_mask, hr_qp);
5027        if (ret) {
5028                ibdev_err(ibdev, "failed to modify QP, ret = %d.\n", ret);
5029                goto out;
5030        }
5031
5032        hr_qp->state = new_state;
5033
5034        hns_roce_v2_record_opt_fields(ibqp, attr, attr_mask);
5035
5036        if (new_state == IB_QPS_RESET && !ibqp->uobject)
5037                clear_qp(hr_qp);
5038
5039out:
5040        return ret;
5041}
5042
5043static int to_ib_qp_st(enum hns_roce_v2_qp_state state)
5044{
5045        static const enum ib_qp_state map[] = {
5046                [HNS_ROCE_QP_ST_RST] = IB_QPS_RESET,
5047                [HNS_ROCE_QP_ST_INIT] = IB_QPS_INIT,
5048                [HNS_ROCE_QP_ST_RTR] = IB_QPS_RTR,
5049                [HNS_ROCE_QP_ST_RTS] = IB_QPS_RTS,
5050                [HNS_ROCE_QP_ST_SQD] = IB_QPS_SQD,
5051                [HNS_ROCE_QP_ST_SQER] = IB_QPS_SQE,
5052                [HNS_ROCE_QP_ST_ERR] = IB_QPS_ERR,
5053                [HNS_ROCE_QP_ST_SQ_DRAINING] = IB_QPS_SQD
5054        };
5055
5056        return (state < ARRAY_SIZE(map)) ? map[state] : -1;
5057}
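
/*
 * Example: to_ib_qp_st(HNS_ROCE_QP_ST_SQ_DRAINING) yields IB_QPS_SQD,
 * while any hardware state beyond the end of the map yields -1, which the
 * caller rejects as an illegal state.
 */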
5058
5059static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
5060                                 struct hns_roce_qp *hr_qp,
5061                                 struct hns_roce_v2_qp_context *hr_context)
5062{
5063        struct hns_roce_cmd_mailbox *mailbox;
5064        int ret;
5065
5066        mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5067        if (IS_ERR(mailbox))
5068                return PTR_ERR(mailbox);
5069
5070        ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
5071                                HNS_ROCE_CMD_QUERY_QPC,
5072                                HNS_ROCE_CMD_TIMEOUT_MSECS);
5073        if (ret)
5074                goto out;
5075
5076        memcpy(hr_context, mailbox->buf, hr_dev->caps.qpc_sz);
5077
5078out:
5079        hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5080        return ret;
5081}
5082
5083static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
5084                                int qp_attr_mask,
5085                                struct ib_qp_init_attr *qp_init_attr)
5086{
5087        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
5088        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
5089        struct hns_roce_v2_qp_context context = {};
5090        struct ib_device *ibdev = &hr_dev->ib_dev;
5091        int tmp_qp_state;
5092        int state;
5093        int ret;
5094
5095        memset(qp_attr, 0, sizeof(*qp_attr));
5096        memset(qp_init_attr, 0, sizeof(*qp_init_attr));
5097
5098        mutex_lock(&hr_qp->mutex);
5099
5100        if (hr_qp->state == IB_QPS_RESET) {
5101                qp_attr->qp_state = IB_QPS_RESET;
5102                ret = 0;
5103                goto done;
5104        }
5105
5106        ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, &context);
5107        if (ret) {
5108                ibdev_err(ibdev, "failed to query QPC, ret = %d.\n", ret);
5109                ret = -EINVAL;
5110                goto out;
5111        }
5112
5113        state = hr_reg_read(&context, QPC_QP_ST);
5114        tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
5115        if (tmp_qp_state == -1) {
5116                ibdev_err(ibdev, "Illegal ib_qp_state\n");
5117                ret = -EINVAL;
5118                goto out;
5119        }
5120        hr_qp->state = (u8)tmp_qp_state;
5121        qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
5122        qp_attr->path_mtu = (enum ib_mtu)hr_reg_read(&context, QPC_MTU);
5123        qp_attr->path_mig_state = IB_MIG_ARMED;
5124        qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
5125        if (hr_qp->ibqp.qp_type == IB_QPT_UD)
5126                qp_attr->qkey = le32_to_cpu(context.qkey_xrcd);
5127
5128        qp_attr->rq_psn = hr_reg_read(&context, QPC_RX_REQ_EPSN);
5129        qp_attr->sq_psn = (u32)hr_reg_read(&context, QPC_SQ_CUR_PSN);
5130        qp_attr->dest_qp_num = hr_reg_read(&context, QPC_DQPN);
5131        qp_attr->qp_access_flags =
5132                ((hr_reg_read(&context, QPC_RRE)) << V2_QP_RRE_S) |
5133                ((hr_reg_read(&context, QPC_RWE)) << V2_QP_RWE_S) |
5134                ((hr_reg_read(&context, QPC_ATE)) << V2_QP_ATE_S);
5135
5136        if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
5137            hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
5138            hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT) {
5139                struct ib_global_route *grh =
5140                        rdma_ah_retrieve_grh(&qp_attr->ah_attr);
5141
5142                rdma_ah_set_sl(&qp_attr->ah_attr,
5143                               hr_reg_read(&context, QPC_SL));
5144                grh->flow_label = hr_reg_read(&context, QPC_FL);
5145                grh->sgid_index = hr_reg_read(&context, QPC_GMV_IDX);
5146                grh->hop_limit = hr_reg_read(&context, QPC_HOPLIMIT);
5147                grh->traffic_class = hr_reg_read(&context, QPC_TC);
5148
5149                memcpy(grh->dgid.raw, context.dgid, sizeof(grh->dgid.raw));
5150        }
5151
5152        qp_attr->port_num = hr_qp->port + 1;
5153        qp_attr->sq_draining = 0;
5154        qp_attr->max_rd_atomic = 1 << hr_reg_read(&context, QPC_SR_MAX);
5155        qp_attr->max_dest_rd_atomic = 1 << hr_reg_read(&context, QPC_RR_MAX);
5156
5157        qp_attr->min_rnr_timer = (u8)hr_reg_read(&context, QPC_MIN_RNR_TIME);
5158        qp_attr->timeout = (u8)hr_reg_read(&context, QPC_AT);
5159        qp_attr->retry_cnt = hr_reg_read(&context, QPC_RETRY_NUM_INIT);
5160        qp_attr->rnr_retry = hr_reg_read(&context, QPC_RNR_NUM_INIT);
5161
5162done:
5163        qp_attr->cur_qp_state = qp_attr->qp_state;
5164        qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
5165        qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
5166        qp_attr->cap.max_inline_data = hr_qp->max_inline_data;
5167
5168        qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
5169        qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
5170
5171        qp_init_attr->qp_context = ibqp->qp_context;
5172        qp_init_attr->qp_type = ibqp->qp_type;
5173        qp_init_attr->recv_cq = ibqp->recv_cq;
5174        qp_init_attr->send_cq = ibqp->send_cq;
5175        qp_init_attr->srq = ibqp->srq;
5176        qp_init_attr->cap = qp_attr->cap;
5177        qp_init_attr->sq_sig_type = hr_qp->sq_signal_bits;
5178
5179out:
5180        mutex_unlock(&hr_qp->mutex);
5181        return ret;
5182}
5183
5184static inline int modify_qp_is_ok(struct hns_roce_qp *hr_qp)
5185{
5186        return ((hr_qp->ibqp.qp_type == IB_QPT_RC ||
5187                 hr_qp->ibqp.qp_type == IB_QPT_UD ||
5188                 hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
5189                 hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT) &&
5190                hr_qp->state != IB_QPS_RESET);
5191}
5192
5193static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
5194                                         struct hns_roce_qp *hr_qp,
5195                                         struct ib_udata *udata)
5196{
5197        struct ib_device *ibdev = &hr_dev->ib_dev;
5198        struct hns_roce_cq *send_cq, *recv_cq;
5199        unsigned long flags;
5200        int ret = 0;
5201
5202        if (modify_qp_is_ok(hr_qp)) {
5203                /* Modify qp to reset before destroying qp */
5204                ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
5205                                            hr_qp->state, IB_QPS_RESET);
5206                if (ret)
5207                        ibdev_err(ibdev,
5208                                  "failed to modify QP to RST, ret = %d.\n",
5209                                  ret);
5210        }
5211
5212        send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL;
5213        recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : NULL;
5214
5215        spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
5216        hns_roce_lock_cqs(send_cq, recv_cq);
5217
5218        if (!udata) {
5219                if (recv_cq)
5220                        __hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn,
5221                                               (hr_qp->ibqp.srq ?
5222                                                to_hr_srq(hr_qp->ibqp.srq) :
5223                                                NULL));
5224
5225                if (send_cq && send_cq != recv_cq)
5226                        __hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
5227
5228        }
5229
5230        hns_roce_qp_remove(hr_dev, hr_qp);
5231
5232        hns_roce_unlock_cqs(send_cq, recv_cq);
5233        spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
5234
5235        return ret;
5236}
5237
5238static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
5239{
5240        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
5241        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
5242        int ret;
5243
5244        ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
5245        if (ret)
5246                ibdev_err(&hr_dev->ib_dev,
5247                          "failed to destroy QP, QPN = 0x%06lx, ret = %d.\n",
5248                          hr_qp->qpn, ret);
5249
5250        hns_roce_qp_destroy(hr_dev, hr_qp, udata);
5251
5252        return 0;
5253}
5254
5255static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
5256                                            struct hns_roce_qp *hr_qp)
5257{
5258        struct ib_device *ibdev = &hr_dev->ib_dev;
5259        struct hns_roce_sccc_clr_done *resp;
5260        struct hns_roce_sccc_clr *clr;
5261        struct hns_roce_cmq_desc desc;
5262        int ret, i;
5263
5264        if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
5265                return 0;
5266
5267        mutex_lock(&hr_dev->qp_table.scc_mutex);
5268
5269        /* set scc ctx clear done flag */
5270        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false);
5271        ret = hns_roce_cmq_send(hr_dev, &desc, 1);
5272        if (ret) {
5273                ibdev_err(ibdev, "failed to reset SCC ctx, ret = %d.\n", ret);
5274                goto out;
5275        }
5276
5277        /* clear scc context */
5278        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLR_SCCC, false);
5279        clr = (struct hns_roce_sccc_clr *)desc.data;
5280        clr->qpn = cpu_to_le32(hr_qp->qpn);
5281        ret = hns_roce_cmq_send(hr_dev, &desc, 1);
5282        if (ret) {
5283                ibdev_err(ibdev, "failed to clear SCC ctx, ret = %d.\n", ret);
5284                goto out;
5285        }
5286
5287        /* query whether the scc context clear is done */
5288        resp = (struct hns_roce_sccc_clr_done *)desc.data;
5289        for (i = 0; i <= HNS_ROCE_CMQ_SCC_CLR_DONE_CNT; i++) {
5290                hns_roce_cmq_setup_basic_desc(&desc,
5291                                              HNS_ROCE_OPC_QUERY_SCCC, true);
5292                ret = hns_roce_cmq_send(hr_dev, &desc, 1);
5293                if (ret) {
5294                        ibdev_err(ibdev, "failed to query clr cmq, ret = %d\n",
5295                                  ret);
5296                        goto out;
5297                }
5298
5299                if (resp->clr_done)
5300                        goto out;
5301
5302                msleep(20);
5303        }
5304
5305        ibdev_err(ibdev, "Query SCC clr done flag overtime.\n");
5306        ret = -ETIMEDOUT;
5307
5308out:
5309        mutex_unlock(&hr_dev->qp_table.scc_mutex);
5310        return ret;
5311}
5312
5313#define DMA_IDX_SHIFT 3
5314#define DMA_WQE_SHIFT 3
5315
5316static int hns_roce_v2_write_srqc_index_queue(struct hns_roce_srq *srq,
5317                                              struct hns_roce_srq_context *ctx)
5318{
5319        struct hns_roce_idx_que *idx_que = &srq->idx_que;
5320        struct ib_device *ibdev = srq->ibsrq.device;
5321        struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
5322        u64 mtts_idx[MTT_MIN_COUNT] = {};
5323        dma_addr_t dma_handle_idx = 0;
5324        int ret;
5325
5326        /* Get physical address of idx que buf */
5327        ret = hns_roce_mtr_find(hr_dev, &idx_que->mtr, 0, mtts_idx,
5328                                ARRAY_SIZE(mtts_idx), &dma_handle_idx);
5329        if (ret < 1) {
5330                ibdev_err(ibdev, "failed to find mtr for SRQ idx, ret = %d.\n",
5331                          ret);
5332                return -ENOBUFS;
5333        }
5334
5335        hr_reg_write(ctx, SRQC_IDX_HOP_NUM,
5336                     to_hr_hem_hopnum(hr_dev->caps.idx_hop_num, srq->wqe_cnt));
5337
5338        hr_reg_write(ctx, SRQC_IDX_BT_BA_L, dma_handle_idx >> DMA_IDX_SHIFT);
5339        hr_reg_write(ctx, SRQC_IDX_BT_BA_H,
5340                     upper_32_bits(dma_handle_idx >> DMA_IDX_SHIFT));
5341
5342        hr_reg_write(ctx, SRQC_IDX_BA_PG_SZ,
5343                     to_hr_hw_page_shift(idx_que->mtr.hem_cfg.ba_pg_shift));
5344        hr_reg_write(ctx, SRQC_IDX_BUF_PG_SZ,
5345                     to_hr_hw_page_shift(idx_que->mtr.hem_cfg.buf_pg_shift));
5346
5347        hr_reg_write(ctx, SRQC_IDX_CUR_BLK_ADDR_L,
5348                     to_hr_hw_page_addr(mtts_idx[0]));
5349        hr_reg_write(ctx, SRQC_IDX_CUR_BLK_ADDR_H,
5350                     upper_32_bits(to_hr_hw_page_addr(mtts_idx[0])));
5351
5352        hr_reg_write(ctx, SRQC_IDX_NXT_BLK_ADDR_L,
5353                     to_hr_hw_page_addr(mtts_idx[1]));
5354        hr_reg_write(ctx, SRQC_IDX_NXT_BLK_ADDR_H,
5355                     upper_32_bits(to_hr_hw_page_addr(mtts_idx[1])));
5356
5357        return 0;
5358}
5359
5360static int hns_roce_v2_write_srqc(struct hns_roce_srq *srq, void *mb_buf)
5361{
5362        struct ib_device *ibdev = srq->ibsrq.device;
5363        struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
5364        struct hns_roce_srq_context *ctx = mb_buf;
5365        u64 mtts_wqe[MTT_MIN_COUNT] = {};
5366        dma_addr_t dma_handle_wqe = 0;
5367        int ret;
5368
5369        memset(ctx, 0, sizeof(*ctx));
5370
5371        /* Get the physical address of srq buf */
5372        ret = hns_roce_mtr_find(hr_dev, &srq->buf_mtr, 0, mtts_wqe,
5373                                ARRAY_SIZE(mtts_wqe), &dma_handle_wqe);
5374        if (ret < 1) {
5375                ibdev_err(ibdev, "failed to find mtr for SRQ WQE, ret = %d.\n",
5376                          ret);
5377                return -ENOBUFS;
5378        }
5379
5380        hr_reg_write(ctx, SRQC_SRQ_ST, 1);
5381        hr_reg_write_bool(ctx, SRQC_SRQ_TYPE,
5382                          srq->ibsrq.srq_type == IB_SRQT_XRC);
5383        hr_reg_write(ctx, SRQC_PD, to_hr_pd(srq->ibsrq.pd)->pdn);
5384        hr_reg_write(ctx, SRQC_SRQN, srq->srqn);
5385        hr_reg_write(ctx, SRQC_XRCD, srq->xrcdn);
5386        hr_reg_write(ctx, SRQC_XRC_CQN, srq->cqn);
5387        hr_reg_write(ctx, SRQC_SHIFT, ilog2(srq->wqe_cnt));
5388        hr_reg_write(ctx, SRQC_RQWS,
5389                     srq->max_gs <= 0 ? 0 : fls(srq->max_gs - 1));
5390
5391        hr_reg_write(ctx, SRQC_WQE_HOP_NUM,
5392                     to_hr_hem_hopnum(hr_dev->caps.srqwqe_hop_num,
5393                                      srq->wqe_cnt));
5394
5395        hr_reg_write(ctx, SRQC_WQE_BT_BA_L, dma_handle_wqe >> DMA_WQE_SHIFT);
5396        hr_reg_write(ctx, SRQC_WQE_BT_BA_H,
5397                     upper_32_bits(dma_handle_wqe >> DMA_WQE_SHIFT));
5398
5399        hr_reg_write(ctx, SRQC_WQE_BA_PG_SZ,
5400                     to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.ba_pg_shift));
5401        hr_reg_write(ctx, SRQC_WQE_BUF_PG_SZ,
5402                     to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.buf_pg_shift));
5403
5404        return hns_roce_v2_write_srqc_index_queue(srq, ctx);
5405}
5406
5407static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
5408                                  struct ib_srq_attr *srq_attr,
5409                                  enum ib_srq_attr_mask srq_attr_mask,
5410                                  struct ib_udata *udata)
5411{
5412        struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
5413        struct hns_roce_srq *srq = to_hr_srq(ibsrq);
5414        struct hns_roce_srq_context *srq_context;
5415        struct hns_roce_srq_context *srqc_mask;
5416        struct hns_roce_cmd_mailbox *mailbox;
5417        int ret;
5418
5419        /* Resizing SRQs is not supported yet */
5420        if (srq_attr_mask & IB_SRQ_MAX_WR)
5421                return -EINVAL;
5422
5423        if (srq_attr_mask & IB_SRQ_LIMIT) {
5424                if (srq_attr->srq_limit > srq->wqe_cnt)
5425                        return -EINVAL;
5426
5427                mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5428                if (IS_ERR(mailbox))
5429                        return PTR_ERR(mailbox);
5430
5431                srq_context = mailbox->buf;
5432                srqc_mask = (struct hns_roce_srq_context *)mailbox->buf + 1;
5433
5434                memset(srqc_mask, 0xff, sizeof(*srqc_mask));
5435
5436                hr_reg_write(srq_context, SRQC_LIMIT_WL, srq_attr->srq_limit);
5437                hr_reg_clear(srqc_mask, SRQC_LIMIT_WL);
5438
5439                ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, srq->srqn, 0,
5440                                        HNS_ROCE_CMD_MODIFY_SRQC,
5441                                        HNS_ROCE_CMD_TIMEOUT_MSECS);
5442                hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5443                if (ret) {
5444                        ibdev_err(&hr_dev->ib_dev,
5445                                  "failed to handle cmd of modifying SRQ, ret = %d.\n",
5446                                  ret);
5447                        return ret;
5448                }
5449        }
5450
5451        return 0;
5452}
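
/*
 * For illustration, a kernel consumer arms the SRQ limit through the verbs
 * layer (the threshold of 64 is hypothetical):
 *
 *     struct ib_srq_attr attr = { .srq_limit = 64 };
 *     int err = ib_modify_srq(srq, &attr, IB_SRQ_LIMIT);
 *
 * which lands here and programs SRQC_LIMIT_WL; the device then raises an
 * SRQ limit event once the count of unconsumed receive WQEs falls below
 * that threshold.
 */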
5453
5454static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
5455{
5456        struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
5457        struct hns_roce_srq *srq = to_hr_srq(ibsrq);
5458        struct hns_roce_srq_context *srq_context;
5459        struct hns_roce_cmd_mailbox *mailbox;
5460        int ret;
5461
5462        mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5463        if (IS_ERR(mailbox))
5464                return PTR_ERR(mailbox);
5465
5466        srq_context = mailbox->buf;
5467        ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srq->srqn, 0,
5468                                HNS_ROCE_CMD_QUERY_SRQC,
5469                                HNS_ROCE_CMD_TIMEOUT_MSECS);
5470        if (ret) {
5471                ibdev_err(&hr_dev->ib_dev,
5472                          "failed to process the query SRQ command, ret = %d.\n",
5473                          ret);
5474                goto out;
5475        }
5476
5477        attr->srq_limit = hr_reg_read(srq_context, SRQC_LIMIT_WL);
5478        attr->max_wr = srq->wqe_cnt;
5479        attr->max_sge = srq->max_gs - srq->rsv_sge;
5480
5481out:
5482        hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5483        return ret;
5484}
5485
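/*
 * CQ moderation: cq_count bounds how many completions may accumulate and
 * cq_period how long the hardware may wait (in device-specific units)
 * before raising a completion interrupt.
 */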
5486static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
5487{
5488        struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
5489        struct hns_roce_v2_cq_context *cq_context;
5490        struct hns_roce_cq *hr_cq = to_hr_cq(cq);
5491        struct hns_roce_v2_cq_context *cqc_mask;
5492        struct hns_roce_cmd_mailbox *mailbox;
5493        int ret;
5494
5495        mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5496        if (IS_ERR(mailbox))
5497                return PTR_ERR(mailbox);
5498
5499        cq_context = mailbox->buf;
5500        cqc_mask = (struct hns_roce_v2_cq_context *)mailbox->buf + 1;
5501
5502        memset(cqc_mask, 0xff, sizeof(*cqc_mask));
5503
5504        hr_reg_write(cq_context, CQC_CQ_MAX_CNT, cq_count);
5505        hr_reg_clear(cqc_mask, CQC_CQ_MAX_CNT);
5506        hr_reg_write(cq_context, CQC_CQ_PERIOD, cq_period);
5507        hr_reg_clear(cqc_mask, CQC_CQ_PERIOD);
5508
5509        ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 1,
5510                                HNS_ROCE_CMD_MODIFY_CQC,
5511                                HNS_ROCE_CMD_TIMEOUT_MSECS);
5512        hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5513        if (ret)
5514                ibdev_err(&hr_dev->ib_dev,
5515                          "failed to process the modify CQ command, ret = %d.\n",
5516                          ret);
5517
5518        return ret;
5519}
5520
5521static void hns_roce_irq_work_handle(struct work_struct *work)
5522{
5523        struct hns_roce_work *irq_work =
5524                                container_of(work, struct hns_roce_work, work);
5525        struct ib_device *ibdev = &irq_work->hr_dev->ib_dev;
5526
5527        switch (irq_work->event_type) {
5528        case HNS_ROCE_EVENT_TYPE_PATH_MIG:
5529                ibdev_info(ibdev, "Path migration succeeded.\n");
5530                break;
5531        case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
5532                ibdev_warn(ibdev, "Path migration failed.\n");
5533                break;
5534        case HNS_ROCE_EVENT_TYPE_COMM_EST:
5535                break;
5536        case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
5537                ibdev_warn(ibdev, "Send queue drained.\n");
5538                break;
5539        case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
5540                ibdev_err(ibdev, "Local work queue 0x%x catastrophic error, sub_type = %d\n",
5541                          irq_work->queue_num, irq_work->sub_type);
5542                break;
5543        case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
5544                ibdev_err(ibdev, "Invalid request error on local work queue 0x%x.\n",
5545                          irq_work->queue_num);
5546                break;
5547        case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
5548                ibdev_err(ibdev, "Local access violation on work queue 0x%x, sub_type = %d\n",
5549                          irq_work->queue_num, irq_work->sub_type);
5550                break;
5551        case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
5552                ibdev_warn(ibdev, "SRQ limit reached.\n");
5553                break;
5554        case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
5555                ibdev_warn(ibdev, "SRQ last wqe reached.\n");
5556                break;
5557        case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
5558                ibdev_err(ibdev, "SRQ catastrophic error.\n");
5559                break;
5560        case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
5561                ibdev_err(ibdev, "CQ 0x%x access error.\n", irq_work->queue_num);
5562                break;
5563        case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
5564                ibdev_warn(ibdev, "CQ 0x%x overflow.\n", irq_work->queue_num);
5565                break;
5566        case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
5567                ibdev_warn(ibdev, "DB overflow.\n");
5568                break;
5569        case HNS_ROCE_EVENT_TYPE_FLR:
5570                ibdev_warn(ibdev, "Function level reset.\n");
5571                break;
5572        case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
5573                ibdev_err(ibdev, "XRC domain violation error.\n");
5574                break;
5575        case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
5576                ibdev_err(ibdev, "Invalid XRCETH error.\n");
5577                break;
5578        default:
5579                break;
5580        }
5581
5582        kfree(irq_work);
5583}
5584
5585static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
5586                                      struct hns_roce_eq *eq, u32 queue_num)
5587{
5588        struct hns_roce_work *irq_work;
5589
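        /* Runs in EQ interrupt context, so the allocation must not sleep. */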
5590        irq_work = kzalloc(sizeof(struct hns_roce_work), GFP_ATOMIC);
5591        if (!irq_work)
5592                return;
5593
5594        INIT_WORK(&(irq_work->work), hns_roce_irq_work_handle);
5595        irq_work->hr_dev = hr_dev;
5596        irq_work->event_type = eq->event_type;
5597        irq_work->sub_type = eq->sub_type;
5598        irq_work->queue_num = queue_num;
5599        queue_work(hr_dev->irq_workq, &(irq_work->work));
5600}
5601
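/*
 * Ring the EQ doorbell: publish the new consumer index and re-arm the EQ
 * according to its arming state (AEQ and CEQ use different doorbell
 * commands).
 */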
5602static void update_eq_db(struct hns_roce_eq *eq)
5603{
5604        struct hns_roce_dev *hr_dev = eq->hr_dev;
5605        struct hns_roce_v2_db eq_db = {};
5606
5607        if (eq->type_flag == HNS_ROCE_AEQ) {
5608                hr_reg_write(&eq_db, EQ_DB_CMD,
5609                             eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
5610                             HNS_ROCE_EQ_DB_CMD_AEQ :
5611                             HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
5612        } else {
5613                hr_reg_write(&eq_db, EQ_DB_TAG, eq->eqn);
5614
5615                hr_reg_write(&eq_db, EQ_DB_CMD,
5616                             eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
5617                             HNS_ROCE_EQ_DB_CMD_CEQ :
5618                             HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
5619        }
5620
5621        hr_reg_write(&eq_db, EQ_DB_CI, eq->cons_index);
5622
5623        hns_roce_write64(hr_dev, (__le32 *)&eq_db, eq->db_reg);
5624}
5625
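/*
 * Hardware toggles the AEQE owner bit on every pass through the ring, so an
 * entry is software-owned exactly when its owner bit differs from the lap
 * parity of the consumer index (cons_index & entries, with entries a power
 * of two) - which is the XOR tested below.
 */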
5626static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
5627{
5628        struct hns_roce_aeqe *aeqe;
5629
5630        aeqe = hns_roce_buf_offset(eq->mtr.kmem,
5631                                   (eq->cons_index & (eq->entries - 1)) *
5632                                   eq->eqe_size);
5633
5634        return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^
5635                !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
5636}
5637
5638static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
5639                               struct hns_roce_eq *eq)
5640{
5641        struct device *dev = hr_dev->dev;
5642        struct hns_roce_aeqe *aeqe = next_aeqe_sw_v2(eq);
5643        int aeqe_found = 0;
5644        int event_type;
5645        u32 queue_num;
5646        int sub_type;
5647
5648        while (aeqe) {
5649                /* Make sure we read AEQ entry after we have checked the
5650                 * ownership bit
5651                 */
5652                dma_rmb();
5653
5654                event_type = roce_get_field(aeqe->asyn,
5655                                            HNS_ROCE_V2_AEQE_EVENT_TYPE_M,
5656                                            HNS_ROCE_V2_AEQE_EVENT_TYPE_S);
5657                sub_type = roce_get_field(aeqe->asyn,
5658                                          HNS_ROCE_V2_AEQE_SUB_TYPE_M,
5659                                          HNS_ROCE_V2_AEQE_SUB_TYPE_S);
5660                queue_num = roce_get_field(aeqe->event.queue_event.num,
5661                                           HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
5662                                           HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
5663
5664                switch (event_type) {
5665                case HNS_ROCE_EVENT_TYPE_PATH_MIG:
5666                case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
5667                case HNS_ROCE_EVENT_TYPE_COMM_EST:
5668                case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
5669                case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
5670                case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
5671                case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
5672                case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
5673                case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
5674                case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
5675                        hns_roce_qp_event(hr_dev, queue_num, event_type);
5676                        break;
5677                case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
5678                case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
5679                        hns_roce_srq_event(hr_dev, queue_num, event_type);
5680                        break;
5681                case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
5682                case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
5683                        hns_roce_cq_event(hr_dev, queue_num, event_type);
5684                        break;
5685                case HNS_ROCE_EVENT_TYPE_MB:
5686                        hns_roce_cmd_event(hr_dev,
5687                                        le16_to_cpu(aeqe->event.cmd.token),
5688                                        aeqe->event.cmd.status,
5689                                        le64_to_cpu(aeqe->event.cmd.out_param));
5690                        break;
5691                case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
5692                case HNS_ROCE_EVENT_TYPE_FLR:
5693                        break;
5694                default:
5695                        dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
5696                                event_type, eq->eqn, eq->cons_index);
5697                        break;
5698                }
5699
5700                eq->event_type = event_type;
5701                eq->sub_type = sub_type;
5702                ++eq->cons_index;
5703                aeqe_found = 1;
5704
5705                hns_roce_v2_init_irq_work(hr_dev, eq, queue_num);
5706
5707                aeqe = next_aeqe_sw_v2(eq);
5708        }
5709
5710        update_eq_db(eq);
5711        return aeqe_found;
5712}
5713
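/* Same owner-bit validity scheme as next_aeqe_sw_v2(). */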
5714static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
5715{
5716        struct hns_roce_ceqe *ceqe;
5717
5718        ceqe = hns_roce_buf_offset(eq->mtr.kmem,
5719                                   (eq->cons_index & (eq->entries - 1)) *
5720                                   eq->eqe_size);
5721
5722        return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^
5723                (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
5724}
5725
5726static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
5727                               struct hns_roce_eq *eq)
5728{
5729        struct hns_roce_ceqe *ceqe = next_ceqe_sw_v2(eq);
5730        int ceqe_found = 0;
5731        u32 cqn;
5732
5733        while (ceqe) {
5734                /* Make sure we read CEQ entry after we have checked the
5735                 * ownership bit
5736                 */
5737                dma_rmb();
5738
5739                cqn = roce_get_field(ceqe->comp, HNS_ROCE_V2_CEQE_COMP_CQN_M,
5740                                     HNS_ROCE_V2_CEQE_COMP_CQN_S);
5741
5742                hns_roce_cq_completion(hr_dev, cqn);
5743
5744                ++eq->cons_index;
5745                ceqe_found = 1;
5746
5747                ceqe = next_ceqe_sw_v2(eq);
5748        }
5749
5750        update_eq_db(eq);
5751
5752        return ceqe_found;
5753}
5754
5755static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
5756{
5757        struct hns_roce_eq *eq = eq_ptr;
5758        struct hns_roce_dev *hr_dev = eq->hr_dev;
5759        int int_work;
5760
5761        if (eq->type_flag == HNS_ROCE_CEQ)
5762                /* Completion event interrupt */
5763                int_work = hns_roce_v2_ceq_int(hr_dev, eq);
5764        else
5765                /* Asynchronous event interrupt */
5766                int_work = hns_roce_v2_aeq_int(hr_dev, eq);
5767
5768        return IRQ_RETVAL(int_work);
5769}
5770
5771static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
5772{
5773        struct hns_roce_dev *hr_dev = dev_id;
5774        struct device *dev = hr_dev->dev;
5775        int int_work = 0;
5776        u32 int_st;
5777        u32 int_en;
5778
5779        /* Abnormal interrupt */
5780        int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
5781        int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);
5782
5783        if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
5784                struct pci_dev *pdev = hr_dev->pci_dev;
5785                struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
5786                const struct hnae3_ae_ops *ops = ae_dev->ops;
5787
5788                dev_err(dev, "AEQ overflow!\n");
5789
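                /* Acknowledge the event by writing the status bit back;
                 * the register appears to be write-1-to-clear.
                 */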
5790                int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S;
5791                roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
5792
5793                /* Set reset level for reset_event() */
5794                if (ops->set_default_reset_request)
5795                        ops->set_default_reset_request(ae_dev,
5796                                                       HNAE3_FUNC_RESET);
5797                if (ops->reset_event)
5798                        ops->reset_event(pdev, NULL);
5799
5800                int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
5801                roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
5802
5803                int_work = 1;
5804        } else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_RAS_INT_S)) {
5805                dev_err(dev, "RAS interrupt!\n");
5806
5807                int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_RAS_INT_S;
5808                roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
5809
5810                int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
5811                roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
5812
5813                int_work = 1;
5814        } else {
5815                dev_err(dev, "No abnormal irq found!\n");
5816        }
5817
5818        return IRQ_RETVAL(int_work);
5819}
5820
5821static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
5822                                        int eq_num, u32 enable_flag)
5823{
5824        int i;
5825
5826        for (i = 0; i < eq_num; i++)
5827                roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
5828                           i * EQ_REG_OFFSET, enable_flag);
5829
5830        roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, enable_flag);
5831        roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG, enable_flag);
5832}
5833
5834static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
5835{
5836        struct device *dev = hr_dev->dev;
5837        int ret;
5838
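        /* EQNs below num_comp_vectors address CEQs; the rest are AEQs. */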
5839        if (eqn < hr_dev->caps.num_comp_vectors)
5840                ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
5841                                        0, HNS_ROCE_CMD_DESTROY_CEQC,
5842                                        HNS_ROCE_CMD_TIMEOUT_MSECS);
5843        else
5844                ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
5845                                        0, HNS_ROCE_CMD_DESTROY_AEQC,
5846                                        HNS_ROCE_CMD_TIMEOUT_MSECS);
5847        if (ret)
5848                dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);
5849}
5850
5851static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
5852{
5853        hns_roce_mtr_destroy(hr_dev, &eq->mtr);
5854}
5855
5856static void init_eq_config(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
5857{
5858        eq->db_reg = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
5859        eq->cons_index = 0;
5860        eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
5861        eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
5862        eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
5863        eq->shift = ilog2((unsigned int)eq->entries);
5864}
5865
5866static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
5867                      void *mb_buf)
5868{
5869        u64 eqe_ba[MTT_MIN_COUNT] = { 0 };
5870        struct hns_roce_eq_context *eqc;
5871        u64 bt_ba = 0;
5872        int count;
5873
5874        eqc = mb_buf;
5875        memset(eqc, 0, sizeof(struct hns_roce_eq_context));
5876
5877        init_eq_config(hr_dev, eq);
5878
5879        /* if not multi-hop, the eqe buffer only uses one trunk */
5880        count = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, eqe_ba, MTT_MIN_COUNT,
5881                                  &bt_ba);
5882        if (count < 1) {
5883                dev_err(hr_dev->dev, "failed to find EQE mtr\n");
5884                return -ENOBUFS;
5885        }
5886
5887        hr_reg_write(eqc, EQC_EQ_ST, HNS_ROCE_V2_EQ_STATE_VALID);
5888        hr_reg_write(eqc, EQC_EQE_HOP_NUM, eq->hop_num);
5889        hr_reg_write(eqc, EQC_OVER_IGNORE, eq->over_ignore);
5890        hr_reg_write(eqc, EQC_COALESCE, eq->coalesce);
5891        hr_reg_write(eqc, EQC_ARM_ST, eq->arm_st);
5892        hr_reg_write(eqc, EQC_EQN, eq->eqn);
5893        hr_reg_write(eqc, EQC_EQE_CNT, HNS_ROCE_EQ_INIT_EQE_CNT);
5894        hr_reg_write(eqc, EQC_EQE_BA_PG_SZ,
5895                     to_hr_hw_page_shift(eq->mtr.hem_cfg.ba_pg_shift));
5896        hr_reg_write(eqc, EQC_EQE_BUF_PG_SZ,
5897                     to_hr_hw_page_shift(eq->mtr.hem_cfg.buf_pg_shift));
5898        hr_reg_write(eqc, EQC_EQ_PROD_INDX, HNS_ROCE_EQ_INIT_PROD_IDX);
5899        hr_reg_write(eqc, EQC_EQ_MAX_CNT, eq->eq_max_cnt);
5900
5901        hr_reg_write(eqc, EQC_EQ_PERIOD, eq->eq_period);
5902        hr_reg_write(eqc, EQC_EQE_REPORT_TIMER, HNS_ROCE_EQ_INIT_REPORT_TIMER);
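        /*
         * The EQE base addresses below are programmed as L/M/H register-field
         * pieces; each shift selects the address bits that land in the
         * corresponding field of the EQ context.
         */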
5903        hr_reg_write(eqc, EQC_EQE_BA_L, bt_ba >> 3);
5904        hr_reg_write(eqc, EQC_EQE_BA_H, bt_ba >> 35);
5905        hr_reg_write(eqc, EQC_SHIFT, eq->shift);
5906        hr_reg_write(eqc, EQC_MSI_INDX, HNS_ROCE_EQ_INIT_MSI_IDX);
5907        hr_reg_write(eqc, EQC_CUR_EQE_BA_L, eqe_ba[0] >> 12);
5908        hr_reg_write(eqc, EQC_CUR_EQE_BA_M, eqe_ba[0] >> 28);
5909        hr_reg_write(eqc, EQC_CUR_EQE_BA_H, eqe_ba[0] >> 60);
5910        hr_reg_write(eqc, EQC_EQ_CONS_INDX, HNS_ROCE_EQ_INIT_CONS_IDX);
5911        hr_reg_write(eqc, EQC_NEX_EQE_BA_L, eqe_ba[1] >> 12);
5912        hr_reg_write(eqc, EQC_NEX_EQE_BA_H, eqe_ba[1] >> 44);
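        /* EQC_EQE_SIZE is a boolean field: set when the V3 EQE format is used. */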
5913        hr_reg_write(eqc, EQC_EQE_SIZE, eq->eqe_size == HNS_ROCE_V3_EQE_SIZE);
5914
5915        return 0;
5916}
5917
5918static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
5919{
5920        struct hns_roce_buf_attr buf_attr = {};
5921        int err;
5922
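        /* HNS_ROCE_HOP_NUM_0 is a sentinel meaning zero BT hops (contiguous buffer). */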
5923        if (hr_dev->caps.eqe_hop_num == HNS_ROCE_HOP_NUM_0)
5924                eq->hop_num = 0;
5925        else
5926                eq->hop_num = hr_dev->caps.eqe_hop_num;
5927
5928        buf_attr.page_shift = hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT;
5929        buf_attr.region[0].size = eq->entries * eq->eqe_size;
5930        buf_attr.region[0].hopnum = eq->hop_num;
5931        buf_attr.region_count = 1;
5932
5933        err = hns_roce_mtr_create(hr_dev, &eq->mtr, &buf_attr,
5934                                  hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT, NULL,
5935                                  0);
5936        if (err)
5937                dev_err(hr_dev->dev, "Failed to alloc EQE mtr, err %d\n", err);
5938
5939        return err;
5940}
5941
5942static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
5943                                 struct hns_roce_eq *eq,
5944                                 unsigned int eq_cmd)
5945{
5946        struct hns_roce_cmd_mailbox *mailbox;
5947        int ret;
5948
5949        /* Allocate mailbox memory */
5950        mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5951        if (IS_ERR(mailbox))
5952                return PTR_ERR(mailbox);
5953
5954        ret = alloc_eq_buf(hr_dev, eq);
5955        if (ret)
5956                goto free_cmd_mbox;
5957
5958        ret = config_eqc(hr_dev, eq, mailbox->buf);
5959        if (ret)
5960                goto err_cmd_mbox;
5961
5962        ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
5963                                eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
5964        if (ret) {
5965                dev_err(hr_dev->dev, "[mailbox cmd] create eqc failed.\n");
5966                goto err_cmd_mbox;
5967        }
5968
5969        hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5970
5971        return 0;
5972
5973err_cmd_mbox:
5974        free_eq_buf(hr_dev, eq);
5975
5976free_cmd_mbox:
5977        hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5978
5979        return ret;
5980}
5981
5982static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
5983                                  int comp_num, int aeq_num, int other_num)
5984{
5985        struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
5986        int i, j;
5987        int ret;
5988
5989        for (i = 0; i < irq_num; i++) {
5990                hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
5991                                               GFP_KERNEL);
5992                if (!hr_dev->irq_names[i]) {
5993                        ret = -ENOMEM;
5994                        goto err_kzalloc_failed;
5995                }
5996        }
5997
5998        /* The irqs comprise: abnormal + AEQ + CEQ */
5999        for (j = 0; j < other_num; j++)
6000                snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
6001                         "hns-abn-%d", j);
6002
6003        for (j = other_num; j < (other_num + aeq_num); j++)
6004                snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
6005                         "hns-aeq-%d", j - other_num);
6006
6007        for (j = (other_num + aeq_num); j < irq_num; j++)
6008                snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
6009                         "hns-ceq-%d", j - other_num - aeq_num);
6010
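        /*
         * irq_names[] is ordered abn, aeq, ceq while eq_table->eq[] stores
         * the CEQs before the AEQs, so the loop below translates between
         * the two index spaces.
         */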
6011        for (j = 0; j < irq_num; j++) {
6012                if (j < other_num)
6013                        ret = request_irq(hr_dev->irq[j],
6014                                          hns_roce_v2_msix_interrupt_abn,
6015                                          0, hr_dev->irq_names[j], hr_dev);
6016
6017                else if (j < (other_num + comp_num))
6018                        ret = request_irq(eq_table->eq[j - other_num].irq,
6019                                          hns_roce_v2_msix_interrupt_eq,
6020                                          0, hr_dev->irq_names[j + aeq_num],
6021                                          &eq_table->eq[j - other_num]);
6022                else
6023                        ret = request_irq(eq_table->eq[j - other_num].irq,
6024                                          hns_roce_v2_msix_interrupt_eq,
6025                                          0, hr_dev->irq_names[j - comp_num],
6026                                          &eq_table->eq[j - other_num]);
6027                if (ret) {
6028                        dev_err(hr_dev->dev, "Request irq error!\n");
6029                        goto err_request_failed;
6030                }
6031        }
6032
6033        return 0;
6034
6035err_request_failed:
6036        for (j -= 1; j >= 0; j--)
6037                if (j < other_num)
6038                        free_irq(hr_dev->irq[j], hr_dev);
6039                else
6040                        free_irq(eq_table->eq[j - other_num].irq,
6041                                 &eq_table->eq[j - other_num]);
6042
6043err_kzalloc_failed:
6044        for (i -= 1; i >= 0; i--)
6045                kfree(hr_dev->irq_names[i]);
6046
6047        return ret;
6048}
6049
6050static void __hns_roce_free_irq(struct hns_roce_dev *hr_dev)
6051{
6052        int irq_num;
6053        int eq_num;
6054        int i;
6055
6056        eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
6057        irq_num = eq_num + hr_dev->caps.num_other_vectors;
6058
6059        for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
6060                free_irq(hr_dev->irq[i], hr_dev);
6061
6062        for (i = 0; i < eq_num; i++)
6063                free_irq(hr_dev->eq_table.eq[i].irq, &hr_dev->eq_table.eq[i]);
6064
6065        for (i = 0; i < irq_num; i++)
6066                kfree(hr_dev->irq_names[i]);
6067}
6068
6069static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
6070{
6071        struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
6072        struct device *dev = hr_dev->dev;
6073        struct hns_roce_eq *eq;
6074        unsigned int eq_cmd;
6075        int irq_num;
6076        int eq_num;
6077        int other_num;
6078        int comp_num;
6079        int aeq_num;
6080        int i;
6081        int ret;
6082
6083        other_num = hr_dev->caps.num_other_vectors;
6084        comp_num = hr_dev->caps.num_comp_vectors;
6085        aeq_num = hr_dev->caps.num_aeq_vectors;
6086
6087        eq_num = comp_num + aeq_num;
6088        irq_num = eq_num + other_num;
6089
6090        eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
6091        if (!eq_table->eq)
6092                return -ENOMEM;
6093
6094        /* create eq */
6095        for (i = 0; i < eq_num; i++) {
6096                eq = &eq_table->eq[i];
6097                eq->hr_dev = hr_dev;
6098                eq->eqn = i;
6099                if (i < comp_num) {
6100                        /* CEQ */
6101                        eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
6102                        eq->type_flag = HNS_ROCE_CEQ;
6103                        eq->entries = hr_dev->caps.ceqe_depth;
6104                        eq->eqe_size = hr_dev->caps.ceqe_size;
6105                        eq->irq = hr_dev->irq[i + other_num + aeq_num];
6106                        eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
6107                        eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
6108                } else {
6109                        /* AEQ */
6110                        eq_cmd = HNS_ROCE_CMD_CREATE_AEQC;
6111                        eq->type_flag = HNS_ROCE_AEQ;
6112                        eq->entries = hr_dev->caps.aeqe_depth;
6113                        eq->eqe_size = hr_dev->caps.aeqe_size;
6114                        eq->irq = hr_dev->irq[i - comp_num + other_num];
6115                        eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
6116                        eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
6117                }
6118
6119                ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd);
6120                if (ret) {
6121                        dev_err(dev, "eq create failed.\n");
6122                        goto err_create_eq_fail;
6123                }
6124        }
6125
6126        /* enable irq */
6127        hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);
6128
6129        ret = __hns_roce_request_irq(hr_dev, irq_num, comp_num,
6130                                     aeq_num, other_num);
6131        if (ret) {
6132                dev_err(dev, "Request irq failed.\n");
6133                goto err_request_irq_fail;
6134        }
6135
6136        hr_dev->irq_workq = alloc_ordered_workqueue("hns_roce_irq_workq", 0);
6137        if (!hr_dev->irq_workq) {
6138                dev_err(dev, "Create irq workqueue failed!\n");
6139                ret = -ENOMEM;
6140                goto err_create_wq_fail;
6141        }
6142
6143        return 0;
6144
6145err_create_wq_fail:
6146        __hns_roce_free_irq(hr_dev);
6147
6148err_request_irq_fail:
6149        hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
6150
6151err_create_eq_fail:
6152        for (i -= 1; i >= 0; i--)
6153                free_eq_buf(hr_dev, &eq_table->eq[i]);
6154        kfree(eq_table->eq);
6155
6156        return ret;
6157}
6158
6159static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
6160{
6161        struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
6162        int eq_num;
6163        int i;
6164
6165        eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
6166
6167        /* Disable irq */
6168        hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
6169
6170        __hns_roce_free_irq(hr_dev);
6171        destroy_workqueue(hr_dev->irq_workq);
6172
6173        for (i = 0; i < eq_num; i++) {
6174                hns_roce_v2_destroy_eqc(hr_dev, i);
6175
6176                free_eq_buf(hr_dev, &eq_table->eq[i]);
6177        }
6178
6179        kfree(eq_table->eq);
6180}
6181
6182static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = {
6183        .query_cqc_info = hns_roce_v2_query_cqc_info,
6184};
6185
6186static const struct ib_device_ops hns_roce_v2_dev_ops = {
6187        .destroy_qp = hns_roce_v2_destroy_qp,
6188        .modify_cq = hns_roce_v2_modify_cq,
6189        .poll_cq = hns_roce_v2_poll_cq,
6190        .post_recv = hns_roce_v2_post_recv,
6191        .post_send = hns_roce_v2_post_send,
6192        .query_qp = hns_roce_v2_query_qp,
6193        .req_notify_cq = hns_roce_v2_req_notify_cq,
6194};
6195
6196static const struct ib_device_ops hns_roce_v2_dev_srq_ops = {
6197        .modify_srq = hns_roce_v2_modify_srq,
6198        .post_srq_recv = hns_roce_v2_post_srq_recv,
6199        .query_srq = hns_roce_v2_query_srq,
6200};
6201
6202static const struct hns_roce_hw hns_roce_hw_v2 = {
6203        .cmq_init = hns_roce_v2_cmq_init,
6204        .cmq_exit = hns_roce_v2_cmq_exit,
6205        .hw_profile = hns_roce_v2_profile,
6206        .hw_init = hns_roce_v2_init,
6207        .hw_exit = hns_roce_v2_exit,
6208        .post_mbox = v2_post_mbox,
6209        .poll_mbox_done = v2_poll_mbox_done,
6210        .chk_mbox_avail = v2_chk_mbox_is_avail,
6211        .set_gid = hns_roce_v2_set_gid,
6212        .set_mac = hns_roce_v2_set_mac,
6213        .write_mtpt = hns_roce_v2_write_mtpt,
6214        .rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
6215        .frmr_write_mtpt = hns_roce_v2_frmr_write_mtpt,
6216        .mw_write_mtpt = hns_roce_v2_mw_write_mtpt,
6217        .write_cqc = hns_roce_v2_write_cqc,
6218        .set_hem = hns_roce_v2_set_hem,
6219        .clear_hem = hns_roce_v2_clear_hem,
6220        .modify_qp = hns_roce_v2_modify_qp,
6221        .qp_flow_control_init = hns_roce_v2_qp_flow_control_init,
6222        .init_eq = hns_roce_v2_init_eq_table,
6223        .cleanup_eq = hns_roce_v2_cleanup_eq_table,
6224        .write_srqc = hns_roce_v2_write_srqc,
6225        .hns_roce_dev_ops = &hns_roce_v2_dev_ops,
6226        .hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
6227};
6228
6229static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
6230        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
6231        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
6232        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
6233        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
6234        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
6235        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
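        /* A nonzero driver_data marks a VF; see hns_roce_hw_v2_get_cfg(). */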
6236        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
6237         HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
6238        /* required last entry */
6239        {0, }
6240};
6241
6242MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);
6243
6244static void hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
6245                                  struct hnae3_handle *handle)
6246{
6247        struct hns_roce_v2_priv *priv = hr_dev->priv;
6248        const struct pci_device_id *id;
6249        int i;
6250
6251        hr_dev->pci_dev = handle->pdev;
6252        id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev);
6253        hr_dev->is_vf = id->driver_data;
6254        hr_dev->dev = &handle->pdev->dev;
6255        hr_dev->hw = &hns_roce_hw_v2;
6256        hr_dev->dfx = &hns_roce_dfx_hw_v2;
6257        hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
6258        hr_dev->odb_offset = hr_dev->sdb_offset;
6259
6260        /* Get info from NIC driver. */
6261        hr_dev->reg_base = handle->rinfo.roce_io_base;
6262        hr_dev->mem_base = handle->rinfo.roce_mem_base;
6263        hr_dev->caps.num_ports = 1;
6264        hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
6265        hr_dev->iboe.phy_port[0] = 0;
6266
6267        addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
6268                            hr_dev->iboe.netdevs[0]->dev_addr);
6269
6270        for (i = 0; i < handle->rinfo.num_vectors; i++)
6271                hr_dev->irq[i] = pci_irq_vector(handle->pdev,
6272                                                i + handle->rinfo.base_vector);
6273
6274        /* cmd issue mode: 0 is poll, 1 is event */
6275        hr_dev->cmd_mod = 1;
6276        hr_dev->loop_idc = 0;
6277
6278        hr_dev->reset_cnt = handle->ae_algo->ops->ae_dev_reset_cnt(handle);
6279        priv->handle = handle;
6280}
6281
6282static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
6283{
6284        struct hns_roce_dev *hr_dev;
6285        int ret;
6286
6287        hr_dev = ib_alloc_device(hns_roce_dev, ib_dev);
6288        if (!hr_dev)
6289                return -ENOMEM;
6290
6291        hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL);
6292        if (!hr_dev->priv) {
6293                ret = -ENOMEM;
6294                goto error_failed_kzalloc;
6295        }
6296
6297        hns_roce_hw_v2_get_cfg(hr_dev, handle);
6298
6299        ret = hns_roce_init(hr_dev);
6300        if (ret) {
6301                dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
6302                goto error_failed_get_cfg;
6303        }
6304
6305        handle->priv = hr_dev;
6306
6307        return 0;
6308
6309error_failed_get_cfg:
6310        kfree(hr_dev->priv);
6311
6312error_failed_kzalloc:
6313        ib_dealloc_device(&hr_dev->ib_dev);
6314
6315        return ret;
6316}
6317
6318static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
6319                                           bool reset)
6320{
6321        struct hns_roce_dev *hr_dev = handle->priv;
6322
6323        if (!hr_dev)
6324                return;
6325
6326        handle->priv = NULL;
6327
6328        hr_dev->state = HNS_ROCE_DEVICE_STATE_UNINIT;
6329        hns_roce_handle_device_err(hr_dev);
6330
6331        hns_roce_exit(hr_dev);
6332        kfree(hr_dev->priv);
6333        ib_dealloc_device(&hr_dev->ib_dev);
6334}
6335
6336static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
6337{
6338        const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
6339        const struct pci_device_id *id;
6340        struct device *dev = &handle->pdev->dev;
6341        int ret;
6342
6343        handle->rinfo.instance_state = HNS_ROCE_STATE_INIT;
6344
6345        if (ops->ae_dev_resetting(handle) || ops->get_hw_reset_stat(handle)) {
6346                handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
6347                goto reset_chk_err;
6348        }
6349
6350        id = pci_match_id(hns_roce_hw_v2_pci_tbl, handle->pdev);
6351        if (!id)
6352                return 0;
6353
6354        if (id->driver_data && handle->pdev->revision < PCI_REVISION_ID_HIP09)
6355                return 0;
6356
6357        ret = __hns_roce_hw_v2_init_instance(handle);
6358        if (ret) {
6359                handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
6360                dev_err(dev, "RoCE instance init failed! ret = %d\n", ret);
6361                if (ops->ae_dev_resetting(handle) ||
6362                    ops->get_hw_reset_stat(handle))
6363                        goto reset_chk_err;
6364                else
6365                        return ret;
6366        }
6367
6368        handle->rinfo.instance_state = HNS_ROCE_STATE_INITED;
6369
6370
6371        return 0;
6372
6373reset_chk_err:
6374        dev_err(dev, "Device is busy in resetting state.\n"
6375                     "please retry later.\n");
6376
6377        return -EBUSY;
6378}
6379
6380static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
6381                                           bool reset)
6382{
6383        if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED)
6384                return;
6385
6386        handle->rinfo.instance_state = HNS_ROCE_STATE_UNINIT;
6387
6388        __hns_roce_hw_v2_uninit_instance(handle, reset);
6389
6390        handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
6391}
6392static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
6393{
6394        struct hns_roce_dev *hr_dev;
6395
6396        if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) {
6397                set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
6398                return 0;
6399        }
6400
6401        handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN;
6402        clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
6403
6404        hr_dev = handle->priv;
6405        if (!hr_dev)
6406                return 0;
6407
6408        hr_dev->is_reset = true;
6409        hr_dev->active = false;
6410        hr_dev->dis_db = true;
6411
6412        hr_dev->state = HNS_ROCE_DEVICE_STATE_RST_DOWN;
6413
6414        return 0;
6415}
6416
6417static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
6418{
6419        struct device *dev = &handle->pdev->dev;
6420        int ret;
6421
6422        if (test_and_clear_bit(HNS_ROCE_RST_DIRECT_RETURN,
6423                               &handle->rinfo.state)) {
6424                handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
6425                return 0;
6426        }
6427
6428        handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INIT;
6429
6430        dev_info(&handle->pdev->dev, "In reset process RoCE client reinit.\n");
6431        ret = __hns_roce_hw_v2_init_instance(handle);
6432        if (ret) {
6433                /* when reset notify type is HNAE3_INIT_CLIENT In reset notify
6434                 * callback function, RoCE Engine reinitialize. If RoCE reinit
6435                 * failed, we should inform NIC driver.
6436                 */
6437                handle->priv = NULL;
6438                dev_err(dev, "In reset process RoCE reinit failed %d.\n", ret);
6439        } else {
6440                handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
6441                dev_info(dev, "Reset done, RoCE client reinit finished.\n");
6442        }
6443
6444        return ret;
6445}
6446
6447static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
6448{
6449        if (test_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state))
6450                return 0;
6451
6452        handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT;
6453        dev_info(&handle->pdev->dev, "In reset process RoCE client uninit.\n");
6454        msleep(HNS_ROCE_V2_HW_RST_UNINT_DELAY);
6455        __hns_roce_hw_v2_uninit_instance(handle, false);
6456
6457        return 0;
6458}
6459
6460static int hns_roce_hw_v2_reset_notify(struct hnae3_handle *handle,
6461                                       enum hnae3_reset_notify_type type)
6462{
6463        int ret = 0;
6464
6465        switch (type) {
6466        case HNAE3_DOWN_CLIENT:
6467                ret = hns_roce_hw_v2_reset_notify_down(handle);
6468                break;
6469        case HNAE3_INIT_CLIENT:
6470                ret = hns_roce_hw_v2_reset_notify_init(handle);
6471                break;
6472        case HNAE3_UNINIT_CLIENT:
6473                ret = hns_roce_hw_v2_reset_notify_uninit(handle);
6474                break;
6475        default:
6476                break;
6477        }
6478
6479        return ret;
6480}
6481
6482static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
6483        .init_instance = hns_roce_hw_v2_init_instance,
6484        .uninit_instance = hns_roce_hw_v2_uninit_instance,
6485        .reset_notify = hns_roce_hw_v2_reset_notify,
6486};
6487
6488static struct hnae3_client hns_roce_hw_v2_client = {
6489        .name = "hns_roce_hw_v2",
6490        .type = HNAE3_CLIENT_ROCE,
6491        .ops = &hns_roce_hw_v2_ops,
6492};
6493
6494static int __init hns_roce_hw_v2_init(void)
6495{
6496        return hnae3_register_client(&hns_roce_hw_v2_client);
6497}
6498
6499static void __exit hns_roce_hw_v2_exit(void)
6500{
6501        hnae3_unregister_client(&hns_roce_hw_v2_client);
6502}
6503
6504module_init(hns_roce_hw_v2_init);
6505module_exit(hns_roce_hw_v2_exit);
6506
6507MODULE_LICENSE("Dual BSD/GPL");
6508MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
6509MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
6510MODULE_AUTHOR("Shaobo Xu <xushaobo2@huawei.com>");
6511MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver");
6512