linux/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
   1/*
   2 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 *
  32 */
  33
  34#include <net/addrconf.h>
  35#include <linux/etherdevice.h>
  36#include <linux/mlx5/vport.h>
  37
  38#include "mlx5_core.h"
  39#include "lib/mlx5.h"
  40#include "fpga/conn.h"
  41
  42#define MLX5_FPGA_PKEY 0xFFFF
  43#define MLX5_FPGA_PKEY_INDEX 0 /* RoCE PKEY 0xFFFF is always at index 0 */
  44#define MLX5_FPGA_RECV_SIZE 2048
  45#define MLX5_FPGA_PORT_NUM 1
  46#define MLX5_FPGA_CQ_BUDGET 64
  47
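/*
 * Map the buffer's scatter/gather entries for DMA (sg[0] always, sg[1]
 * only if present).  If mapping sg[1] fails, the sg[0] mapping is undone
 * and -ENOMEM is returned.
 */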
  48static int mlx5_fpga_conn_map_buf(struct mlx5_fpga_conn *conn,
  49                                  struct mlx5_fpga_dma_buf *buf)
  50{
  51        struct device *dma_device;
  52        int err = 0;
  53
  54        if (unlikely(!buf->sg[0].data))
  55                goto out;
  56
  57        dma_device = &conn->fdev->mdev->pdev->dev;
  58        buf->sg[0].dma_addr = dma_map_single(dma_device, buf->sg[0].data,
  59                                             buf->sg[0].size, buf->dma_dir);
  60        err = dma_mapping_error(dma_device, buf->sg[0].dma_addr);
  61        if (unlikely(err)) {
  62                mlx5_fpga_warn(conn->fdev, "DMA error on sg 0: %d\n", err);
  63                err = -ENOMEM;
  64                goto out;
  65        }
  66
  67        if (!buf->sg[1].data)
  68                goto out;
  69
  70        buf->sg[1].dma_addr = dma_map_single(dma_device, buf->sg[1].data,
  71                                             buf->sg[1].size, buf->dma_dir);
  72        err = dma_mapping_error(dma_device, buf->sg[1].dma_addr);
  73        if (unlikely(err)) {
  74                mlx5_fpga_warn(conn->fdev, "DMA error on sg 1: %d\n", err);
  75                dma_unmap_single(dma_device, buf->sg[0].dma_addr,
  76                                 buf->sg[0].size, buf->dma_dir);
  77                err = -ENOMEM;
  78        }
  79
  80out:
  81        return err;
  82}
  83
  84static void mlx5_fpga_conn_unmap_buf(struct mlx5_fpga_conn *conn,
  85                                     struct mlx5_fpga_dma_buf *buf)
  86{
  87        struct device *dma_device;
  88
  89        dma_device = &conn->fdev->mdev->pdev->dev;
  90        if (buf->sg[1].data)
  91                dma_unmap_single(dma_device, buf->sg[1].dma_addr,
  92                                 buf->sg[1].size, buf->dma_dir);
  93
  94        if (likely(buf->sg[0].data))
  95                dma_unmap_single(dma_device, buf->sg[0].dma_addr,
  96                                 buf->sg[0].size, buf->dma_dir);
  97}
  98
  99static int mlx5_fpga_conn_post_recv(struct mlx5_fpga_conn *conn,
 100                                    struct mlx5_fpga_dma_buf *buf)
 101{
 102        struct mlx5_wqe_data_seg *data;
 103        unsigned int ix;
 104        int err = 0;
 105
 106        err = mlx5_fpga_conn_map_buf(conn, buf);
 107        if (unlikely(err))
 108                goto out;
 109
 110        if (unlikely(conn->qp.rq.pc - conn->qp.rq.cc >= conn->qp.rq.size)) {
 111                mlx5_fpga_conn_unmap_buf(conn, buf);
 112                return -EBUSY;
 113        }
 114
 115        ix = conn->qp.rq.pc & (conn->qp.rq.size - 1);
 116        data = mlx5_wq_cyc_get_wqe(&conn->qp.wq.rq, ix);
 117        data->byte_count = cpu_to_be32(buf->sg[0].size);
 118        data->lkey = cpu_to_be32(conn->fdev->conn_res.mkey.key);
 119        data->addr = cpu_to_be64(buf->sg[0].dma_addr);
 120
 121        conn->qp.rq.pc++;
 122        conn->qp.rq.bufs[ix] = buf;
 123
 124        /* Make sure that descriptors are written before doorbell record. */
 125        dma_wmb();
 126        *conn->qp.wq.rq.db = cpu_to_be32(conn->qp.rq.pc & 0xffff);
 127out:
 128        return err;
 129}
 130
 131static void mlx5_fpga_conn_notify_hw(struct mlx5_fpga_conn *conn, void *wqe)
 132{
 133        /* ensure wqe is visible to device before updating doorbell record */
 134        dma_wmb();
 135        *conn->qp.wq.sq.db = cpu_to_be32(conn->qp.sq.pc);
 136        /* Make sure that doorbell record is visible before ringing */
 137        wmb();
 138        mlx5_write64(wqe, conn->fdev->conn_res.uar->map + MLX5_BF_OFFSET, NULL);
 139}
 140
 141static void mlx5_fpga_conn_post_send(struct mlx5_fpga_conn *conn,
 142                                     struct mlx5_fpga_dma_buf *buf)
 143{
 144        struct mlx5_wqe_ctrl_seg *ctrl;
 145        struct mlx5_wqe_data_seg *data;
 146        unsigned int ix, sgi;
 147        int size = 1;
 148
 149        ix = conn->qp.sq.pc & (conn->qp.sq.size - 1);
 150
 151        ctrl = mlx5_wq_cyc_get_wqe(&conn->qp.wq.sq, ix);
 152        data = (void *)(ctrl + 1);
 153
 154        for (sgi = 0; sgi < ARRAY_SIZE(buf->sg); sgi++) {
 155                if (!buf->sg[sgi].data)
 156                        break;
 157                data->byte_count = cpu_to_be32(buf->sg[sgi].size);
 158                data->lkey = cpu_to_be32(conn->fdev->conn_res.mkey.key);
 159                data->addr = cpu_to_be64(buf->sg[sgi].dma_addr);
 160                data++;
 161                size++;
 162        }
 163
 164        ctrl->imm = 0;
 165        ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
 166        ctrl->opmod_idx_opcode = cpu_to_be32(((conn->qp.sq.pc & 0xffff) << 8) |
 167                                             MLX5_OPCODE_SEND);
 168        ctrl->qpn_ds = cpu_to_be32(size | (conn->qp.mqp.qpn << 8));
 169
 170        conn->qp.sq.pc++;
 171        conn->qp.sq.bufs[ix] = buf;
 172        mlx5_fpga_conn_notify_hw(conn, ctrl);
 173}
 174
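/*
 * Queue a buffer for transmission to the FPGA.  If the send queue is full
 * the buffer is parked on the backlog list and posted later from the send
 * completion handler, preserving submission order.
 */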
 175int mlx5_fpga_conn_send(struct mlx5_fpga_conn *conn,
 176                        struct mlx5_fpga_dma_buf *buf)
 177{
 178        unsigned long flags;
 179        int err;
 180
 181        if (!conn->qp.active)
 182                return -ENOTCONN;
 183
 184        err = mlx5_fpga_conn_map_buf(conn, buf);
 185        if (err)
 186                return err;
 187
 188        spin_lock_irqsave(&conn->qp.sq.lock, flags);
 189
 190        if (conn->qp.sq.pc - conn->qp.sq.cc >= conn->qp.sq.size) {
 191                list_add_tail(&buf->list, &conn->qp.sq.backlog);
 192                goto out_unlock;
 193        }
 194
 195        mlx5_fpga_conn_post_send(conn, buf);
 196
 197out_unlock:
 198        spin_unlock_irqrestore(&conn->qp.sq.lock, flags);
 199        return err;
 200}
 201
 202static int mlx5_fpga_conn_post_recv_buf(struct mlx5_fpga_conn *conn)
 203{
 204        struct mlx5_fpga_dma_buf *buf;
 205        int err;
 206
  207        buf = kzalloc(sizeof(*buf) + MLX5_FPGA_RECV_SIZE, GFP_KERNEL);
 208        if (!buf)
 209                return -ENOMEM;
 210
 211        buf->sg[0].data = (void *)(buf + 1);
 212        buf->sg[0].size = MLX5_FPGA_RECV_SIZE;
 213        buf->dma_dir = DMA_FROM_DEVICE;
 214
 215        err = mlx5_fpga_conn_post_recv(conn, buf);
 216        if (err)
 217                kfree(buf);
 218
 219        return err;
 220}
 221
 222static int mlx5_fpga_conn_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
 223                                      struct mlx5_core_mkey *mkey)
 224{
 225        int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
 226        void *mkc;
 227        u32 *in;
 228        int err;
 229
 230        in = kvzalloc(inlen, GFP_KERNEL);
 231        if (!in)
 232                return -ENOMEM;
 233
 234        mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
 235        MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
 236        MLX5_SET(mkc, mkc, lw, 1);
 237        MLX5_SET(mkc, mkc, lr, 1);
 238
 239        MLX5_SET(mkc, mkc, pd, pdn);
 240        MLX5_SET(mkc, mkc, length64, 1);
 241        MLX5_SET(mkc, mkc, qpn, 0xffffff);
 242
 243        err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
 244
 245        kvfree(in);
 246        return err;
 247}
 248
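/*
 * Receive completion: pass the filled buffer to the connection's recv_cb
 * and then re-post it to the receive queue.  On error, or once the QP is
 * no longer active, the buffer is freed instead.
 */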
 249static void mlx5_fpga_conn_rq_cqe(struct mlx5_fpga_conn *conn,
 250                                  struct mlx5_cqe64 *cqe, u8 status)
 251{
 252        struct mlx5_fpga_dma_buf *buf;
 253        int ix, err;
 254
 255        ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.rq.size - 1);
 256        buf = conn->qp.rq.bufs[ix];
 257        conn->qp.rq.bufs[ix] = NULL;
 258        if (!status)
 259                buf->sg[0].size = be32_to_cpu(cqe->byte_cnt);
 260        conn->qp.rq.cc++;
 261
 262        if (unlikely(status && (status != MLX5_CQE_SYNDROME_WR_FLUSH_ERR)))
 263                mlx5_fpga_warn(conn->fdev, "RQ buf %p on FPGA QP %u completion status %d\n",
 264                               buf, conn->fpga_qpn, status);
 265        else
 266                mlx5_fpga_dbg(conn->fdev, "RQ buf %p on FPGA QP %u completion status %d\n",
 267                              buf, conn->fpga_qpn, status);
 268
 269        mlx5_fpga_conn_unmap_buf(conn, buf);
 270
 271        if (unlikely(status || !conn->qp.active)) {
 272                conn->qp.active = false;
 273                kfree(buf);
 274                return;
 275        }
 276
 277        mlx5_fpga_dbg(conn->fdev, "Message with %u bytes received successfully\n",
 278                      buf->sg[0].size);
 279        conn->recv_cb(conn->cb_arg, buf);
 280
 281        buf->sg[0].size = MLX5_FPGA_RECV_SIZE;
 282        err = mlx5_fpga_conn_post_recv(conn, buf);
 283        if (unlikely(err)) {
 284                mlx5_fpga_warn(conn->fdev,
 285                               "Failed to re-post recv buf: %d\n", err);
 286                kfree(buf);
 287        }
 288}
 289
 290static void mlx5_fpga_conn_sq_cqe(struct mlx5_fpga_conn *conn,
 291                                  struct mlx5_cqe64 *cqe, u8 status)
 292{
 293        struct mlx5_fpga_dma_buf *buf, *nextbuf;
 294        unsigned long flags;
 295        int ix;
 296
 297        spin_lock_irqsave(&conn->qp.sq.lock, flags);
 298
 299        ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.sq.size - 1);
 300        buf = conn->qp.sq.bufs[ix];
 301        conn->qp.sq.bufs[ix] = NULL;
 302        conn->qp.sq.cc++;
 303
 304        /* Handle backlog still under the spinlock to ensure message post order */
 305        if (unlikely(!list_empty(&conn->qp.sq.backlog))) {
 306                if (likely(conn->qp.active)) {
 307                        nextbuf = list_first_entry(&conn->qp.sq.backlog,
 308                                                   struct mlx5_fpga_dma_buf, list);
 309                        list_del(&nextbuf->list);
 310                        mlx5_fpga_conn_post_send(conn, nextbuf);
 311                }
 312        }
 313
 314        spin_unlock_irqrestore(&conn->qp.sq.lock, flags);
 315
 316        if (unlikely(status && (status != MLX5_CQE_SYNDROME_WR_FLUSH_ERR)))
 317                mlx5_fpga_warn(conn->fdev, "SQ buf %p on FPGA QP %u completion status %d\n",
 318                               buf, conn->fpga_qpn, status);
 319        else
 320                mlx5_fpga_dbg(conn->fdev, "SQ buf %p on FPGA QP %u completion status %d\n",
 321                              buf, conn->fpga_qpn, status);
 322
 323        mlx5_fpga_conn_unmap_buf(conn, buf);
 324
 325        if (likely(buf->complete))
 326                buf->complete(conn, conn->fdev, buf, status);
 327
 328        if (unlikely(status))
 329                conn->qp.active = false;
 330}
 331
 332static void mlx5_fpga_conn_handle_cqe(struct mlx5_fpga_conn *conn,
 333                                      struct mlx5_cqe64 *cqe)
 334{
 335        u8 opcode, status = 0;
 336
 337        opcode = cqe->op_own >> 4;
 338
 339        switch (opcode) {
 340        case MLX5_CQE_REQ_ERR:
 341                status = ((struct mlx5_err_cqe *)cqe)->syndrome;
 342                /* Fall through */
 343        case MLX5_CQE_REQ:
 344                mlx5_fpga_conn_sq_cqe(conn, cqe, status);
 345                break;
 346
 347        case MLX5_CQE_RESP_ERR:
 348                status = ((struct mlx5_err_cqe *)cqe)->syndrome;
 349                /* Fall through */
 350        case MLX5_CQE_RESP_SEND:
 351                mlx5_fpga_conn_rq_cqe(conn, cqe, status);
 352                break;
 353        default:
 354                mlx5_fpga_warn(conn->fdev, "Unexpected cqe opcode %u\n",
 355                               opcode);
 356        }
 357}
 358
 359static void mlx5_fpga_conn_arm_cq(struct mlx5_fpga_conn *conn)
 360{
 361        mlx5_cq_arm(&conn->cq.mcq, MLX5_CQ_DB_REQ_NOT,
 362                    conn->fdev->conn_res.uar->map, conn->cq.wq.cc);
 363}
 364
 365static void mlx5_fpga_conn_cq_event(struct mlx5_core_cq *mcq,
 366                                    enum mlx5_event event)
 367{
 368        struct mlx5_fpga_conn *conn;
 369
 370        conn = container_of(mcq, struct mlx5_fpga_conn, cq.mcq);
 371        mlx5_fpga_warn(conn->fdev, "CQ event %u on CQ #%u\n", event, mcq->cqn);
 372}
 373
 374static void mlx5_fpga_conn_event(struct mlx5_core_qp *mqp, int event)
 375{
 376        struct mlx5_fpga_conn *conn;
 377
 378        conn = container_of(mqp, struct mlx5_fpga_conn, qp.mqp);
 379        mlx5_fpga_warn(conn->fdev, "QP event %u on QP #%u\n", event, mqp->qpn);
 380}
 381
 382static inline void mlx5_fpga_conn_cqes(struct mlx5_fpga_conn *conn,
 383                                       unsigned int budget)
 384{
 385        struct mlx5_cqe64 *cqe;
 386
 387        while (budget) {
 388                cqe = mlx5_cqwq_get_cqe(&conn->cq.wq);
 389                if (!cqe)
 390                        break;
 391
 392                budget--;
 393                mlx5_cqwq_pop(&conn->cq.wq);
 394                mlx5_fpga_conn_handle_cqe(conn, cqe);
 395                mlx5_cqwq_update_db_record(&conn->cq.wq);
 396        }
 397        if (!budget) {
 398                tasklet_schedule(&conn->cq.tasklet);
 399                return;
 400        }
 401
 402        mlx5_fpga_dbg(conn->fdev, "Re-arming CQ with cc# %u\n", conn->cq.wq.cc);
 403        /* ensure cq space is freed before enabling more cqes */
 404        wmb();
 405        mlx5_fpga_conn_arm_cq(conn);
 406}
 407
 408static void mlx5_fpga_conn_cq_tasklet(unsigned long data)
 409{
 410        struct mlx5_fpga_conn *conn = (void *)data;
 411
 412        if (unlikely(!conn->qp.active))
 413                return;
 414        mlx5_fpga_conn_cqes(conn, MLX5_FPGA_CQ_BUDGET);
 415}
 416
 417static void mlx5_fpga_conn_cq_complete(struct mlx5_core_cq *mcq)
 418{
 419        struct mlx5_fpga_conn *conn;
 420
 421        conn = container_of(mcq, struct mlx5_fpga_conn, cq.mcq);
 422        if (unlikely(!conn->qp.active))
 423                return;
 424        mlx5_fpga_conn_cqes(conn, MLX5_FPGA_CQ_BUDGET);
 425}
 426
 427static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
 428{
 429        struct mlx5_fpga_device *fdev = conn->fdev;
 430        struct mlx5_core_dev *mdev = fdev->mdev;
 431        u32 temp_cqc[MLX5_ST_SZ_DW(cqc)] = {0};
 432        struct mlx5_wq_param wqp;
 433        struct mlx5_cqe64 *cqe;
 434        int inlen, err, eqn;
 435        unsigned int irqn;
 436        void *cqc, *in;
 437        __be64 *pas;
 438        u32 i;
 439
 440        cq_size = roundup_pow_of_two(cq_size);
 441        MLX5_SET(cqc, temp_cqc, log_cq_size, ilog2(cq_size));
 442
 443        wqp.buf_numa_node = mdev->priv.numa_node;
 444        wqp.db_numa_node  = mdev->priv.numa_node;
 445
 446        err = mlx5_cqwq_create(mdev, &wqp, temp_cqc, &conn->cq.wq,
 447                               &conn->cq.wq_ctrl);
 448        if (err)
 449                return err;
 450
 451        for (i = 0; i < mlx5_cqwq_get_size(&conn->cq.wq); i++) {
 452                cqe = mlx5_cqwq_get_wqe(&conn->cq.wq, i);
 453                cqe->op_own = MLX5_CQE_INVALID << 4 | MLX5_CQE_OWNER_MASK;
 454        }
 455
 456        inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
 457                sizeof(u64) * conn->cq.wq_ctrl.frag_buf.npages;
  458        err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn);
  459        if (err)
  460                goto err_cqwq;
  461
  462        in = kvzalloc(inlen, GFP_KERNEL);
  463        if (!in) {
  464                err = -ENOMEM;
  465                goto err_cqwq;
  466        }
 467
 468        cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
 469        MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size));
 470        MLX5_SET(cqc, cqc, c_eqn, eqn);
 471        MLX5_SET(cqc, cqc, uar_page, fdev->conn_res.uar->index);
 472        MLX5_SET(cqc, cqc, log_page_size, conn->cq.wq_ctrl.frag_buf.page_shift -
 473                           MLX5_ADAPTER_PAGE_SHIFT);
 474        MLX5_SET64(cqc, cqc, dbr_addr, conn->cq.wq_ctrl.db.dma);
 475
 476        pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
 477        mlx5_fill_page_frag_array(&conn->cq.wq_ctrl.frag_buf, pas);
 478
 479        err = mlx5_core_create_cq(mdev, &conn->cq.mcq, in, inlen);
 480        kvfree(in);
 481
 482        if (err)
 483                goto err_cqwq;
 484
 485        conn->cq.mcq.cqe_sz     = 64;
 486        conn->cq.mcq.set_ci_db  = conn->cq.wq_ctrl.db.db;
 487        conn->cq.mcq.arm_db     = conn->cq.wq_ctrl.db.db + 1;
 488        *conn->cq.mcq.set_ci_db = 0;
 489        *conn->cq.mcq.arm_db    = 0;
 490        conn->cq.mcq.vector     = 0;
 491        conn->cq.mcq.comp       = mlx5_fpga_conn_cq_complete;
 492        conn->cq.mcq.event      = mlx5_fpga_conn_cq_event;
 493        conn->cq.mcq.irqn       = irqn;
 494        conn->cq.mcq.uar        = fdev->conn_res.uar;
 495        tasklet_init(&conn->cq.tasklet, mlx5_fpga_conn_cq_tasklet,
 496                     (unsigned long)conn);
 497
 498        mlx5_fpga_dbg(fdev, "Created CQ #0x%x\n", conn->cq.mcq.cqn);
 499
 500        goto out;
 501
 502err_cqwq:
 503        mlx5_cqwq_destroy(&conn->cq.wq_ctrl);
 504out:
 505        return err;
 506}
 507
 508static void mlx5_fpga_conn_destroy_cq(struct mlx5_fpga_conn *conn)
 509{
 510        tasklet_disable(&conn->cq.tasklet);
 511        tasklet_kill(&conn->cq.tasklet);
 512        mlx5_core_destroy_cq(conn->fdev->mdev, &conn->cq.mcq);
 513        mlx5_cqwq_destroy(&conn->cq.wq_ctrl);
 514}
 515
 516static int mlx5_fpga_conn_create_wq(struct mlx5_fpga_conn *conn, void *qpc)
 517{
 518        struct mlx5_fpga_device *fdev = conn->fdev;
 519        struct mlx5_core_dev *mdev = fdev->mdev;
 520        struct mlx5_wq_param wqp;
 521
 522        wqp.buf_numa_node = mdev->priv.numa_node;
 523        wqp.db_numa_node  = mdev->priv.numa_node;
 524
 525        return mlx5_wq_qp_create(mdev, &wqp, qpc, &conn->qp.wq,
 526                                 &conn->qp.wq_ctrl);
 527}
 528
 529static int mlx5_fpga_conn_create_qp(struct mlx5_fpga_conn *conn,
 530                                    unsigned int tx_size, unsigned int rx_size)
 531{
 532        struct mlx5_fpga_device *fdev = conn->fdev;
 533        struct mlx5_core_dev *mdev = fdev->mdev;
 534        u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {0};
 535        void *in = NULL, *qpc;
 536        int err, inlen;
 537
 538        conn->qp.rq.pc = 0;
 539        conn->qp.rq.cc = 0;
 540        conn->qp.rq.size = roundup_pow_of_two(rx_size);
 541        conn->qp.sq.pc = 0;
 542        conn->qp.sq.cc = 0;
 543        conn->qp.sq.size = roundup_pow_of_two(tx_size);
 544
 545        MLX5_SET(qpc, temp_qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4);
 546        MLX5_SET(qpc, temp_qpc, log_rq_size, ilog2(conn->qp.rq.size));
 547        MLX5_SET(qpc, temp_qpc, log_sq_size, ilog2(conn->qp.sq.size));
 548        err = mlx5_fpga_conn_create_wq(conn, temp_qpc);
 549        if (err)
 550                goto out;
 551
 552        conn->qp.rq.bufs = kvzalloc(sizeof(conn->qp.rq.bufs[0]) *
 553                                    conn->qp.rq.size, GFP_KERNEL);
 554        if (!conn->qp.rq.bufs) {
 555                err = -ENOMEM;
 556                goto err_wq;
 557        }
 558
 559        conn->qp.sq.bufs = kvzalloc(sizeof(conn->qp.sq.bufs[0]) *
 560                                    conn->qp.sq.size, GFP_KERNEL);
 561        if (!conn->qp.sq.bufs) {
 562                err = -ENOMEM;
 563                goto err_rq_bufs;
 564        }
 565
 566        inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
 567                MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) *
 568                conn->qp.wq_ctrl.buf.npages;
 569        in = kvzalloc(inlen, GFP_KERNEL);
 570        if (!in) {
 571                err = -ENOMEM;
 572                goto err_sq_bufs;
 573        }
 574
 575        qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
 576        MLX5_SET(qpc, qpc, uar_page, fdev->conn_res.uar->index);
 577        MLX5_SET(qpc, qpc, log_page_size,
 578                 conn->qp.wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
 579        MLX5_SET(qpc, qpc, fre, 1);
 580        MLX5_SET(qpc, qpc, rlky, 1);
 581        MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
 582        MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
 583        MLX5_SET(qpc, qpc, pd, fdev->conn_res.pdn);
 584        MLX5_SET(qpc, qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4);
 585        MLX5_SET(qpc, qpc, log_rq_size, ilog2(conn->qp.rq.size));
 586        MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
 587        MLX5_SET(qpc, qpc, log_sq_size, ilog2(conn->qp.sq.size));
 588        MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn);
 589        MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn);
 590        MLX5_SET64(qpc, qpc, dbr_addr, conn->qp.wq_ctrl.db.dma);
 591        if (MLX5_CAP_GEN(mdev, cqe_version) == 1)
 592                MLX5_SET(qpc, qpc, user_index, 0xFFFFFF);
 593
 594        mlx5_fill_page_array(&conn->qp.wq_ctrl.buf,
 595                             (__be64 *)MLX5_ADDR_OF(create_qp_in, in, pas));
 596
 597        err = mlx5_core_create_qp(mdev, &conn->qp.mqp, in, inlen);
 598        if (err)
 599                goto err_sq_bufs;
 600
 601        conn->qp.mqp.event = mlx5_fpga_conn_event;
 602        mlx5_fpga_dbg(fdev, "Created QP #0x%x\n", conn->qp.mqp.qpn);
 603
 604        goto out;
 605
 606err_sq_bufs:
 607        kvfree(conn->qp.sq.bufs);
 608err_rq_bufs:
 609        kvfree(conn->qp.rq.bufs);
 610err_wq:
 611        mlx5_wq_destroy(&conn->qp.wq_ctrl);
 612out:
 613        kvfree(in);
 614        return err;
 615}
 616
 617static void mlx5_fpga_conn_free_recv_bufs(struct mlx5_fpga_conn *conn)
 618{
 619        int ix;
 620
 621        for (ix = 0; ix < conn->qp.rq.size; ix++) {
 622                if (!conn->qp.rq.bufs[ix])
 623                        continue;
 624                mlx5_fpga_conn_unmap_buf(conn, conn->qp.rq.bufs[ix]);
 625                kfree(conn->qp.rq.bufs[ix]);
 626                conn->qp.rq.bufs[ix] = NULL;
 627        }
 628}
 629
 630static void mlx5_fpga_conn_flush_send_bufs(struct mlx5_fpga_conn *conn)
 631{
 632        struct mlx5_fpga_dma_buf *buf, *temp;
 633        int ix;
 634
 635        for (ix = 0; ix < conn->qp.sq.size; ix++) {
 636                buf = conn->qp.sq.bufs[ix];
 637                if (!buf)
 638                        continue;
 639                conn->qp.sq.bufs[ix] = NULL;
 640                mlx5_fpga_conn_unmap_buf(conn, buf);
 641                if (!buf->complete)
 642                        continue;
 643                buf->complete(conn, conn->fdev, buf, MLX5_CQE_SYNDROME_WR_FLUSH_ERR);
 644        }
 645        list_for_each_entry_safe(buf, temp, &conn->qp.sq.backlog, list) {
 646                mlx5_fpga_conn_unmap_buf(conn, buf);
 647                if (!buf->complete)
 648                        continue;
 649                buf->complete(conn, conn->fdev, buf, MLX5_CQE_SYNDROME_WR_FLUSH_ERR);
 650        }
 651}
 652
 653static void mlx5_fpga_conn_destroy_qp(struct mlx5_fpga_conn *conn)
 654{
 655        mlx5_core_destroy_qp(conn->fdev->mdev, &conn->qp.mqp);
 656        mlx5_fpga_conn_free_recv_bufs(conn);
 657        mlx5_fpga_conn_flush_send_bufs(conn);
 658        kvfree(conn->qp.sq.bufs);
 659        kvfree(conn->qp.rq.bufs);
 660        mlx5_wq_destroy(&conn->qp.wq_ctrl);
 661}
 662
 663static inline int mlx5_fpga_conn_reset_qp(struct mlx5_fpga_conn *conn)
 664{
 665        struct mlx5_core_dev *mdev = conn->fdev->mdev;
 666
 667        mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to RST\n", conn->qp.mqp.qpn);
 668
 669        return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2RST_QP, 0, NULL,
 670                                   &conn->qp.mqp);
 671}
 672
 673static inline int mlx5_fpga_conn_init_qp(struct mlx5_fpga_conn *conn)
 674{
 675        struct mlx5_fpga_device *fdev = conn->fdev;
 676        struct mlx5_core_dev *mdev = fdev->mdev;
 677        u32 *qpc = NULL;
 678        int err;
 679
 680        mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to INIT\n", conn->qp.mqp.qpn);
 681
 682        qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
 683        if (!qpc) {
 684                err = -ENOMEM;
 685                goto out;
 686        }
 687
 688        MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
 689        MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
 690        MLX5_SET(qpc, qpc, primary_address_path.pkey_index, MLX5_FPGA_PKEY_INDEX);
 691        MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, MLX5_FPGA_PORT_NUM);
 692        MLX5_SET(qpc, qpc, pd, conn->fdev->conn_res.pdn);
 693        MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn);
 694        MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn);
 695        MLX5_SET64(qpc, qpc, dbr_addr, conn->qp.wq_ctrl.db.dma);
 696
 697        err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, qpc,
 698                                  &conn->qp.mqp);
 699        if (err) {
 700                mlx5_fpga_warn(fdev, "qp_modify RST2INIT failed: %d\n", err);
 701                goto out;
 702        }
 703
 704out:
 705        kfree(qpc);
 706        return err;
 707}
 708
 709static inline int mlx5_fpga_conn_rtr_qp(struct mlx5_fpga_conn *conn)
 710{
 711        struct mlx5_fpga_device *fdev = conn->fdev;
 712        struct mlx5_core_dev *mdev = fdev->mdev;
 713        u32 *qpc = NULL;
 714        int err;
 715
 716        mlx5_fpga_dbg(conn->fdev, "QP RTR\n");
 717
 718        qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
 719        if (!qpc) {
 720                err = -ENOMEM;
 721                goto out;
 722        }
 723
 724        MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_1K_BYTES);
 725        MLX5_SET(qpc, qpc, log_msg_max, (u8)MLX5_CAP_GEN(mdev, log_max_msg));
 726        MLX5_SET(qpc, qpc, remote_qpn, conn->fpga_qpn);
 727        MLX5_SET(qpc, qpc, next_rcv_psn,
 728                 MLX5_GET(fpga_qpc, conn->fpga_qpc, next_send_psn));
 729        MLX5_SET(qpc, qpc, primary_address_path.pkey_index, MLX5_FPGA_PKEY_INDEX);
 730        MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, MLX5_FPGA_PORT_NUM);
 731        ether_addr_copy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rmac_47_32),
 732                        MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, fpga_mac_47_32));
 733        MLX5_SET(qpc, qpc, primary_address_path.udp_sport,
 734                 MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port));
 735        MLX5_SET(qpc, qpc, primary_address_path.src_addr_index,
 736                 conn->qp.sgid_index);
 737        MLX5_SET(qpc, qpc, primary_address_path.hop_limit, 0);
 738        memcpy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rgid_rip),
 739               MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, fpga_ip),
 740               MLX5_FLD_SZ_BYTES(qpc, primary_address_path.rgid_rip));
 741
 742        err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, qpc,
 743                                  &conn->qp.mqp);
 744        if (err) {
  745                mlx5_fpga_warn(fdev, "qp_modify INIT2RTR failed: %d\n", err);
 746                goto out;
 747        }
 748
 749out:
 750        kfree(qpc);
 751        return err;
 752}
 753
 754static inline int mlx5_fpga_conn_rts_qp(struct mlx5_fpga_conn *conn)
 755{
 756        struct mlx5_fpga_device *fdev = conn->fdev;
 757        struct mlx5_core_dev *mdev = fdev->mdev;
 758        u32 *qpc = NULL;
 759        u32 opt_mask;
 760        int err;
 761
 762        mlx5_fpga_dbg(conn->fdev, "QP RTS\n");
 763
 764        qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
 765        if (!qpc) {
 766                err = -ENOMEM;
 767                goto out;
 768        }
 769
 770        MLX5_SET(qpc, qpc, log_ack_req_freq, 8);
 771        MLX5_SET(qpc, qpc, min_rnr_nak, 0x12);
 772        MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 0x12); /* ~1.07s */
 773        MLX5_SET(qpc, qpc, next_send_psn,
 774                 MLX5_GET(fpga_qpc, conn->fpga_qpc, next_rcv_psn));
 775        MLX5_SET(qpc, qpc, retry_count, 7);
 776        MLX5_SET(qpc, qpc, rnr_retry, 7); /* Infinite retry if RNR NACK */
 777
 778        opt_mask = MLX5_QP_OPTPAR_RNR_TIMEOUT;
 779        err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, opt_mask, qpc,
 780                                  &conn->qp.mqp);
 781        if (err) {
  782                mlx5_fpga_warn(fdev, "qp_modify RTR2RTS failed: %d\n", err);
 783                goto out;
 784        }
 785
 786out:
 787        kfree(qpc);
 788        return err;
 789}
 790
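/*
 * Activate the FPGA side of the connection, then move the local QP through
 * RESET -> INIT -> RTR -> RTS.  Receive buffers are posted right after INIT
 * so the RQ is populated before traffic can arrive.
 */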
 791static int mlx5_fpga_conn_connect(struct mlx5_fpga_conn *conn)
 792{
 793        struct mlx5_fpga_device *fdev = conn->fdev;
 794        int err;
 795
 796        MLX5_SET(fpga_qpc, conn->fpga_qpc, state, MLX5_FPGA_QPC_STATE_ACTIVE);
 797        err = mlx5_fpga_modify_qp(conn->fdev->mdev, conn->fpga_qpn,
 798                                  MLX5_FPGA_QPC_STATE, &conn->fpga_qpc);
 799        if (err) {
 800                mlx5_fpga_err(fdev, "Failed to activate FPGA RC QP: %d\n", err);
 801                goto out;
 802        }
 803
 804        err = mlx5_fpga_conn_reset_qp(conn);
 805        if (err) {
 806                mlx5_fpga_err(fdev, "Failed to change QP state to reset\n");
 807                goto err_fpga_qp;
 808        }
 809
 810        err = mlx5_fpga_conn_init_qp(conn);
 811        if (err) {
 812                mlx5_fpga_err(fdev, "Failed to modify QP from RESET to INIT\n");
 813                goto err_fpga_qp;
 814        }
 815        conn->qp.active = true;
 816
 817        while (!mlx5_fpga_conn_post_recv_buf(conn))
 818                ;
 819
 820        err = mlx5_fpga_conn_rtr_qp(conn);
 821        if (err) {
 822                mlx5_fpga_err(fdev, "Failed to change QP state from INIT to RTR\n");
 823                goto err_recv_bufs;
 824        }
 825
 826        err = mlx5_fpga_conn_rts_qp(conn);
 827        if (err) {
 828                mlx5_fpga_err(fdev, "Failed to change QP state from RTR to RTS\n");
 829                goto err_recv_bufs;
 830        }
 831        goto out;
 832
 833err_recv_bufs:
 834        mlx5_fpga_conn_free_recv_bufs(conn);
 835err_fpga_qp:
 836        MLX5_SET(fpga_qpc, conn->fpga_qpc, state, MLX5_FPGA_QPC_STATE_INIT);
 837        if (mlx5_fpga_modify_qp(conn->fdev->mdev, conn->fpga_qpn,
 838                                MLX5_FPGA_QPC_STATE, &conn->fpga_qpc))
 839                mlx5_fpga_err(fdev, "Failed to revert FPGA QP to INIT\n");
 840out:
 841        return err;
 842}
 843
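/*
 * Create a connection to the FPGA: derive a link-local IPv6 GID from the
 * local MAC address, create the CQ and QP, create the matching FPGA QP and
 * finally connect the two sides.
 */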
 844struct mlx5_fpga_conn *mlx5_fpga_conn_create(struct mlx5_fpga_device *fdev,
 845                                             struct mlx5_fpga_conn_attr *attr,
 846                                             enum mlx5_ifc_fpga_qp_type qp_type)
 847{
 848        struct mlx5_fpga_conn *ret, *conn;
 849        u8 *remote_mac, *remote_ip;
 850        int err;
 851
 852        if (!attr->recv_cb)
 853                return ERR_PTR(-EINVAL);
 854
 855        conn = kzalloc(sizeof(*conn), GFP_KERNEL);
 856        if (!conn)
 857                return ERR_PTR(-ENOMEM);
 858
 859        conn->fdev = fdev;
 860        INIT_LIST_HEAD(&conn->qp.sq.backlog);
 861
 862        spin_lock_init(&conn->qp.sq.lock);
 863
 864        conn->recv_cb = attr->recv_cb;
 865        conn->cb_arg = attr->cb_arg;
 866
 867        remote_mac = MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, remote_mac_47_32);
 868        err = mlx5_query_nic_vport_mac_address(fdev->mdev, 0, remote_mac);
 869        if (err) {
 870                mlx5_fpga_err(fdev, "Failed to query local MAC: %d\n", err);
 871                ret = ERR_PTR(err);
 872                goto err;
 873        }
 874
 875        /* Build Modified EUI-64 IPv6 address from the MAC address */
 876        remote_ip = MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, remote_ip);
 877        remote_ip[0] = 0xfe;
 878        remote_ip[1] = 0x80;
 879        addrconf_addr_eui48(&remote_ip[8], remote_mac);
 880
 881        err = mlx5_core_reserved_gid_alloc(fdev->mdev, &conn->qp.sgid_index);
 882        if (err) {
 883                mlx5_fpga_err(fdev, "Failed to allocate SGID: %d\n", err);
 884                ret = ERR_PTR(err);
 885                goto err;
 886        }
 887
 888        err = mlx5_core_roce_gid_set(fdev->mdev, conn->qp.sgid_index,
 889                                     MLX5_ROCE_VERSION_2,
 890                                     MLX5_ROCE_L3_TYPE_IPV6,
 891                                     remote_ip, remote_mac, true, 0,
 892                                     MLX5_FPGA_PORT_NUM);
 893        if (err) {
 894                mlx5_fpga_err(fdev, "Failed to set SGID: %d\n", err);
 895                ret = ERR_PTR(err);
 896                goto err_rsvd_gid;
 897        }
 898        mlx5_fpga_dbg(fdev, "Reserved SGID index %u\n", conn->qp.sgid_index);
 899
 900        /* Allow for one cqe per rx/tx wqe, plus one cqe for the next wqe,
 901         * created during processing of the cqe
 902         */
 903        err = mlx5_fpga_conn_create_cq(conn,
 904                                       (attr->tx_size + attr->rx_size) * 2);
 905        if (err) {
 906                mlx5_fpga_err(fdev, "Failed to create CQ: %d\n", err);
 907                ret = ERR_PTR(err);
 908                goto err_gid;
 909        }
 910
 911        mlx5_fpga_conn_arm_cq(conn);
 912
 913        err = mlx5_fpga_conn_create_qp(conn, attr->tx_size, attr->rx_size);
 914        if (err) {
 915                mlx5_fpga_err(fdev, "Failed to create QP: %d\n", err);
 916                ret = ERR_PTR(err);
 917                goto err_cq;
 918        }
 919
 920        MLX5_SET(fpga_qpc, conn->fpga_qpc, state, MLX5_FPGA_QPC_STATE_INIT);
 921        MLX5_SET(fpga_qpc, conn->fpga_qpc, qp_type, qp_type);
 922        MLX5_SET(fpga_qpc, conn->fpga_qpc, st, MLX5_FPGA_QPC_ST_RC);
 923        MLX5_SET(fpga_qpc, conn->fpga_qpc, ether_type, ETH_P_8021Q);
 924        MLX5_SET(fpga_qpc, conn->fpga_qpc, vid, 0);
 925        MLX5_SET(fpga_qpc, conn->fpga_qpc, next_rcv_psn, 1);
 926        MLX5_SET(fpga_qpc, conn->fpga_qpc, next_send_psn, 0);
 927        MLX5_SET(fpga_qpc, conn->fpga_qpc, pkey, MLX5_FPGA_PKEY);
 928        MLX5_SET(fpga_qpc, conn->fpga_qpc, remote_qpn, conn->qp.mqp.qpn);
 929        MLX5_SET(fpga_qpc, conn->fpga_qpc, rnr_retry, 7);
 930        MLX5_SET(fpga_qpc, conn->fpga_qpc, retry_count, 7);
 931
 932        err = mlx5_fpga_create_qp(fdev->mdev, &conn->fpga_qpc,
 933                                  &conn->fpga_qpn);
 934        if (err) {
 935                mlx5_fpga_err(fdev, "Failed to create FPGA RC QP: %d\n", err);
 936                ret = ERR_PTR(err);
 937                goto err_qp;
 938        }
 939
 940        err = mlx5_fpga_conn_connect(conn);
 941        if (err) {
 942                ret = ERR_PTR(err);
 943                goto err_conn;
 944        }
 945
 946        mlx5_fpga_dbg(fdev, "FPGA QPN is %u\n", conn->fpga_qpn);
 947        ret = conn;
 948        goto out;
 949
 950err_conn:
 951        mlx5_fpga_destroy_qp(conn->fdev->mdev, conn->fpga_qpn);
 952err_qp:
 953        mlx5_fpga_conn_destroy_qp(conn);
 954err_cq:
 955        mlx5_fpga_conn_destroy_cq(conn);
 956err_gid:
 957        mlx5_core_roce_gid_set(fdev->mdev, conn->qp.sgid_index, 0, 0, NULL,
 958                               NULL, false, 0, MLX5_FPGA_PORT_NUM);
 959err_rsvd_gid:
 960        mlx5_core_reserved_gid_free(fdev->mdev, conn->qp.sgid_index);
 961err:
 962        kfree(conn);
 963out:
 964        return ret;
 965}
 966
 967void mlx5_fpga_conn_destroy(struct mlx5_fpga_conn *conn)
 968{
 969        struct mlx5_fpga_device *fdev = conn->fdev;
 970        struct mlx5_core_dev *mdev = fdev->mdev;
 971        int err = 0;
 972
 973        conn->qp.active = false;
 974        tasklet_disable(&conn->cq.tasklet);
 975        synchronize_irq(conn->cq.mcq.irqn);
 976
 977        mlx5_fpga_destroy_qp(conn->fdev->mdev, conn->fpga_qpn);
 978        err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2ERR_QP, 0, NULL,
 979                                  &conn->qp.mqp);
 980        if (err)
 981                mlx5_fpga_warn(fdev, "qp_modify 2ERR failed: %d\n", err);
 982        mlx5_fpga_conn_destroy_qp(conn);
 983        mlx5_fpga_conn_destroy_cq(conn);
 984
 985        mlx5_core_roce_gid_set(conn->fdev->mdev, conn->qp.sgid_index, 0, 0,
 986                               NULL, NULL, false, 0, MLX5_FPGA_PORT_NUM);
 987        mlx5_core_reserved_gid_free(conn->fdev->mdev, conn->qp.sgid_index);
 988        kfree(conn);
 989}
 990
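/*
 * Allocate the per-device resources shared by all FPGA connections: RoCE
 * enablement on the NIC vport, a UAR page, a PD and an MKey covering all
 * of memory.
 */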
 991int mlx5_fpga_conn_device_init(struct mlx5_fpga_device *fdev)
 992{
 993        int err;
 994
 995        err = mlx5_nic_vport_enable_roce(fdev->mdev);
 996        if (err) {
 997                mlx5_fpga_err(fdev, "Failed to enable RoCE: %d\n", err);
 998                goto out;
 999        }
1000
1001        fdev->conn_res.uar = mlx5_get_uars_page(fdev->mdev);
1002        if (IS_ERR(fdev->conn_res.uar)) {
1003                err = PTR_ERR(fdev->conn_res.uar);
1004                mlx5_fpga_err(fdev, "get_uars_page failed, %d\n", err);
1005                goto err_roce;
1006        }
1007        mlx5_fpga_dbg(fdev, "Allocated UAR index %u\n",
1008                      fdev->conn_res.uar->index);
1009
1010        err = mlx5_core_alloc_pd(fdev->mdev, &fdev->conn_res.pdn);
1011        if (err) {
1012                mlx5_fpga_err(fdev, "alloc pd failed, %d\n", err);
1013                goto err_uar;
1014        }
1015        mlx5_fpga_dbg(fdev, "Allocated PD %u\n", fdev->conn_res.pdn);
1016
1017        err = mlx5_fpga_conn_create_mkey(fdev->mdev, fdev->conn_res.pdn,
1018                                         &fdev->conn_res.mkey);
1019        if (err) {
1020                mlx5_fpga_err(fdev, "create mkey failed, %d\n", err);
1021                goto err_dealloc_pd;
1022        }
1023        mlx5_fpga_dbg(fdev, "Created mkey 0x%x\n", fdev->conn_res.mkey.key);
1024
1025        return 0;
1026
1027err_dealloc_pd:
1028        mlx5_core_dealloc_pd(fdev->mdev, fdev->conn_res.pdn);
1029err_uar:
1030        mlx5_put_uars_page(fdev->mdev, fdev->conn_res.uar);
1031err_roce:
1032        mlx5_nic_vport_disable_roce(fdev->mdev);
1033out:
1034        return err;
1035}
1036
1037void mlx5_fpga_conn_device_cleanup(struct mlx5_fpga_device *fdev)
1038{
1039        mlx5_core_destroy_mkey(fdev->mdev, &fdev->conn_res.mkey);
1040        mlx5_core_dealloc_pd(fdev->mdev, fdev->conn_res.pdn);
1041        mlx5_put_uars_page(fdev->mdev, fdev->conn_res.uar);
1042        mlx5_nic_vport_disable_roce(fdev->mdev);
1043}
1044