linux/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <net/addrconf.h>
#include <linux/etherdevice.h>
#include <linux/mlx5/vport.h>

#include "mlx5_core.h"
#include "lib/mlx5.h"
#include "fpga/conn.h"

#define MLX5_FPGA_PKEY 0xFFFF
#define MLX5_FPGA_PKEY_INDEX 0 /* RoCE PKEY 0xFFFF is always at index 0 */
#define MLX5_FPGA_RECV_SIZE 2048
#define MLX5_FPGA_PORT_NUM 1
#define MLX5_FPGA_CQ_BUDGET 64

static int mlx5_fpga_conn_map_buf(struct mlx5_fpga_conn *conn,
                                  struct mlx5_fpga_dma_buf *buf)
{
        struct device *dma_device;
        int err = 0;

        if (unlikely(!buf->sg[0].data))
                goto out;

        dma_device = &conn->fdev->mdev->pdev->dev;
        buf->sg[0].dma_addr = dma_map_single(dma_device, buf->sg[0].data,
                                             buf->sg[0].size, buf->dma_dir);
        err = dma_mapping_error(dma_device, buf->sg[0].dma_addr);
        if (unlikely(err)) {
                mlx5_fpga_warn(conn->fdev, "DMA error on sg 0: %d\n", err);
                err = -ENOMEM;
                goto out;
        }

        if (!buf->sg[1].data)
                goto out;

        buf->sg[1].dma_addr = dma_map_single(dma_device, buf->sg[1].data,
                                             buf->sg[1].size, buf->dma_dir);
        err = dma_mapping_error(dma_device, buf->sg[1].dma_addr);
        if (unlikely(err)) {
                mlx5_fpga_warn(conn->fdev, "DMA error on sg 1: %d\n", err);
                dma_unmap_single(dma_device, buf->sg[0].dma_addr,
                                 buf->sg[0].size, buf->dma_dir);
                err = -ENOMEM;
        }

out:
        return err;
}

static void mlx5_fpga_conn_unmap_buf(struct mlx5_fpga_conn *conn,
                                     struct mlx5_fpga_dma_buf *buf)
{
        struct device *dma_device;

        dma_device = &conn->fdev->mdev->pdev->dev;
        if (buf->sg[1].data)
                dma_unmap_single(dma_device, buf->sg[1].dma_addr,
                                 buf->sg[1].size, buf->dma_dir);

        if (likely(buf->sg[0].data))
                dma_unmap_single(dma_device, buf->sg[0].dma_addr,
                                 buf->sg[0].size, buf->dma_dir);
}

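/* Post a receive buffer on the connection's RQ: map it for DMA, fill the
 * next RQ WQE with a single data segment and update the doorbell record.
 * Returns -EBUSY if the RQ is full.
 */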
static int mlx5_fpga_conn_post_recv(struct mlx5_fpga_conn *conn,
                                    struct mlx5_fpga_dma_buf *buf)
{
        struct mlx5_wqe_data_seg *data;
        unsigned int ix;
        int err = 0;

        err = mlx5_fpga_conn_map_buf(conn, buf);
        if (unlikely(err))
                goto out;

        if (unlikely(conn->qp.rq.pc - conn->qp.rq.cc >= conn->qp.rq.size)) {
                mlx5_fpga_conn_unmap_buf(conn, buf);
                return -EBUSY;
        }

        ix = conn->qp.rq.pc & (conn->qp.rq.size - 1);
        data = mlx5_wq_cyc_get_wqe(&conn->qp.wq.rq, ix);
        data->byte_count = cpu_to_be32(buf->sg[0].size);
        data->lkey = cpu_to_be32(conn->fdev->conn_res.mkey.key);
        data->addr = cpu_to_be64(buf->sg[0].dma_addr);

        conn->qp.rq.pc++;
        conn->qp.rq.bufs[ix] = buf;

        /* Make sure that descriptors are written before doorbell record. */
        dma_wmb();
        *conn->qp.wq.rq.db = cpu_to_be32(conn->qp.rq.pc & 0xffff);
out:
        return err;
}

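/* Ring the SQ doorbell for the most recently posted WQE: update the doorbell
 * record with the new producer counter, then write the first 8 bytes of the
 * WQE control segment to the UAR doorbell (BlueFlame) register.
 */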
static void mlx5_fpga_conn_notify_hw(struct mlx5_fpga_conn *conn, void *wqe)
{
        /* ensure wqe is visible to device before updating doorbell record */
        dma_wmb();
        *conn->qp.wq.sq.db = cpu_to_be32(conn->qp.sq.pc);
        /* Make sure that doorbell record is visible before ringing */
        wmb();
        mlx5_write64(wqe, conn->fdev->conn_res.uar->map + MLX5_BF_OFFSET, NULL);
}

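/* Build a SEND WQE from the buffer's scatter list on the next SQ slot and
 * ring the doorbell. Called with the SQ lock held and with room already
 * guaranteed in the SQ.
 */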
static void mlx5_fpga_conn_post_send(struct mlx5_fpga_conn *conn,
                                     struct mlx5_fpga_dma_buf *buf)
{
        struct mlx5_wqe_ctrl_seg *ctrl;
        struct mlx5_wqe_data_seg *data;
        unsigned int ix, sgi;
        int size = 1;

        ix = conn->qp.sq.pc & (conn->qp.sq.size - 1);

        ctrl = mlx5_wq_cyc_get_wqe(&conn->qp.wq.sq, ix);
        data = (void *)(ctrl + 1);

        for (sgi = 0; sgi < ARRAY_SIZE(buf->sg); sgi++) {
                if (!buf->sg[sgi].data)
                        break;
                data->byte_count = cpu_to_be32(buf->sg[sgi].size);
                data->lkey = cpu_to_be32(conn->fdev->conn_res.mkey.key);
                data->addr = cpu_to_be64(buf->sg[sgi].dma_addr);
                data++;
                size++;
        }

        ctrl->imm = 0;
        ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
        ctrl->opmod_idx_opcode = cpu_to_be32(((conn->qp.sq.pc & 0xffff) << 8) |
                                             MLX5_OPCODE_SEND);
        ctrl->qpn_ds = cpu_to_be32(size | (conn->qp.mqp.qpn << 8));

        conn->qp.sq.pc++;
        conn->qp.sq.bufs[ix] = buf;
        mlx5_fpga_conn_notify_hw(conn, ctrl);
}

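/* Send a buffer over the connection. The buffer is DMA mapped and posted to
 * the SQ, or queued on the backlog if the SQ is currently full. The buffer's
 * complete() callback, if set, is invoked from the CQ handler once the
 * hardware is done with it. Returns -ENOTCONN if the QP is not active.
 */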
int mlx5_fpga_conn_send(struct mlx5_fpga_conn *conn,
                        struct mlx5_fpga_dma_buf *buf)
{
        unsigned long flags;
        int err;

        if (!conn->qp.active)
                return -ENOTCONN;

        buf->dma_dir = DMA_TO_DEVICE;
        err = mlx5_fpga_conn_map_buf(conn, buf);
        if (err)
                return err;

        spin_lock_irqsave(&conn->qp.sq.lock, flags);

        if (conn->qp.sq.pc - conn->qp.sq.cc >= conn->qp.sq.size) {
                list_add_tail(&buf->list, &conn->qp.sq.backlog);
                goto out_unlock;
        }

        mlx5_fpga_conn_post_send(conn, buf);

out_unlock:
        spin_unlock_irqrestore(&conn->qp.sq.lock, flags);
        return err;
}

static int mlx5_fpga_conn_post_recv_buf(struct mlx5_fpga_conn *conn)
{
        struct mlx5_fpga_dma_buf *buf;
        int err;

        buf = kzalloc(sizeof(*buf) + MLX5_FPGA_RECV_SIZE, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        buf->sg[0].data = (void *)(buf + 1);
        buf->sg[0].size = MLX5_FPGA_RECV_SIZE;
        buf->dma_dir = DMA_FROM_DEVICE;

        err = mlx5_fpga_conn_post_recv(conn, buf);
        if (err)
                kfree(buf);

        return err;
}

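/* Create a physical-address (PA) memory key with local read/write access and
 * length64 set, so send/receive WQEs can reference their buffers directly by
 * DMA address.
 */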
static int mlx5_fpga_conn_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
                                      struct mlx5_core_mkey *mkey)
{
        int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
        void *mkc;
        u32 *in;
        int err;

        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
        MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
        MLX5_SET(mkc, mkc, lw, 1);
        MLX5_SET(mkc, mkc, lr, 1);

        MLX5_SET(mkc, mkc, pd, pdn);
        MLX5_SET(mkc, mkc, length64, 1);
        MLX5_SET(mkc, mkc, qpn, 0xffffff);

        err = mlx5_core_create_mkey(mdev, mkey, in, inlen);

        kvfree(in);
        return err;
}

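/* Handle a receive completion: pass the received data to the connection's
 * recv_cb() and re-post the buffer on the RQ, or free it if the completion
 * carries an error or the QP is no longer active.
 */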
static void mlx5_fpga_conn_rq_cqe(struct mlx5_fpga_conn *conn,
                                  struct mlx5_cqe64 *cqe, u8 status)
{
        struct mlx5_fpga_dma_buf *buf;
        int ix, err;

        ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.rq.size - 1);
        buf = conn->qp.rq.bufs[ix];
        conn->qp.rq.bufs[ix] = NULL;
        conn->qp.rq.cc++;

        if (unlikely(status && (status != MLX5_CQE_SYNDROME_WR_FLUSH_ERR)))
                mlx5_fpga_warn(conn->fdev, "RQ buf %p on FPGA QP %u completion status %d\n",
                               buf, conn->fpga_qpn, status);
        else
                mlx5_fpga_dbg(conn->fdev, "RQ buf %p on FPGA QP %u completion status %d\n",
                              buf, conn->fpga_qpn, status);

        mlx5_fpga_conn_unmap_buf(conn, buf);

        if (unlikely(status || !conn->qp.active)) {
                conn->qp.active = false;
                kfree(buf);
                return;
        }

        buf->sg[0].size = be32_to_cpu(cqe->byte_cnt);
        mlx5_fpga_dbg(conn->fdev, "Message with %u bytes received successfully\n",
                      buf->sg[0].size);
        conn->recv_cb(conn->cb_arg, buf);

        buf->sg[0].size = MLX5_FPGA_RECV_SIZE;
        err = mlx5_fpga_conn_post_recv(conn, buf);
        if (unlikely(err)) {
                mlx5_fpga_warn(conn->fdev,
                               "Failed to re-post recv buf: %d\n", err);
                kfree(buf);
        }
}

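/* Handle a send completion: release the SQ slot, post the next backlogged
 * send (if any) while still holding the SQ lock, then unmap the buffer and
 * invoke its completion callback.
 */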
static void mlx5_fpga_conn_sq_cqe(struct mlx5_fpga_conn *conn,
                                  struct mlx5_cqe64 *cqe, u8 status)
{
        struct mlx5_fpga_dma_buf *buf, *nextbuf;
        unsigned long flags;
        int ix;

        spin_lock_irqsave(&conn->qp.sq.lock, flags);

        ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.sq.size - 1);
        buf = conn->qp.sq.bufs[ix];
        conn->qp.sq.bufs[ix] = NULL;
        conn->qp.sq.cc++;

        /* Handle backlog still under the spinlock to ensure message post order */
        if (unlikely(!list_empty(&conn->qp.sq.backlog))) {
                if (likely(conn->qp.active)) {
                        nextbuf = list_first_entry(&conn->qp.sq.backlog,
                                                   struct mlx5_fpga_dma_buf, list);
                        list_del(&nextbuf->list);
                        mlx5_fpga_conn_post_send(conn, nextbuf);
                }
        }

        spin_unlock_irqrestore(&conn->qp.sq.lock, flags);

        if (unlikely(status && (status != MLX5_CQE_SYNDROME_WR_FLUSH_ERR)))
                mlx5_fpga_warn(conn->fdev, "SQ buf %p on FPGA QP %u completion status %d\n",
                               buf, conn->fpga_qpn, status);
        else
                mlx5_fpga_dbg(conn->fdev, "SQ buf %p on FPGA QP %u completion status %d\n",
                              buf, conn->fpga_qpn, status);

        mlx5_fpga_conn_unmap_buf(conn, buf);

        if (likely(buf->complete))
                buf->complete(conn, conn->fdev, buf, status);

        if (unlikely(status))
                conn->qp.active = false;
}

static void mlx5_fpga_conn_handle_cqe(struct mlx5_fpga_conn *conn,
                                      struct mlx5_cqe64 *cqe)
{
        u8 opcode, status = 0;

        opcode = cqe->op_own >> 4;

        switch (opcode) {
        case MLX5_CQE_REQ_ERR:
                status = ((struct mlx5_err_cqe *)cqe)->syndrome;
                /* Fall through */
        case MLX5_CQE_REQ:
                mlx5_fpga_conn_sq_cqe(conn, cqe, status);
                break;

        case MLX5_CQE_RESP_ERR:
                status = ((struct mlx5_err_cqe *)cqe)->syndrome;
                /* Fall through */
        case MLX5_CQE_RESP_SEND:
                mlx5_fpga_conn_rq_cqe(conn, cqe, status);
                break;
        default:
                mlx5_fpga_warn(conn->fdev, "Unexpected cqe opcode %u\n",
                               opcode);
        }
}

static void mlx5_fpga_conn_arm_cq(struct mlx5_fpga_conn *conn)
{
        mlx5_cq_arm(&conn->cq.mcq, MLX5_CQ_DB_REQ_NOT,
                    conn->fdev->conn_res.uar->map, conn->cq.wq.cc);
}

static void mlx5_fpga_conn_cq_event(struct mlx5_core_cq *mcq,
                                    enum mlx5_event event)
{
        struct mlx5_fpga_conn *conn;

        conn = container_of(mcq, struct mlx5_fpga_conn, cq.mcq);
        mlx5_fpga_warn(conn->fdev, "CQ event %u on CQ #%u\n", event, mcq->cqn);
}

static void mlx5_fpga_conn_event(struct mlx5_core_qp *mqp, int event)
{
        struct mlx5_fpga_conn *conn;

        conn = container_of(mqp, struct mlx5_fpga_conn, qp.mqp);
        mlx5_fpga_warn(conn->fdev, "QP event %u on QP #%u\n", event, mqp->qpn);
}

static inline void mlx5_fpga_conn_cqes(struct mlx5_fpga_conn *conn,
                                       unsigned int budget)
{
        struct mlx5_cqe64 *cqe;

        while (budget) {
                cqe = mlx5_cqwq_get_cqe(&conn->cq.wq);
                if (!cqe)
                        break;

                budget--;
                mlx5_cqwq_pop(&conn->cq.wq);
                mlx5_fpga_conn_handle_cqe(conn, cqe);
                mlx5_cqwq_update_db_record(&conn->cq.wq);
        }
        if (!budget) {
                tasklet_schedule(&conn->cq.tasklet);
                return;
        }

        mlx5_fpga_dbg(conn->fdev, "Re-arming CQ with cc# %u\n", conn->cq.wq.cc);
        /* ensure cq space is freed before enabling more cqes */
        wmb();
        mlx5_fpga_conn_arm_cq(conn);
}

static void mlx5_fpga_conn_cq_tasklet(unsigned long data)
{
        struct mlx5_fpga_conn *conn = (void *)data;

        if (unlikely(!conn->qp.active))
                return;
        mlx5_fpga_conn_cqes(conn, MLX5_FPGA_CQ_BUDGET);
}

static void mlx5_fpga_conn_cq_complete(struct mlx5_core_cq *mcq)
{
        struct mlx5_fpga_conn *conn;

        conn = container_of(mcq, struct mlx5_fpga_conn, cq.mcq);
        if (unlikely(!conn->qp.active))
                return;
        mlx5_fpga_conn_cqes(conn, MLX5_FPGA_CQ_BUDGET);
}

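/* Create the completion queue shared by the SQ and RQ. CQEs are initialized
 * to the invalid, hardware-owned state, and completions beyond the polling
 * budget are deferred to a tasklet.
 */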
static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
{
        struct mlx5_fpga_device *fdev = conn->fdev;
        struct mlx5_core_dev *mdev = fdev->mdev;
        u32 temp_cqc[MLX5_ST_SZ_DW(cqc)] = {0};
        struct mlx5_wq_param wqp;
        struct mlx5_cqe64 *cqe;
        int inlen, err, eqn;
        unsigned int irqn;
        void *cqc, *in;
        __be64 *pas;
        u32 i;

        cq_size = roundup_pow_of_two(cq_size);
        MLX5_SET(cqc, temp_cqc, log_cq_size, ilog2(cq_size));

        wqp.buf_numa_node = mdev->priv.numa_node;
        wqp.db_numa_node  = mdev->priv.numa_node;

        err = mlx5_cqwq_create(mdev, &wqp, temp_cqc, &conn->cq.wq,
                               &conn->cq.wq_ctrl);
        if (err)
                return err;

        for (i = 0; i < mlx5_cqwq_get_size(&conn->cq.wq); i++) {
                cqe = mlx5_cqwq_get_wqe(&conn->cq.wq, i);
                cqe->op_own = MLX5_CQE_INVALID << 4 | MLX5_CQE_OWNER_MASK;
        }

        inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
                sizeof(u64) * conn->cq.wq_ctrl.buf.npages;
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in) {
                err = -ENOMEM;
                goto err_cqwq;
        }

        err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn);
        if (err) {
                kvfree(in);
                goto err_cqwq;
        }

        cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
        MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size));
        MLX5_SET(cqc, cqc, c_eqn, eqn);
        MLX5_SET(cqc, cqc, uar_page, fdev->conn_res.uar->index);
        MLX5_SET(cqc, cqc, log_page_size, conn->cq.wq_ctrl.buf.page_shift -
                           MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(cqc, cqc, dbr_addr, conn->cq.wq_ctrl.db.dma);

        pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
        mlx5_fill_page_frag_array(&conn->cq.wq_ctrl.buf, pas);

        err = mlx5_core_create_cq(mdev, &conn->cq.mcq, in, inlen);
        kvfree(in);

        if (err)
                goto err_cqwq;

        conn->cq.mcq.cqe_sz     = 64;
        conn->cq.mcq.set_ci_db  = conn->cq.wq_ctrl.db.db;
        conn->cq.mcq.arm_db     = conn->cq.wq_ctrl.db.db + 1;
        *conn->cq.mcq.set_ci_db = 0;
        *conn->cq.mcq.arm_db    = 0;
        conn->cq.mcq.vector     = 0;
        conn->cq.mcq.comp       = mlx5_fpga_conn_cq_complete;
        conn->cq.mcq.event      = mlx5_fpga_conn_cq_event;
        conn->cq.mcq.irqn       = irqn;
        conn->cq.mcq.uar        = fdev->conn_res.uar;
        tasklet_init(&conn->cq.tasklet, mlx5_fpga_conn_cq_tasklet,
                     (unsigned long)conn);

        mlx5_fpga_dbg(fdev, "Created CQ #0x%x\n", conn->cq.mcq.cqn);

        goto out;

err_cqwq:
        mlx5_wq_destroy(&conn->cq.wq_ctrl);
out:
        return err;
}

static void mlx5_fpga_conn_destroy_cq(struct mlx5_fpga_conn *conn)
{
        tasklet_disable(&conn->cq.tasklet);
        tasklet_kill(&conn->cq.tasklet);
        mlx5_core_destroy_cq(conn->fdev->mdev, &conn->cq.mcq);
        mlx5_wq_destroy(&conn->cq.wq_ctrl);
}

static int mlx5_fpga_conn_create_wq(struct mlx5_fpga_conn *conn, void *qpc)
{
        struct mlx5_fpga_device *fdev = conn->fdev;
        struct mlx5_core_dev *mdev = fdev->mdev;
        struct mlx5_wq_param wqp;

        wqp.buf_numa_node = mdev->priv.numa_node;
        wqp.db_numa_node  = mdev->priv.numa_node;

        return mlx5_wq_qp_create(mdev, &wqp, qpc, &conn->qp.wq,
                                 &conn->qp.wq_ctrl);
}

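/* Create the RC QP used to talk to the FPGA: allocate the work queues and the
 * per-slot buffer tracking arrays, then issue the CREATE_QP command. Both SQ
 * and RQ completions are reported on the connection's single CQ.
 */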
static int mlx5_fpga_conn_create_qp(struct mlx5_fpga_conn *conn,
                                    unsigned int tx_size, unsigned int rx_size)
{
        struct mlx5_fpga_device *fdev = conn->fdev;
        struct mlx5_core_dev *mdev = fdev->mdev;
        u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {0};
        void *in = NULL, *qpc;
        int err, inlen;

        conn->qp.rq.pc = 0;
        conn->qp.rq.cc = 0;
        conn->qp.rq.size = roundup_pow_of_two(rx_size);
        conn->qp.sq.pc = 0;
        conn->qp.sq.cc = 0;
        conn->qp.sq.size = roundup_pow_of_two(tx_size);

        MLX5_SET(qpc, temp_qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4);
        MLX5_SET(qpc, temp_qpc, log_rq_size, ilog2(conn->qp.rq.size));
        MLX5_SET(qpc, temp_qpc, log_sq_size, ilog2(conn->qp.sq.size));
        err = mlx5_fpga_conn_create_wq(conn, temp_qpc);
        if (err)
                goto out;

        conn->qp.rq.bufs = kvcalloc(conn->qp.rq.size,
                                    sizeof(conn->qp.rq.bufs[0]),
                                    GFP_KERNEL);
        if (!conn->qp.rq.bufs) {
                err = -ENOMEM;
                goto err_wq;
        }

        conn->qp.sq.bufs = kvcalloc(conn->qp.sq.size,
                                    sizeof(conn->qp.sq.bufs[0]),
                                    GFP_KERNEL);
        if (!conn->qp.sq.bufs) {
                err = -ENOMEM;
                goto err_rq_bufs;
        }

        inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
                MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) *
                conn->qp.wq_ctrl.buf.npages;
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in) {
                err = -ENOMEM;
                goto err_sq_bufs;
        }

        qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
        MLX5_SET(qpc, qpc, uar_page, fdev->conn_res.uar->index);
        MLX5_SET(qpc, qpc, log_page_size,
                 conn->qp.wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET(qpc, qpc, fre, 1);
        MLX5_SET(qpc, qpc, rlky, 1);
        MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
        MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
        MLX5_SET(qpc, qpc, pd, fdev->conn_res.pdn);
        MLX5_SET(qpc, qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4);
        MLX5_SET(qpc, qpc, log_rq_size, ilog2(conn->qp.rq.size));
        MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
        MLX5_SET(qpc, qpc, log_sq_size, ilog2(conn->qp.sq.size));
        MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn);
        MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn);
        MLX5_SET64(qpc, qpc, dbr_addr, conn->qp.wq_ctrl.db.dma);
        if (MLX5_CAP_GEN(mdev, cqe_version) == 1)
                MLX5_SET(qpc, qpc, user_index, 0xFFFFFF);

        mlx5_fill_page_frag_array(&conn->qp.wq_ctrl.buf,
                                  (__be64 *)MLX5_ADDR_OF(create_qp_in, in, pas));

        err = mlx5_core_create_qp(mdev, &conn->qp.mqp, in, inlen);
        if (err)
                goto err_sq_bufs;

        conn->qp.mqp.event = mlx5_fpga_conn_event;
        mlx5_fpga_dbg(fdev, "Created QP #0x%x\n", conn->qp.mqp.qpn);

        goto out;

err_sq_bufs:
        kvfree(conn->qp.sq.bufs);
err_rq_bufs:
        kvfree(conn->qp.rq.bufs);
err_wq:
        mlx5_wq_destroy(&conn->qp.wq_ctrl);
out:
        kvfree(in);
        return err;
}

static void mlx5_fpga_conn_free_recv_bufs(struct mlx5_fpga_conn *conn)
{
        int ix;

        for (ix = 0; ix < conn->qp.rq.size; ix++) {
                if (!conn->qp.rq.bufs[ix])
                        continue;
                mlx5_fpga_conn_unmap_buf(conn, conn->qp.rq.bufs[ix]);
                kfree(conn->qp.rq.bufs[ix]);
                conn->qp.rq.bufs[ix] = NULL;
        }
}

static void mlx5_fpga_conn_flush_send_bufs(struct mlx5_fpga_conn *conn)
{
        struct mlx5_fpga_dma_buf *buf, *temp;
        int ix;

        for (ix = 0; ix < conn->qp.sq.size; ix++) {
                buf = conn->qp.sq.bufs[ix];
                if (!buf)
                        continue;
                conn->qp.sq.bufs[ix] = NULL;
                mlx5_fpga_conn_unmap_buf(conn, buf);
                if (!buf->complete)
                        continue;
                buf->complete(conn, conn->fdev, buf, MLX5_CQE_SYNDROME_WR_FLUSH_ERR);
        }
        list_for_each_entry_safe(buf, temp, &conn->qp.sq.backlog, list) {
                mlx5_fpga_conn_unmap_buf(conn, buf);
                if (!buf->complete)
                        continue;
                buf->complete(conn, conn->fdev, buf, MLX5_CQE_SYNDROME_WR_FLUSH_ERR);
        }
}

static void mlx5_fpga_conn_destroy_qp(struct mlx5_fpga_conn *conn)
{
        mlx5_core_destroy_qp(conn->fdev->mdev, &conn->qp.mqp);
        mlx5_fpga_conn_free_recv_bufs(conn);
        mlx5_fpga_conn_flush_send_bufs(conn);
        kvfree(conn->qp.sq.bufs);
        kvfree(conn->qp.rq.bufs);
        mlx5_wq_destroy(&conn->qp.wq_ctrl);
}

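/* QP state transition helpers. During connect the local QP is walked through
 * RESET -> INIT -> RTR -> RTS, taking the remote QPN, PSNs, MAC and IP of the
 * FPGA-side QP from conn->fpga_qpc.
 */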
static inline int mlx5_fpga_conn_reset_qp(struct mlx5_fpga_conn *conn)
{
        struct mlx5_core_dev *mdev = conn->fdev->mdev;

        mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to RST\n", conn->qp.mqp.qpn);

        return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2RST_QP, 0, NULL,
                                   &conn->qp.mqp);
}

static inline int mlx5_fpga_conn_init_qp(struct mlx5_fpga_conn *conn)
{
        struct mlx5_fpga_device *fdev = conn->fdev;
        struct mlx5_core_dev *mdev = fdev->mdev;
        u32 *qpc = NULL;
        int err;

        mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to INIT\n", conn->qp.mqp.qpn);

        qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
        if (!qpc) {
                err = -ENOMEM;
                goto out;
        }

        MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
        MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
        MLX5_SET(qpc, qpc, primary_address_path.pkey_index, MLX5_FPGA_PKEY_INDEX);
        MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, MLX5_FPGA_PORT_NUM);
        MLX5_SET(qpc, qpc, pd, conn->fdev->conn_res.pdn);
        MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn);
        MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn);
        MLX5_SET64(qpc, qpc, dbr_addr, conn->qp.wq_ctrl.db.dma);

        err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, qpc,
                                  &conn->qp.mqp);
        if (err) {
                mlx5_fpga_warn(fdev, "qp_modify RST2INIT failed: %d\n", err);
                goto out;
        }

out:
        kfree(qpc);
        return err;
}

static inline int mlx5_fpga_conn_rtr_qp(struct mlx5_fpga_conn *conn)
{
        struct mlx5_fpga_device *fdev = conn->fdev;
        struct mlx5_core_dev *mdev = fdev->mdev;
        u32 *qpc = NULL;
        int err;

        mlx5_fpga_dbg(conn->fdev, "QP RTR\n");

        qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
        if (!qpc) {
                err = -ENOMEM;
                goto out;
        }

        MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_1K_BYTES);
        MLX5_SET(qpc, qpc, log_msg_max, (u8)MLX5_CAP_GEN(mdev, log_max_msg));
        MLX5_SET(qpc, qpc, remote_qpn, conn->fpga_qpn);
        MLX5_SET(qpc, qpc, next_rcv_psn,
                 MLX5_GET(fpga_qpc, conn->fpga_qpc, next_send_psn));
        MLX5_SET(qpc, qpc, primary_address_path.pkey_index, MLX5_FPGA_PKEY_INDEX);
        MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, MLX5_FPGA_PORT_NUM);
        ether_addr_copy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rmac_47_32),
                        MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, fpga_mac_47_32));
        MLX5_SET(qpc, qpc, primary_address_path.udp_sport,
                 MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port));
        MLX5_SET(qpc, qpc, primary_address_path.src_addr_index,
                 conn->qp.sgid_index);
        MLX5_SET(qpc, qpc, primary_address_path.hop_limit, 0);
        memcpy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rgid_rip),
               MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, fpga_ip),
               MLX5_FLD_SZ_BYTES(qpc, primary_address_path.rgid_rip));

        err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, qpc,
                                  &conn->qp.mqp);
        if (err) {
                mlx5_fpga_warn(fdev, "qp_modify INIT2RTR failed: %d\n", err);
                goto out;
        }

out:
        kfree(qpc);
        return err;
}

static inline int mlx5_fpga_conn_rts_qp(struct mlx5_fpga_conn *conn)
{
        struct mlx5_fpga_device *fdev = conn->fdev;
        struct mlx5_core_dev *mdev = fdev->mdev;
        u32 *qpc = NULL;
        u32 opt_mask;
        int err;

        mlx5_fpga_dbg(conn->fdev, "QP RTS\n");

        qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
        if (!qpc) {
                err = -ENOMEM;
                goto out;
        }

        MLX5_SET(qpc, qpc, log_ack_req_freq, 8);
        MLX5_SET(qpc, qpc, min_rnr_nak, 0x12);
        MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 0x12); /* ~1.07s */
        MLX5_SET(qpc, qpc, next_send_psn,
                 MLX5_GET(fpga_qpc, conn->fpga_qpc, next_rcv_psn));
        MLX5_SET(qpc, qpc, retry_count, 7);
        MLX5_SET(qpc, qpc, rnr_retry, 7); /* Infinite retry if RNR NACK */

        opt_mask = MLX5_QP_OPTPAR_RNR_TIMEOUT;
        err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, opt_mask, qpc,
                                  &conn->qp.mqp);
        if (err) {
                mlx5_fpga_warn(fdev, "qp_modify RTR2RTS failed: %d\n", err);
                goto out;
        }

out:
        kfree(qpc);
        return err;
}

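/* Bring up both ends of the connection: activate the FPGA-side QP, walk the
 * local QP through RESET, INIT, RTR and RTS, and pre-post receive buffers
 * until the RQ is full. On failure the FPGA QP is reverted to INIT.
 */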
static int mlx5_fpga_conn_connect(struct mlx5_fpga_conn *conn)
{
        struct mlx5_fpga_device *fdev = conn->fdev;
        int err;

        MLX5_SET(fpga_qpc, conn->fpga_qpc, state, MLX5_FPGA_QPC_STATE_ACTIVE);
        err = mlx5_fpga_modify_qp(conn->fdev->mdev, conn->fpga_qpn,
                                  MLX5_FPGA_QPC_STATE, &conn->fpga_qpc);
        if (err) {
                mlx5_fpga_err(fdev, "Failed to activate FPGA RC QP: %d\n", err);
                goto out;
        }

        err = mlx5_fpga_conn_reset_qp(conn);
        if (err) {
                mlx5_fpga_err(fdev, "Failed to change QP state to reset\n");
                goto err_fpga_qp;
        }

        err = mlx5_fpga_conn_init_qp(conn);
        if (err) {
                mlx5_fpga_err(fdev, "Failed to modify QP from RESET to INIT\n");
                goto err_fpga_qp;
        }
        conn->qp.active = true;

        while (!mlx5_fpga_conn_post_recv_buf(conn))
                ;

        err = mlx5_fpga_conn_rtr_qp(conn);
        if (err) {
                mlx5_fpga_err(fdev, "Failed to change QP state from INIT to RTR\n");
                goto err_recv_bufs;
        }

        err = mlx5_fpga_conn_rts_qp(conn);
        if (err) {
                mlx5_fpga_err(fdev, "Failed to change QP state from RTR to RTS\n");
                goto err_recv_bufs;
        }
        goto out;

err_recv_bufs:
        mlx5_fpga_conn_free_recv_bufs(conn);
err_fpga_qp:
        MLX5_SET(fpga_qpc, conn->fpga_qpc, state, MLX5_FPGA_QPC_STATE_INIT);
        if (mlx5_fpga_modify_qp(conn->fdev->mdev, conn->fpga_qpn,
                                MLX5_FPGA_QPC_STATE, &conn->fpga_qpc))
                mlx5_fpga_err(fdev, "Failed to revert FPGA QP to INIT\n");
out:
        return err;
}

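/* Create a connection to the FPGA. The local port's MAC is used to build a
 * link-local IPv6 GID that also serves as the FPGA QP's "remote" address; a
 * reserved GID index, CQ, local QP and FPGA-side QP are then created and the
 * two QPs are connected.
 */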
struct mlx5_fpga_conn *mlx5_fpga_conn_create(struct mlx5_fpga_device *fdev,
                                             struct mlx5_fpga_conn_attr *attr,
                                             enum mlx5_ifc_fpga_qp_type qp_type)
{
        struct mlx5_fpga_conn *ret, *conn;
        u8 *remote_mac, *remote_ip;
        int err;

        if (!attr->recv_cb)
                return ERR_PTR(-EINVAL);

        conn = kzalloc(sizeof(*conn), GFP_KERNEL);
        if (!conn)
                return ERR_PTR(-ENOMEM);

        conn->fdev = fdev;
        INIT_LIST_HEAD(&conn->qp.sq.backlog);

        spin_lock_init(&conn->qp.sq.lock);

        conn->recv_cb = attr->recv_cb;
        conn->cb_arg = attr->cb_arg;

        remote_mac = MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, remote_mac_47_32);
        err = mlx5_query_nic_vport_mac_address(fdev->mdev, 0, remote_mac);
        if (err) {
                mlx5_fpga_err(fdev, "Failed to query local MAC: %d\n", err);
                ret = ERR_PTR(err);
                goto err;
        }

        /* Build Modified EUI-64 IPv6 address from the MAC address */
        remote_ip = MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, remote_ip);
        remote_ip[0] = 0xfe;
        remote_ip[1] = 0x80;
        addrconf_addr_eui48(&remote_ip[8], remote_mac);

        err = mlx5_core_reserved_gid_alloc(fdev->mdev, &conn->qp.sgid_index);
        if (err) {
                mlx5_fpga_err(fdev, "Failed to allocate SGID: %d\n", err);
                ret = ERR_PTR(err);
                goto err;
        }

        err = mlx5_core_roce_gid_set(fdev->mdev, conn->qp.sgid_index,
                                     MLX5_ROCE_VERSION_2,
                                     MLX5_ROCE_L3_TYPE_IPV6,
                                     remote_ip, remote_mac, true, 0,
                                     MLX5_FPGA_PORT_NUM);
        if (err) {
                mlx5_fpga_err(fdev, "Failed to set SGID: %d\n", err);
                ret = ERR_PTR(err);
                goto err_rsvd_gid;
        }
        mlx5_fpga_dbg(fdev, "Reserved SGID index %u\n", conn->qp.sgid_index);

        /* Allow for one cqe per rx/tx wqe, plus one cqe for the next wqe,
         * created during processing of the cqe
         */
        err = mlx5_fpga_conn_create_cq(conn,
                                       (attr->tx_size + attr->rx_size) * 2);
        if (err) {
                mlx5_fpga_err(fdev, "Failed to create CQ: %d\n", err);
                ret = ERR_PTR(err);
                goto err_gid;
        }

        mlx5_fpga_conn_arm_cq(conn);

        err = mlx5_fpga_conn_create_qp(conn, attr->tx_size, attr->rx_size);
        if (err) {
                mlx5_fpga_err(fdev, "Failed to create QP: %d\n", err);
                ret = ERR_PTR(err);
                goto err_cq;
        }

        MLX5_SET(fpga_qpc, conn->fpga_qpc, state, MLX5_FPGA_QPC_STATE_INIT);
        MLX5_SET(fpga_qpc, conn->fpga_qpc, qp_type, qp_type);
        MLX5_SET(fpga_qpc, conn->fpga_qpc, st, MLX5_FPGA_QPC_ST_RC);
        MLX5_SET(fpga_qpc, conn->fpga_qpc, ether_type, ETH_P_8021Q);
        MLX5_SET(fpga_qpc, conn->fpga_qpc, vid, 0);
        MLX5_SET(fpga_qpc, conn->fpga_qpc, next_rcv_psn, 1);
        MLX5_SET(fpga_qpc, conn->fpga_qpc, next_send_psn, 0);
        MLX5_SET(fpga_qpc, conn->fpga_qpc, pkey, MLX5_FPGA_PKEY);
        MLX5_SET(fpga_qpc, conn->fpga_qpc, remote_qpn, conn->qp.mqp.qpn);
        MLX5_SET(fpga_qpc, conn->fpga_qpc, rnr_retry, 7);
        MLX5_SET(fpga_qpc, conn->fpga_qpc, retry_count, 7);

        err = mlx5_fpga_create_qp(fdev->mdev, &conn->fpga_qpc,
                                  &conn->fpga_qpn);
        if (err) {
                mlx5_fpga_err(fdev, "Failed to create FPGA RC QP: %d\n", err);
                ret = ERR_PTR(err);
                goto err_qp;
        }

        err = mlx5_fpga_conn_connect(conn);
        if (err) {
                ret = ERR_PTR(err);
                goto err_conn;
        }

        mlx5_fpga_dbg(fdev, "FPGA QPN is %u\n", conn->fpga_qpn);
        ret = conn;
        goto out;

err_conn:
        mlx5_fpga_destroy_qp(conn->fdev->mdev, conn->fpga_qpn);
err_qp:
        mlx5_fpga_conn_destroy_qp(conn);
err_cq:
        mlx5_fpga_conn_destroy_cq(conn);
err_gid:
        mlx5_core_roce_gid_set(fdev->mdev, conn->qp.sgid_index, 0, 0, NULL,
                               NULL, false, 0, MLX5_FPGA_PORT_NUM);
err_rsvd_gid:
        mlx5_core_reserved_gid_free(fdev->mdev, conn->qp.sgid_index);
err:
        kfree(conn);
out:
        return ret;
}

void mlx5_fpga_conn_destroy(struct mlx5_fpga_conn *conn)
{
        struct mlx5_fpga_device *fdev = conn->fdev;
        struct mlx5_core_dev *mdev = fdev->mdev;
        int err = 0;

        conn->qp.active = false;
        tasklet_disable(&conn->cq.tasklet);
        synchronize_irq(conn->cq.mcq.irqn);

        mlx5_fpga_destroy_qp(conn->fdev->mdev, conn->fpga_qpn);
        err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2ERR_QP, 0, NULL,
                                  &conn->qp.mqp);
        if (err)
                mlx5_fpga_warn(fdev, "qp_modify 2ERR failed: %d\n", err);
        mlx5_fpga_conn_destroy_qp(conn);
        mlx5_fpga_conn_destroy_cq(conn);

        mlx5_core_roce_gid_set(conn->fdev->mdev, conn->qp.sgid_index, 0, 0,
                               NULL, NULL, false, 0, MLX5_FPGA_PORT_NUM);
        mlx5_core_reserved_gid_free(conn->fdev->mdev, conn->qp.sgid_index);
        kfree(conn);
}

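/* Per-device setup for FPGA connections: enable RoCE on the NIC vport and
 * allocate the UAR, PD and MKey shared by all connections on this device.
 */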
int mlx5_fpga_conn_device_init(struct mlx5_fpga_device *fdev)
{
        int err;

        err = mlx5_nic_vport_enable_roce(fdev->mdev);
        if (err) {
                mlx5_fpga_err(fdev, "Failed to enable RoCE: %d\n", err);
                goto out;
        }

        fdev->conn_res.uar = mlx5_get_uars_page(fdev->mdev);
        if (IS_ERR(fdev->conn_res.uar)) {
                err = PTR_ERR(fdev->conn_res.uar);
                mlx5_fpga_err(fdev, "get_uars_page failed, %d\n", err);
                goto err_roce;
        }
        mlx5_fpga_dbg(fdev, "Allocated UAR index %u\n",
                      fdev->conn_res.uar->index);

        err = mlx5_core_alloc_pd(fdev->mdev, &fdev->conn_res.pdn);
        if (err) {
                mlx5_fpga_err(fdev, "alloc pd failed, %d\n", err);
                goto err_uar;
        }
        mlx5_fpga_dbg(fdev, "Allocated PD %u\n", fdev->conn_res.pdn);

        err = mlx5_fpga_conn_create_mkey(fdev->mdev, fdev->conn_res.pdn,
                                         &fdev->conn_res.mkey);
        if (err) {
                mlx5_fpga_err(fdev, "create mkey failed, %d\n", err);
                goto err_dealloc_pd;
        }
        mlx5_fpga_dbg(fdev, "Created mkey 0x%x\n", fdev->conn_res.mkey.key);

        return 0;

err_dealloc_pd:
        mlx5_core_dealloc_pd(fdev->mdev, fdev->conn_res.pdn);
err_uar:
        mlx5_put_uars_page(fdev->mdev, fdev->conn_res.uar);
err_roce:
        mlx5_nic_vport_disable_roce(fdev->mdev);
out:
        return err;
}

void mlx5_fpga_conn_device_cleanup(struct mlx5_fpga_device *fdev)
{
        mlx5_core_destroy_mkey(fdev->mdev, &fdev->conn_res.mkey);
        mlx5_core_dealloc_pd(fdev->mdev, fdev->conn_res.pdn);
        mlx5_put_uars_page(fdev->mdev, fdev->conn_res.uar);
        mlx5_nic_vport_disable_roce(fdev->mdev);
}