linux/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <net/addrconf.h>
#include <linux/etherdevice.h>
#include <linux/mlx5/vport.h>

#include "mlx5_core.h"
#include "lib/mlx5.h"
#include "fpga/conn.h"

#define MLX5_FPGA_PKEY 0xFFFF
#define MLX5_FPGA_PKEY_INDEX 0 /* RoCE PKEY 0xFFFF is always at index 0 */
#define MLX5_FPGA_RECV_SIZE 2048
#define MLX5_FPGA_PORT_NUM 1
#define MLX5_FPGA_CQ_BUDGET 64

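/* Map the one or two scatter entries of a DMA buffer for device access.
 * If mapping the second entry fails, the first mapping is rolled back,
 * so the buffer is either fully mapped or not mapped at all.
 */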
static int mlx5_fpga_conn_map_buf(struct mlx5_fpga_conn *conn,
                                  struct mlx5_fpga_dma_buf *buf)
{
        struct device *dma_device;
        int err = 0;

        if (unlikely(!buf->sg[0].data))
                goto out;

        dma_device = &conn->fdev->mdev->pdev->dev;
        buf->sg[0].dma_addr = dma_map_single(dma_device, buf->sg[0].data,
                                             buf->sg[0].size, buf->dma_dir);
        err = dma_mapping_error(dma_device, buf->sg[0].dma_addr);
        if (unlikely(err)) {
                mlx5_fpga_warn(conn->fdev, "DMA error on sg 0: %d\n", err);
                err = -ENOMEM;
                goto out;
        }

        if (!buf->sg[1].data)
                goto out;

        buf->sg[1].dma_addr = dma_map_single(dma_device, buf->sg[1].data,
                                             buf->sg[1].size, buf->dma_dir);
        err = dma_mapping_error(dma_device, buf->sg[1].dma_addr);
        if (unlikely(err)) {
                mlx5_fpga_warn(conn->fdev, "DMA error on sg 1: %d\n", err);
                dma_unmap_single(dma_device, buf->sg[0].dma_addr,
                                 buf->sg[0].size, buf->dma_dir);
                err = -ENOMEM;
        }

out:
        return err;
}

static void mlx5_fpga_conn_unmap_buf(struct mlx5_fpga_conn *conn,
                                     struct mlx5_fpga_dma_buf *buf)
{
        struct device *dma_device;

        dma_device = &conn->fdev->mdev->pdev->dev;
        if (buf->sg[1].data)
                dma_unmap_single(dma_device, buf->sg[1].dma_addr,
                                 buf->sg[1].size, buf->dma_dir);

        if (likely(buf->sg[0].data))
                dma_unmap_single(dma_device, buf->sg[0].dma_addr,
                                 buf->sg[0].size, buf->dma_dir);
}

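/* Map a receive buffer and post it as a single data segment on the RQ.
 * The producer counter is published to the doorbell record only after a
 * dma_wmb(), so the device never sees a stale descriptor.
 */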
static int mlx5_fpga_conn_post_recv(struct mlx5_fpga_conn *conn,
                                    struct mlx5_fpga_dma_buf *buf)
{
        struct mlx5_wqe_data_seg *data;
        unsigned int ix;
        int err = 0;

        err = mlx5_fpga_conn_map_buf(conn, buf);
        if (unlikely(err))
                goto out;

        if (unlikely(conn->qp.rq.pc - conn->qp.rq.cc >= conn->qp.rq.size)) {
                mlx5_fpga_conn_unmap_buf(conn, buf);
                return -EBUSY;
        }

        ix = conn->qp.rq.pc & (conn->qp.rq.size - 1);
        data = mlx5_wq_cyc_get_wqe(&conn->qp.wq.rq, ix);
        data->byte_count = cpu_to_be32(buf->sg[0].size);
        data->lkey = cpu_to_be32(conn->fdev->conn_res.mkey.key);
        data->addr = cpu_to_be64(buf->sg[0].dma_addr);

        conn->qp.rq.pc++;
        conn->qp.rq.bufs[ix] = buf;

        /* Make sure that descriptors are written before doorbell record. */
        dma_wmb();
        *conn->qp.wq.rq.db = cpu_to_be32(conn->qp.rq.pc & 0xffff);
out:
        return err;
}

static void mlx5_fpga_conn_notify_hw(struct mlx5_fpga_conn *conn, void *wqe)
{
        /* ensure wqe is visible to device before updating doorbell record */
        dma_wmb();
        *conn->qp.wq.sq.db = cpu_to_be32(conn->qp.sq.pc);
        /* Make sure that doorbell record is visible before ringing */
        wmb();
        mlx5_write64(wqe, conn->fdev->conn_res.uar->map + MLX5_BF_OFFSET);
}

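/* Build a send WQE (one control segment plus one data segment per valid
 * sg entry) and ring the doorbell. Callers must hold sq.lock and must
 * already have verified that the SQ has room for another WQE.
 */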
static void mlx5_fpga_conn_post_send(struct mlx5_fpga_conn *conn,
                                     struct mlx5_fpga_dma_buf *buf)
{
        struct mlx5_wqe_ctrl_seg *ctrl;
        struct mlx5_wqe_data_seg *data;
        unsigned int ix, sgi;
        int size = 1;

        ix = conn->qp.sq.pc & (conn->qp.sq.size - 1);

        ctrl = mlx5_wq_cyc_get_wqe(&conn->qp.wq.sq, ix);
        data = (void *)(ctrl + 1);

        for (sgi = 0; sgi < ARRAY_SIZE(buf->sg); sgi++) {
                if (!buf->sg[sgi].data)
                        break;
                data->byte_count = cpu_to_be32(buf->sg[sgi].size);
                data->lkey = cpu_to_be32(conn->fdev->conn_res.mkey.key);
                data->addr = cpu_to_be64(buf->sg[sgi].dma_addr);
                data++;
                size++;
        }

        ctrl->imm = 0;
        ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
        ctrl->opmod_idx_opcode = cpu_to_be32(((conn->qp.sq.pc & 0xffff) << 8) |
                                             MLX5_OPCODE_SEND);
        ctrl->qpn_ds = cpu_to_be32(size | (conn->qp.qpn << 8));

        conn->qp.sq.pc++;
        conn->qp.sq.bufs[ix] = buf;
        mlx5_fpga_conn_notify_hw(conn, ctrl);
}

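/* Queue a buffer for transmission to the FPGA. If the SQ is full, the
 * buffer is placed on a backlog list and sent later, in order, as send
 * completions free up WQE slots. Returns -ENOTCONN once the connection
 * is no longer active.
 */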
int mlx5_fpga_conn_send(struct mlx5_fpga_conn *conn,
                        struct mlx5_fpga_dma_buf *buf)
{
        unsigned long flags;
        int err;

        if (!conn->qp.active)
                return -ENOTCONN;

        buf->dma_dir = DMA_TO_DEVICE;
        err = mlx5_fpga_conn_map_buf(conn, buf);
        if (err)
                return err;

        spin_lock_irqsave(&conn->qp.sq.lock, flags);

        if (conn->qp.sq.pc - conn->qp.sq.cc >= conn->qp.sq.size) {
                list_add_tail(&buf->list, &conn->qp.sq.backlog);
                goto out_unlock;
        }

        mlx5_fpga_conn_post_send(conn, buf);

out_unlock:
        spin_unlock_irqrestore(&conn->qp.sq.lock, flags);
        return err;
}

static int mlx5_fpga_conn_post_recv_buf(struct mlx5_fpga_conn *conn)
{
        struct mlx5_fpga_dma_buf *buf;
        int err;

        buf = kzalloc(sizeof(*buf) + MLX5_FPGA_RECV_SIZE, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        buf->sg[0].data = (void *)(buf + 1);
        buf->sg[0].size = MLX5_FPGA_RECV_SIZE;
        buf->dma_dir = DMA_FROM_DEVICE;

        err = mlx5_fpga_conn_post_recv(conn, buf);
        if (err)
                kfree(buf);

        return err;
}

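/* Create a physical-address memory key spanning the whole address space
 * (length64) with local read/write access, so DMA addresses can be used
 * directly in WQE data segments.
 */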
static int mlx5_fpga_conn_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
                                      struct mlx5_core_mkey *mkey)
{
        int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
        void *mkc;
        u32 *in;
        int err;

        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
        MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
        MLX5_SET(mkc, mkc, lw, 1);
        MLX5_SET(mkc, mkc, lr, 1);

        MLX5_SET(mkc, mkc, pd, pdn);
        MLX5_SET(mkc, mkc, length64, 1);
        MLX5_SET(mkc, mkc, qpn, 0xffffff);

        err = mlx5_core_create_mkey(mdev, mkey, in, inlen);

        kvfree(in);
        return err;
}

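/* Handle a receive completion: hand the payload to the connection's
 * recv_cb and then repost the same buffer on the RQ. On error, or when
 * the connection is no longer active, the buffer is freed instead.
 */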
static void mlx5_fpga_conn_rq_cqe(struct mlx5_fpga_conn *conn,
                                  struct mlx5_cqe64 *cqe, u8 status)
{
        struct mlx5_fpga_dma_buf *buf;
        int ix, err;

        ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.rq.size - 1);
        buf = conn->qp.rq.bufs[ix];
        conn->qp.rq.bufs[ix] = NULL;
        conn->qp.rq.cc++;

        if (unlikely(status && (status != MLX5_CQE_SYNDROME_WR_FLUSH_ERR)))
                mlx5_fpga_warn(conn->fdev, "RQ buf %p on FPGA QP %u completion status %d\n",
                               buf, conn->fpga_qpn, status);
        else
                mlx5_fpga_dbg(conn->fdev, "RQ buf %p on FPGA QP %u completion status %d\n",
                              buf, conn->fpga_qpn, status);

        mlx5_fpga_conn_unmap_buf(conn, buf);

        if (unlikely(status || !conn->qp.active)) {
                conn->qp.active = false;
                kfree(buf);
                return;
        }

        buf->sg[0].size = be32_to_cpu(cqe->byte_cnt);
        mlx5_fpga_dbg(conn->fdev, "Message with %u bytes received successfully\n",
                      buf->sg[0].size);
        conn->recv_cb(conn->cb_arg, buf);

        buf->sg[0].size = MLX5_FPGA_RECV_SIZE;
        err = mlx5_fpga_conn_post_recv(conn, buf);
        if (unlikely(err)) {
                mlx5_fpga_warn(conn->fdev,
                               "Failed to re-post recv buf: %d\n", err);
                kfree(buf);
        }
}

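/* Handle a send completion: release the WQE slot, post the next backlog
 * entry (if any) while still holding sq.lock to preserve ordering, then
 * unmap the buffer and invoke its completion callback.
 */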
static void mlx5_fpga_conn_sq_cqe(struct mlx5_fpga_conn *conn,
                                  struct mlx5_cqe64 *cqe, u8 status)
{
        struct mlx5_fpga_dma_buf *buf, *nextbuf;
        unsigned long flags;
        int ix;

        spin_lock_irqsave(&conn->qp.sq.lock, flags);

        ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.sq.size - 1);
        buf = conn->qp.sq.bufs[ix];
        conn->qp.sq.bufs[ix] = NULL;
        conn->qp.sq.cc++;

        /* Handle backlog still under the spinlock to ensure message post order */
        if (unlikely(!list_empty(&conn->qp.sq.backlog))) {
                if (likely(conn->qp.active)) {
                        nextbuf = list_first_entry(&conn->qp.sq.backlog,
                                                   struct mlx5_fpga_dma_buf, list);
                        list_del(&nextbuf->list);
                        mlx5_fpga_conn_post_send(conn, nextbuf);
                }
        }

        spin_unlock_irqrestore(&conn->qp.sq.lock, flags);

        if (unlikely(status && (status != MLX5_CQE_SYNDROME_WR_FLUSH_ERR)))
                mlx5_fpga_warn(conn->fdev, "SQ buf %p on FPGA QP %u completion status %d\n",
                               buf, conn->fpga_qpn, status);
        else
                mlx5_fpga_dbg(conn->fdev, "SQ buf %p on FPGA QP %u completion status %d\n",
                              buf, conn->fpga_qpn, status);

        mlx5_fpga_conn_unmap_buf(conn, buf);

        if (likely(buf->complete))
                buf->complete(conn, conn->fdev, buf, status);

        if (unlikely(status))
                conn->qp.active = false;
}

static void mlx5_fpga_conn_handle_cqe(struct mlx5_fpga_conn *conn,
                                      struct mlx5_cqe64 *cqe)
{
        u8 opcode, status = 0;

        opcode = get_cqe_opcode(cqe);

        switch (opcode) {
        case MLX5_CQE_REQ_ERR:
                status = ((struct mlx5_err_cqe *)cqe)->syndrome;
                fallthrough;
        case MLX5_CQE_REQ:
                mlx5_fpga_conn_sq_cqe(conn, cqe, status);
                break;

        case MLX5_CQE_RESP_ERR:
                status = ((struct mlx5_err_cqe *)cqe)->syndrome;
                fallthrough;
        case MLX5_CQE_RESP_SEND:
                mlx5_fpga_conn_rq_cqe(conn, cqe, status);
                break;
        default:
                mlx5_fpga_warn(conn->fdev, "Unexpected cqe opcode %u\n",
                               opcode);
        }
}

static void mlx5_fpga_conn_arm_cq(struct mlx5_fpga_conn *conn)
{
        mlx5_cq_arm(&conn->cq.mcq, MLX5_CQ_DB_REQ_NOT,
                    conn->fdev->conn_res.uar->map, conn->cq.wq.cc);
}

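/* Poll up to @budget CQEs. If the budget is exhausted the tasklet is
 * rescheduled to continue polling; otherwise the CQ is re-armed so the
 * next completion raises an interrupt.
 */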
static inline void mlx5_fpga_conn_cqes(struct mlx5_fpga_conn *conn,
                                       unsigned int budget)
{
        struct mlx5_cqe64 *cqe;

        while (budget) {
                cqe = mlx5_cqwq_get_cqe(&conn->cq.wq);
                if (!cqe)
                        break;

                budget--;
                mlx5_cqwq_pop(&conn->cq.wq);
                mlx5_fpga_conn_handle_cqe(conn, cqe);
                mlx5_cqwq_update_db_record(&conn->cq.wq);
        }
        if (!budget) {
                tasklet_schedule(&conn->cq.tasklet);
                return;
        }

        mlx5_fpga_dbg(conn->fdev, "Re-arming CQ with cc# %u\n", conn->cq.wq.cc);
        /* ensure cq space is freed before enabling more cqes */
        wmb();
        mlx5_fpga_conn_arm_cq(conn);
}

static void mlx5_fpga_conn_cq_tasklet(unsigned long data)
{
        struct mlx5_fpga_conn *conn = (void *)data;

        if (unlikely(!conn->qp.active))
                return;
        mlx5_fpga_conn_cqes(conn, MLX5_FPGA_CQ_BUDGET);
}

static void mlx5_fpga_conn_cq_complete(struct mlx5_core_cq *mcq,
                                       struct mlx5_eqe *eqe)
{
        struct mlx5_fpga_conn *conn;

        conn = container_of(mcq, struct mlx5_fpga_conn, cq.mcq);
        if (unlikely(!conn->qp.active))
                return;
        mlx5_fpga_conn_cqes(conn, MLX5_FPGA_CQ_BUDGET);
}

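/* Create the completion queue shared by the SQ and RQ: allocate the CQ
 * work queue, initialize CQE ownership bits so stale entries are not
 * treated as valid, and issue CREATE_CQ bound to this connection's UAR,
 * EQ and doorbell record.
 */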
static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
{
        struct mlx5_fpga_device *fdev = conn->fdev;
        struct mlx5_core_dev *mdev = fdev->mdev;
        u32 temp_cqc[MLX5_ST_SZ_DW(cqc)] = {0};
        u32 out[MLX5_ST_SZ_DW(create_cq_out)];
        struct mlx5_wq_param wqp;
        struct mlx5_cqe64 *cqe;
        int inlen, err, eqn;
        unsigned int irqn;
        void *cqc, *in;
        __be64 *pas;
        u32 i;

        cq_size = roundup_pow_of_two(cq_size);
        MLX5_SET(cqc, temp_cqc, log_cq_size, ilog2(cq_size));

        wqp.buf_numa_node = mdev->priv.numa_node;
        wqp.db_numa_node  = mdev->priv.numa_node;

        err = mlx5_cqwq_create(mdev, &wqp, temp_cqc, &conn->cq.wq,
                               &conn->cq.wq_ctrl);
        if (err)
                return err;

        for (i = 0; i < mlx5_cqwq_get_size(&conn->cq.wq); i++) {
                cqe = mlx5_cqwq_get_wqe(&conn->cq.wq, i);
                cqe->op_own = MLX5_CQE_INVALID << 4 | MLX5_CQE_OWNER_MASK;
        }

        inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
                sizeof(u64) * conn->cq.wq_ctrl.buf.npages;
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in) {
                err = -ENOMEM;
                goto err_cqwq;
        }

        err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn);
        if (err) {
                kvfree(in);
                goto err_cqwq;
        }

        cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
        MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size));
        MLX5_SET(cqc, cqc, c_eqn, eqn);
        MLX5_SET(cqc, cqc, uar_page, fdev->conn_res.uar->index);
        MLX5_SET(cqc, cqc, log_page_size, conn->cq.wq_ctrl.buf.page_shift -
                           MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(cqc, cqc, dbr_addr, conn->cq.wq_ctrl.db.dma);

        pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
        mlx5_fill_page_frag_array(&conn->cq.wq_ctrl.buf, pas);

        err = mlx5_core_create_cq(mdev, &conn->cq.mcq, in, inlen, out, sizeof(out));
        kvfree(in);

        if (err)
                goto err_cqwq;

        conn->cq.mcq.cqe_sz     = 64;
        conn->cq.mcq.set_ci_db  = conn->cq.wq_ctrl.db.db;
        conn->cq.mcq.arm_db     = conn->cq.wq_ctrl.db.db + 1;
        *conn->cq.mcq.set_ci_db = 0;
        *conn->cq.mcq.arm_db    = 0;
        conn->cq.mcq.vector     = 0;
        conn->cq.mcq.comp       = mlx5_fpga_conn_cq_complete;
        conn->cq.mcq.irqn       = irqn;
        conn->cq.mcq.uar        = fdev->conn_res.uar;
        tasklet_init(&conn->cq.tasklet, mlx5_fpga_conn_cq_tasklet,
                     (unsigned long)conn);

        mlx5_fpga_dbg(fdev, "Created CQ #0x%x\n", conn->cq.mcq.cqn);

        goto out;

err_cqwq:
        mlx5_wq_destroy(&conn->cq.wq_ctrl);
out:
        return err;
}

static void mlx5_fpga_conn_destroy_cq(struct mlx5_fpga_conn *conn)
{
        tasklet_disable(&conn->cq.tasklet);
        tasklet_kill(&conn->cq.tasklet);
        mlx5_core_destroy_cq(conn->fdev->mdev, &conn->cq.mcq);
        mlx5_wq_destroy(&conn->cq.wq_ctrl);
}

static int mlx5_fpga_conn_create_wq(struct mlx5_fpga_conn *conn, void *qpc)
{
        struct mlx5_fpga_device *fdev = conn->fdev;
        struct mlx5_core_dev *mdev = fdev->mdev;
        struct mlx5_wq_param wqp;

        wqp.buf_numa_node = mdev->priv.numa_node;
        wqp.db_numa_node  = mdev->priv.numa_node;

        return mlx5_wq_qp_create(mdev, &wqp, qpc, &conn->qp.wq,
                                 &conn->qp.wq_ctrl);
}

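/* Create the RC QP used to talk to the FPGA: size the SQ/RQ up to the
 * next power of two, allocate the per-WQE buffer tracking arrays, and
 * issue CREATE_QP bound to this connection's CQ, PD and UAR.
 */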
static int mlx5_fpga_conn_create_qp(struct mlx5_fpga_conn *conn,
                                    unsigned int tx_size, unsigned int rx_size)
{
        struct mlx5_fpga_device *fdev = conn->fdev;
        u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
        struct mlx5_core_dev *mdev = fdev->mdev;
        u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {};
        void *in = NULL, *qpc;
        int err, inlen;

        conn->qp.rq.pc = 0;
        conn->qp.rq.cc = 0;
        conn->qp.rq.size = roundup_pow_of_two(rx_size);
        conn->qp.sq.pc = 0;
        conn->qp.sq.cc = 0;
        conn->qp.sq.size = roundup_pow_of_two(tx_size);

        MLX5_SET(qpc, temp_qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4);
        MLX5_SET(qpc, temp_qpc, log_rq_size, ilog2(conn->qp.rq.size));
        MLX5_SET(qpc, temp_qpc, log_sq_size, ilog2(conn->qp.sq.size));
        err = mlx5_fpga_conn_create_wq(conn, temp_qpc);
        if (err)
                goto out;

        conn->qp.rq.bufs = kvcalloc(conn->qp.rq.size,
                                    sizeof(conn->qp.rq.bufs[0]),
                                    GFP_KERNEL);
        if (!conn->qp.rq.bufs) {
                err = -ENOMEM;
                goto err_wq;
        }

        conn->qp.sq.bufs = kvcalloc(conn->qp.sq.size,
                                    sizeof(conn->qp.sq.bufs[0]),
                                    GFP_KERNEL);
        if (!conn->qp.sq.bufs) {
                err = -ENOMEM;
                goto err_rq_bufs;
        }

        inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
                MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) *
                conn->qp.wq_ctrl.buf.npages;
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in) {
                err = -ENOMEM;
                goto err_sq_bufs;
        }

        qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
        MLX5_SET(qpc, qpc, uar_page, fdev->conn_res.uar->index);
        MLX5_SET(qpc, qpc, log_page_size,
                 conn->qp.wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET(qpc, qpc, fre, 1);
        MLX5_SET(qpc, qpc, rlky, 1);
        MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
        MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
        MLX5_SET(qpc, qpc, pd, fdev->conn_res.pdn);
        MLX5_SET(qpc, qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4);
        MLX5_SET(qpc, qpc, log_rq_size, ilog2(conn->qp.rq.size));
        MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
        MLX5_SET(qpc, qpc, log_sq_size, ilog2(conn->qp.sq.size));
        MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn);
        MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn);
        MLX5_SET64(qpc, qpc, dbr_addr, conn->qp.wq_ctrl.db.dma);
        if (MLX5_CAP_GEN(mdev, cqe_version) == 1)
                MLX5_SET(qpc, qpc, user_index, 0xFFFFFF);

        mlx5_fill_page_frag_array(&conn->qp.wq_ctrl.buf,
                                  (__be64 *)MLX5_ADDR_OF(create_qp_in, in, pas));

        MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
        err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
        if (err)
                goto err_sq_bufs;

        conn->qp.qpn = MLX5_GET(create_qp_out, out, qpn);
        mlx5_fpga_dbg(fdev, "Created QP #0x%x\n", conn->qp.qpn);

        goto out;

err_sq_bufs:
        kvfree(conn->qp.sq.bufs);
err_rq_bufs:
        kvfree(conn->qp.rq.bufs);
err_wq:
        mlx5_wq_destroy(&conn->qp.wq_ctrl);
out:
        kvfree(in);
        return err;
}

static void mlx5_fpga_conn_free_recv_bufs(struct mlx5_fpga_conn *conn)
{
        int ix;

        for (ix = 0; ix < conn->qp.rq.size; ix++) {
                if (!conn->qp.rq.bufs[ix])
                        continue;
                mlx5_fpga_conn_unmap_buf(conn, conn->qp.rq.bufs[ix]);
                kfree(conn->qp.rq.bufs[ix]);
                conn->qp.rq.bufs[ix] = NULL;
        }
}

static void mlx5_fpga_conn_flush_send_bufs(struct mlx5_fpga_conn *conn)
{
        struct mlx5_fpga_dma_buf *buf, *temp;
        int ix;

        for (ix = 0; ix < conn->qp.sq.size; ix++) {
                buf = conn->qp.sq.bufs[ix];
                if (!buf)
                        continue;
                conn->qp.sq.bufs[ix] = NULL;
                mlx5_fpga_conn_unmap_buf(conn, buf);
                if (!buf->complete)
                        continue;
                buf->complete(conn, conn->fdev, buf, MLX5_CQE_SYNDROME_WR_FLUSH_ERR);
        }
        list_for_each_entry_safe(buf, temp, &conn->qp.sq.backlog, list) {
                mlx5_fpga_conn_unmap_buf(conn, buf);
                if (!buf->complete)
                        continue;
                buf->complete(conn, conn->fdev, buf, MLX5_CQE_SYNDROME_WR_FLUSH_ERR);
        }
}

static void mlx5_fpga_conn_destroy_qp(struct mlx5_fpga_conn *conn)
{
        struct mlx5_core_dev *dev = conn->fdev->mdev;
        u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};

        MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
        MLX5_SET(destroy_qp_in, in, qpn, conn->qp.qpn);
        mlx5_cmd_exec_in(dev, destroy_qp, in);

        mlx5_fpga_conn_free_recv_bufs(conn);
        mlx5_fpga_conn_flush_send_bufs(conn);
        kvfree(conn->qp.sq.bufs);
        kvfree(conn->qp.rq.bufs);
        mlx5_wq_destroy(&conn->qp.wq_ctrl);
}

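/* The helpers below drive the host QP through the standard RC state
 * machine (RST -> INIT -> RTR -> RTS). The remote MAC, IP and PSNs come
 * from the fpga_qpc returned when the FPGA-side QP was created.
 */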
static int mlx5_fpga_conn_reset_qp(struct mlx5_fpga_conn *conn)
{
        struct mlx5_core_dev *mdev = conn->fdev->mdev;
        u32 in[MLX5_ST_SZ_DW(qp_2rst_in)] = {};

        mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to RST\n", conn->qp.qpn);

        MLX5_SET(qp_2rst_in, in, opcode, MLX5_CMD_OP_2RST_QP);
        MLX5_SET(qp_2rst_in, in, qpn, conn->qp.qpn);

        return mlx5_cmd_exec_in(mdev, qp_2rst, in);
}

static int mlx5_fpga_conn_init_qp(struct mlx5_fpga_conn *conn)
{
        u32 in[MLX5_ST_SZ_DW(rst2init_qp_in)] = {};
        struct mlx5_fpga_device *fdev = conn->fdev;
        struct mlx5_core_dev *mdev = fdev->mdev;
        u32 *qpc;

        mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to INIT\n", conn->qp.qpn);

        qpc = MLX5_ADDR_OF(rst2init_qp_in, in, qpc);

        MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
        MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
        MLX5_SET(qpc, qpc, primary_address_path.pkey_index, MLX5_FPGA_PKEY_INDEX);
        MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, MLX5_FPGA_PORT_NUM);
        MLX5_SET(qpc, qpc, pd, conn->fdev->conn_res.pdn);
        MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn);
        MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn);
        MLX5_SET64(qpc, qpc, dbr_addr, conn->qp.wq_ctrl.db.dma);

        MLX5_SET(rst2init_qp_in, in, opcode, MLX5_CMD_OP_RST2INIT_QP);
        MLX5_SET(rst2init_qp_in, in, qpn, conn->qp.qpn);

        return mlx5_cmd_exec_in(mdev, rst2init_qp, in);
}

static int mlx5_fpga_conn_rtr_qp(struct mlx5_fpga_conn *conn)
{
        u32 in[MLX5_ST_SZ_DW(init2rtr_qp_in)] = {};
        struct mlx5_fpga_device *fdev = conn->fdev;
        struct mlx5_core_dev *mdev = fdev->mdev;
        u32 *qpc;

        mlx5_fpga_dbg(conn->fdev, "QP RTR\n");

        qpc = MLX5_ADDR_OF(init2rtr_qp_in, in, qpc);

        MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_1K_BYTES);
        MLX5_SET(qpc, qpc, log_msg_max, (u8)MLX5_CAP_GEN(mdev, log_max_msg));
        MLX5_SET(qpc, qpc, remote_qpn, conn->fpga_qpn);
        MLX5_SET(qpc, qpc, next_rcv_psn,
                 MLX5_GET(fpga_qpc, conn->fpga_qpc, next_send_psn));
        MLX5_SET(qpc, qpc, primary_address_path.pkey_index, MLX5_FPGA_PKEY_INDEX);
        MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, MLX5_FPGA_PORT_NUM);
        ether_addr_copy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rmac_47_32),
                        MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, fpga_mac_47_32));
        MLX5_SET(qpc, qpc, primary_address_path.udp_sport,
                 MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port));
        MLX5_SET(qpc, qpc, primary_address_path.src_addr_index,
                 conn->qp.sgid_index);
        MLX5_SET(qpc, qpc, primary_address_path.hop_limit, 0);
        memcpy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rgid_rip),
               MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, fpga_ip),
               MLX5_FLD_SZ_BYTES(qpc, primary_address_path.rgid_rip));

        MLX5_SET(init2rtr_qp_in, in, opcode, MLX5_CMD_OP_INIT2RTR_QP);
        MLX5_SET(init2rtr_qp_in, in, qpn, conn->qp.qpn);

        return mlx5_cmd_exec_in(mdev, init2rtr_qp, in);
}

static int mlx5_fpga_conn_rts_qp(struct mlx5_fpga_conn *conn)
{
        struct mlx5_fpga_device *fdev = conn->fdev;
        u32 in[MLX5_ST_SZ_DW(rtr2rts_qp_in)] = {};
        struct mlx5_core_dev *mdev = fdev->mdev;
        u32 *qpc;

        mlx5_fpga_dbg(conn->fdev, "QP RTS\n");

        qpc = MLX5_ADDR_OF(rtr2rts_qp_in, in, qpc);

        MLX5_SET(qpc, qpc, log_ack_req_freq, 8);
        MLX5_SET(qpc, qpc, min_rnr_nak, 0x12);
        MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 0x12); /* ~1.07s */
        MLX5_SET(qpc, qpc, next_send_psn,
                 MLX5_GET(fpga_qpc, conn->fpga_qpc, next_rcv_psn));
        MLX5_SET(qpc, qpc, retry_count, 7);
        MLX5_SET(qpc, qpc, rnr_retry, 7); /* Infinite retry if RNR NACK */

        MLX5_SET(rtr2rts_qp_in, in, opcode, MLX5_CMD_OP_RTR2RTS_QP);
        MLX5_SET(rtr2rts_qp_in, in, qpn, conn->qp.qpn);
        MLX5_SET(rtr2rts_qp_in, in, opt_param_mask, MLX5_QP_OPTPAR_RNR_TIMEOUT);

        return mlx5_cmd_exec_in(mdev, rtr2rts_qp, in);
}

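/* Bring the connection up: activate the FPGA QP, walk the host QP to
 * RTS, and pre-fill the RQ with receive buffers before moving to RTR so
 * no incoming message is dropped. On failure the FPGA QP is reverted to
 * INIT.
 */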
static int mlx5_fpga_conn_connect(struct mlx5_fpga_conn *conn)
{
        struct mlx5_fpga_device *fdev = conn->fdev;
        int err;

        MLX5_SET(fpga_qpc, conn->fpga_qpc, state, MLX5_FPGA_QPC_STATE_ACTIVE);
        err = mlx5_fpga_modify_qp(conn->fdev->mdev, conn->fpga_qpn,
                                  MLX5_FPGA_QPC_STATE, &conn->fpga_qpc);
        if (err) {
                mlx5_fpga_err(fdev, "Failed to activate FPGA RC QP: %d\n", err);
                goto out;
        }

        err = mlx5_fpga_conn_reset_qp(conn);
        if (err) {
                mlx5_fpga_err(fdev, "Failed to change QP state to reset\n");
                goto err_fpga_qp;
        }

        err = mlx5_fpga_conn_init_qp(conn);
        if (err) {
                mlx5_fpga_err(fdev, "Failed to modify QP from RESET to INIT\n");
                goto err_fpga_qp;
        }
        conn->qp.active = true;

        while (!mlx5_fpga_conn_post_recv_buf(conn))
                ;

        err = mlx5_fpga_conn_rtr_qp(conn);
        if (err) {
                mlx5_fpga_err(fdev, "Failed to change QP state from INIT to RTR\n");
                goto err_recv_bufs;
        }

        err = mlx5_fpga_conn_rts_qp(conn);
        if (err) {
                mlx5_fpga_err(fdev, "Failed to change QP state from RTR to RTS\n");
                goto err_recv_bufs;
        }
        goto out;

err_recv_bufs:
        mlx5_fpga_conn_free_recv_bufs(conn);
err_fpga_qp:
        MLX5_SET(fpga_qpc, conn->fpga_qpc, state, MLX5_FPGA_QPC_STATE_INIT);
        if (mlx5_fpga_modify_qp(conn->fdev->mdev, conn->fpga_qpn,
                                MLX5_FPGA_QPC_STATE, &conn->fpga_qpc))
                mlx5_fpga_err(fdev, "Failed to revert FPGA QP to INIT\n");
out:
        return err;
}

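/* Create a connection to the FPGA: derive a link-local IPv6 GID from the
 * local MAC, reserve a RoCE v2 GID index, create the CQ and host QP,
 * create the FPGA-side QP and finally connect the two.
 */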
struct mlx5_fpga_conn *mlx5_fpga_conn_create(struct mlx5_fpga_device *fdev,
                                             struct mlx5_fpga_conn_attr *attr,
                                             enum mlx5_ifc_fpga_qp_type qp_type)
{
        struct mlx5_fpga_conn *ret, *conn;
        u8 *remote_mac, *remote_ip;
        int err;

        if (!attr->recv_cb)
                return ERR_PTR(-EINVAL);

        conn = kzalloc(sizeof(*conn), GFP_KERNEL);
        if (!conn)
                return ERR_PTR(-ENOMEM);

        conn->fdev = fdev;
        INIT_LIST_HEAD(&conn->qp.sq.backlog);

        spin_lock_init(&conn->qp.sq.lock);

        conn->recv_cb = attr->recv_cb;
        conn->cb_arg = attr->cb_arg;

        remote_mac = MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, remote_mac_47_32);
        err = mlx5_query_mac_address(fdev->mdev, remote_mac);
        if (err) {
                mlx5_fpga_err(fdev, "Failed to query local MAC: %d\n", err);
                ret = ERR_PTR(err);
                goto err;
        }

        /* Build Modified EUI-64 IPv6 address from the MAC address */
        remote_ip = MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, remote_ip);
        remote_ip[0] = 0xfe;
        remote_ip[1] = 0x80;
        addrconf_addr_eui48(&remote_ip[8], remote_mac);

        err = mlx5_core_reserved_gid_alloc(fdev->mdev, &conn->qp.sgid_index);
        if (err) {
                mlx5_fpga_err(fdev, "Failed to allocate SGID: %d\n", err);
                ret = ERR_PTR(err);
                goto err;
        }

        err = mlx5_core_roce_gid_set(fdev->mdev, conn->qp.sgid_index,
                                     MLX5_ROCE_VERSION_2,
                                     MLX5_ROCE_L3_TYPE_IPV6,
                                     remote_ip, remote_mac, true, 0,
                                     MLX5_FPGA_PORT_NUM);
        if (err) {
                mlx5_fpga_err(fdev, "Failed to set SGID: %d\n", err);
                ret = ERR_PTR(err);
                goto err_rsvd_gid;
        }
        mlx5_fpga_dbg(fdev, "Reserved SGID index %u\n", conn->qp.sgid_index);

        /* Allow for one cqe per rx/tx wqe, plus one cqe for the next wqe,
         * created during processing of the cqe
         */
        err = mlx5_fpga_conn_create_cq(conn,
                                       (attr->tx_size + attr->rx_size) * 2);
        if (err) {
                mlx5_fpga_err(fdev, "Failed to create CQ: %d\n", err);
                ret = ERR_PTR(err);
                goto err_gid;
        }

        mlx5_fpga_conn_arm_cq(conn);

        err = mlx5_fpga_conn_create_qp(conn, attr->tx_size, attr->rx_size);
        if (err) {
                mlx5_fpga_err(fdev, "Failed to create QP: %d\n", err);
                ret = ERR_PTR(err);
                goto err_cq;
        }

        MLX5_SET(fpga_qpc, conn->fpga_qpc, state, MLX5_FPGA_QPC_STATE_INIT);
        MLX5_SET(fpga_qpc, conn->fpga_qpc, qp_type, qp_type);
        MLX5_SET(fpga_qpc, conn->fpga_qpc, st, MLX5_FPGA_QPC_ST_RC);
        MLX5_SET(fpga_qpc, conn->fpga_qpc, ether_type, ETH_P_8021Q);
        MLX5_SET(fpga_qpc, conn->fpga_qpc, vid, 0);
        MLX5_SET(fpga_qpc, conn->fpga_qpc, next_rcv_psn, 1);
        MLX5_SET(fpga_qpc, conn->fpga_qpc, next_send_psn, 0);
        MLX5_SET(fpga_qpc, conn->fpga_qpc, pkey, MLX5_FPGA_PKEY);
        MLX5_SET(fpga_qpc, conn->fpga_qpc, remote_qpn, conn->qp.qpn);
        MLX5_SET(fpga_qpc, conn->fpga_qpc, rnr_retry, 7);
        MLX5_SET(fpga_qpc, conn->fpga_qpc, retry_count, 7);

        err = mlx5_fpga_create_qp(fdev->mdev, &conn->fpga_qpc,
                                  &conn->fpga_qpn);
        if (err) {
                mlx5_fpga_err(fdev, "Failed to create FPGA RC QP: %d\n", err);
                ret = ERR_PTR(err);
                goto err_qp;
        }

        err = mlx5_fpga_conn_connect(conn);
        if (err) {
                ret = ERR_PTR(err);
                goto err_conn;
        }

        mlx5_fpga_dbg(fdev, "FPGA QPN is %u\n", conn->fpga_qpn);
        ret = conn;
        goto out;

err_conn:
        mlx5_fpga_destroy_qp(conn->fdev->mdev, conn->fpga_qpn);
err_qp:
        mlx5_fpga_conn_destroy_qp(conn);
err_cq:
        mlx5_fpga_conn_destroy_cq(conn);
err_gid:
        mlx5_core_roce_gid_set(fdev->mdev, conn->qp.sgid_index, 0, 0, NULL,
                               NULL, false, 0, MLX5_FPGA_PORT_NUM);
err_rsvd_gid:
        mlx5_core_reserved_gid_free(fdev->mdev, conn->qp.sgid_index);
err:
        kfree(conn);
out:
        return ret;
}

void mlx5_fpga_conn_destroy(struct mlx5_fpga_conn *conn)
{
        conn->qp.active = false;
        tasklet_disable(&conn->cq.tasklet);
        synchronize_irq(conn->cq.mcq.irqn);

        mlx5_fpga_destroy_qp(conn->fdev->mdev, conn->fpga_qpn);
        mlx5_fpga_conn_destroy_qp(conn);
        mlx5_fpga_conn_destroy_cq(conn);

        mlx5_core_roce_gid_set(conn->fdev->mdev, conn->qp.sgid_index, 0, 0,
                               NULL, NULL, false, 0, MLX5_FPGA_PORT_NUM);
        mlx5_core_reserved_gid_free(conn->fdev->mdev, conn->qp.sgid_index);
        kfree(conn);
}

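/* Allocate the per-device resources shared by all FPGA connections:
 * RoCE enablement, a UAR page, a PD and the physical-address mkey.
 */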
int mlx5_fpga_conn_device_init(struct mlx5_fpga_device *fdev)
{
        int err;

        err = mlx5_nic_vport_enable_roce(fdev->mdev);
        if (err) {
                mlx5_fpga_err(fdev, "Failed to enable RoCE: %d\n", err);
                goto out;
        }

        fdev->conn_res.uar = mlx5_get_uars_page(fdev->mdev);
        if (IS_ERR(fdev->conn_res.uar)) {
                err = PTR_ERR(fdev->conn_res.uar);
                mlx5_fpga_err(fdev, "get_uars_page failed, %d\n", err);
                goto err_roce;
        }
        mlx5_fpga_dbg(fdev, "Allocated UAR index %u\n",
                      fdev->conn_res.uar->index);

        err = mlx5_core_alloc_pd(fdev->mdev, &fdev->conn_res.pdn);
        if (err) {
                mlx5_fpga_err(fdev, "alloc pd failed, %d\n", err);
                goto err_uar;
        }
        mlx5_fpga_dbg(fdev, "Allocated PD %u\n", fdev->conn_res.pdn);

        err = mlx5_fpga_conn_create_mkey(fdev->mdev, fdev->conn_res.pdn,
                                         &fdev->conn_res.mkey);
        if (err) {
                mlx5_fpga_err(fdev, "create mkey failed, %d\n", err);
                goto err_dealloc_pd;
        }
        mlx5_fpga_dbg(fdev, "Created mkey 0x%x\n", fdev->conn_res.mkey.key);

        return 0;

err_dealloc_pd:
        mlx5_core_dealloc_pd(fdev->mdev, fdev->conn_res.pdn);
err_uar:
        mlx5_put_uars_page(fdev->mdev, fdev->conn_res.uar);
err_roce:
        mlx5_nic_vport_disable_roce(fdev->mdev);
out:
        return err;
}

void mlx5_fpga_conn_device_cleanup(struct mlx5_fpga_device *fdev)
{
        mlx5_core_destroy_mkey(fdev->mdev, &fdev->conn_res.mkey);
        mlx5_core_dealloc_pd(fdev->mdev, fdev->conn_res.pdn);
        mlx5_put_uars_page(fdev->mdev, fdev->conn_res.uar);
        mlx5_nic_vport_disable_roce(fdev->mdev);
}