linux/drivers/infiniband/hw/mlx5/dm.c
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2021, Mellanox Technologies inc. All rights reserved.
 */

#include <rdma/uverbs_std_types.h>
#include "dm.h"

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

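/*
 * Allocate a range of device memory (MEMIC): find a free window in the
 * driver's page bitmap, reserve it, and ask firmware to allocate it with
 * ALLOC_MEMIC.  On -EAGAIN the reservation is dropped and the search
 * continues from the next page; any other failure is returned.  On success
 * *addr holds the BAR-based address (dev->bar_addr plus the offset
 * returned by firmware).
 */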
static int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
                                u64 length, u32 alignment)
{
        struct mlx5_core_dev *dev = dm->dev;
        u64 num_memic_hw_pages = MLX5_CAP_DEV_MEM(dev, memic_bar_size)
                                        >> PAGE_SHIFT;
        u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
        u32 max_alignment = MLX5_CAP_DEV_MEM(dev, log_max_memic_addr_alignment);
        u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
        u32 out[MLX5_ST_SZ_DW(alloc_memic_out)] = {};
        u32 in[MLX5_ST_SZ_DW(alloc_memic_in)] = {};
        u32 mlx5_alignment;
        u64 page_idx = 0;
        int ret = 0;

        if (!length || (length & MLX5_MEMIC_ALLOC_SIZE_MASK))
                return -EINVAL;

        /* mlx5 device sets alignment as 64*2^driver_value
         * so normalizing is needed.
         */
        mlx5_alignment = (alignment < MLX5_MEMIC_BASE_ALIGN) ? 0 :
                         alignment - MLX5_MEMIC_BASE_ALIGN;
        if (mlx5_alignment > max_alignment)
                return -EINVAL;

        MLX5_SET(alloc_memic_in, in, opcode, MLX5_CMD_OP_ALLOC_MEMIC);
        MLX5_SET(alloc_memic_in, in, range_size, num_pages * PAGE_SIZE);
        MLX5_SET(alloc_memic_in, in, memic_size, length);
        MLX5_SET(alloc_memic_in, in, log_memic_addr_alignment,
                 mlx5_alignment);

        while (page_idx < num_memic_hw_pages) {
                spin_lock(&dm->lock);
                page_idx = bitmap_find_next_zero_area(dm->memic_alloc_pages,
                                                      num_memic_hw_pages,
                                                      page_idx,
                                                      num_pages, 0);

                if (page_idx < num_memic_hw_pages)
                        bitmap_set(dm->memic_alloc_pages,
                                   page_idx, num_pages);

                spin_unlock(&dm->lock);

                if (page_idx >= num_memic_hw_pages)
                        break;

                MLX5_SET64(alloc_memic_in, in, range_start_addr,
                           hw_start_addr + (page_idx * PAGE_SIZE));

                ret = mlx5_cmd_exec_inout(dev, alloc_memic, in, out);
                if (ret) {
                        spin_lock(&dm->lock);
                        bitmap_clear(dm->memic_alloc_pages,
                                     page_idx, num_pages);
                        spin_unlock(&dm->lock);

                        if (ret == -EAGAIN) {
                                page_idx++;
                                continue;
                        }

                        return ret;
                }

                *addr = dev->bar_addr +
                        MLX5_GET64(alloc_memic_out, out, memic_start_addr);

                return 0;
        }

        return -ENOMEM;
}

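/*
 * Free a MEMIC range previously allocated by mlx5_cmd_alloc_memic().  The
 * BAR-based address is converted back to a firmware address before issuing
 * DEALLOC_MEMIC; the page bitmap is cleared only if the command succeeds.
 */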
void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr,
                            u64 length)
{
        struct mlx5_core_dev *dev = dm->dev;
        u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
        u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
        u32 in[MLX5_ST_SZ_DW(dealloc_memic_in)] = {};
        u64 start_page_idx;
        int err;

        addr -= dev->bar_addr;
        start_page_idx = (addr - hw_start_addr) >> PAGE_SHIFT;

        MLX5_SET(dealloc_memic_in, in, opcode, MLX5_CMD_OP_DEALLOC_MEMIC);
        MLX5_SET64(dealloc_memic_in, in, memic_start_addr, addr);
        MLX5_SET(dealloc_memic_in, in, memic_size, length);

        err = mlx5_cmd_exec_in(dev, dealloc_memic, in);
        if (err)
                return;

        spin_lock(&dm->lock);
        bitmap_clear(dm->memic_alloc_pages,
                     start_page_idx, num_pages);
        spin_unlock(&dm->lock);
}

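/*
 * Release an operation address obtained from mlx5_cmd_alloc_memic_op()
 * using the MODIFY_MEMIC command with the DEALLOC op_mod.
 */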
void mlx5_cmd_dealloc_memic_op(struct mlx5_dm *dm, phys_addr_t addr,
                               u8 operation)
{
        u32 in[MLX5_ST_SZ_DW(modify_memic_in)] = {};
        struct mlx5_core_dev *dev = dm->dev;

        MLX5_SET(modify_memic_in, in, opcode, MLX5_CMD_OP_MODIFY_MEMIC);
        MLX5_SET(modify_memic_in, in, op_mod, MLX5_MODIFY_MEMIC_OP_MOD_DEALLOC);
        MLX5_SET(modify_memic_in, in, memic_operation_type, operation);
        MLX5_SET64(modify_memic_in, in, memic_start_addr, addr - dev->bar_addr);

        mlx5_cmd_exec_in(dev, modify_memic, in);
}

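/*
 * Map a MEMIC operation on an existing allocation via MODIFY_MEMIC with the
 * ALLOC op_mod.  On success *op_addr holds the BAR-based operation address
 * returned by firmware.
 */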
static int mlx5_cmd_alloc_memic_op(struct mlx5_dm *dm, phys_addr_t addr,
                                   u8 operation, phys_addr_t *op_addr)
{
        u32 out[MLX5_ST_SZ_DW(modify_memic_out)] = {};
        u32 in[MLX5_ST_SZ_DW(modify_memic_in)] = {};
        struct mlx5_core_dev *dev = dm->dev;
        int err;

        MLX5_SET(modify_memic_in, in, opcode, MLX5_CMD_OP_MODIFY_MEMIC);
        MLX5_SET(modify_memic_in, in, op_mod, MLX5_MODIFY_MEMIC_OP_MOD_ALLOC);
        MLX5_SET(modify_memic_in, in, memic_operation_type, operation);
        MLX5_SET64(modify_memic_in, in, memic_start_addr, addr - dev->bar_addr);

        err = mlx5_cmd_exec_inout(dev, modify_memic, in, out);
        if (err)
                return err;

        *op_addr = dev->bar_addr +
                   MLX5_GET64(modify_memic_out, out, memic_operation_addr);
        return 0;
}

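/*
 * Insert a user mmap entry for a device memory region.  Entries are
 * confined to a 64K pgoff window whose bits above 16 encode
 * MLX5_IB_MMAP_DEVICE_MEM, so the low 16 bits of start_pgoff serve as the
 * page index reported back to userspace.
 */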
static int add_dm_mmap_entry(struct ib_ucontext *context,
                             struct mlx5_user_mmap_entry *mentry, u8 mmap_flag,
                             size_t size, u64 address)
{
        mentry->mmap_flag = mmap_flag;
        mentry->address = address;

        return rdma_user_mmap_entry_insert_range(
                context, &mentry->rdma_entry, size,
                MLX5_IB_MMAP_DEVICE_MEM << 16,
                (MLX5_IB_MMAP_DEVICE_MEM << 16) + (1UL << 16) - 1);
}

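/*
 * kref release callback: the last reference to the MEMIC DM is gone, so
 * return the device memory to firmware and free the object.
 */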
static void mlx5_ib_dm_memic_free(struct kref *kref)
{
        struct mlx5_ib_dm_memic *dm =
                container_of(kref, struct mlx5_ib_dm_memic, ref);
        struct mlx5_ib_dev *dev = to_mdev(dm->base.ibdm.device);

        mlx5_cmd_dealloc_memic(&dev->dm, dm->base.dev_addr, dm->base.size);
        kfree(dm);
}

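/*
 * Report an operation mapping to userspace: the mmap page index (low 16
 * bits of the entry's pgoff) and the offset of the operation address
 * within its page.
 */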
static int copy_op_to_user(struct mlx5_ib_dm_op_entry *op_entry,
                           struct uverbs_attr_bundle *attrs)
{
        u64 start_offset;
        u16 page_idx;
        int err;

        page_idx = op_entry->mentry.rdma_entry.start_pgoff & 0xFFFF;
        start_offset = op_entry->op_addr & ~PAGE_MASK;
        err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_PAGE_INDEX,
                             &page_idx, sizeof(page_idx));
        if (err)
                return err;

        return uverbs_copy_to(attrs,
                              MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_START_OFFSET,
                              &start_offset, sizeof(start_offset));
}

static int map_existing_op(struct mlx5_ib_dm_memic *dm, u8 op,
                           struct uverbs_attr_bundle *attrs)
{
        struct mlx5_ib_dm_op_entry *op_entry;

        op_entry = xa_load(&dm->ops, op);
        if (!op_entry)
                return -ENOENT;

        return copy_op_to_user(op_entry, attrs);
}

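/*
 * MLX5_IB_METHOD_DM_MAP_OP_ADDR: map a MEMIC operation for a DM object.
 * If the operation is already mapped, the existing entry is reported;
 * otherwise an operation address is allocated, an mmap entry is created
 * (taking a reference on the DM) and the entry is stored in the ops
 * xarray.  The entry is torn down via mlx5_ib_dm_mmap_free().
 */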
static int UVERBS_HANDLER(MLX5_IB_METHOD_DM_MAP_OP_ADDR)(
        struct uverbs_attr_bundle *attrs)
{
        struct ib_uobject *uobj = uverbs_attr_get_uobject(
                attrs, MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_HANDLE);
        struct mlx5_ib_dev *dev = to_mdev(uobj->context->device);
        struct ib_dm *ibdm = uobj->object;
        struct mlx5_ib_dm_memic *dm = to_memic(ibdm);
        struct mlx5_ib_dm_op_entry *op_entry;
        int err;
        u8 op;

        err = uverbs_copy_from(&op, attrs, MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_OP);
        if (err)
                return err;

        if (op >= BITS_PER_TYPE(u32))
                return -EOPNOTSUPP;

        if (!(MLX5_CAP_DEV_MEM(dev->mdev, memic_operations) & BIT(op)))
                return -EOPNOTSUPP;

        mutex_lock(&dm->ops_xa_lock);
        err = map_existing_op(dm, op, attrs);
        if (!err || err != -ENOENT)
                goto err_unlock;

        op_entry = kzalloc(sizeof(*op_entry), GFP_KERNEL);
        if (!op_entry)
                goto err_unlock;

        err = mlx5_cmd_alloc_memic_op(&dev->dm, dm->base.dev_addr, op,
                                      &op_entry->op_addr);
        if (err) {
                kfree(op_entry);
                goto err_unlock;
        }
        op_entry->op = op;
        op_entry->dm = dm;

        err = add_dm_mmap_entry(uobj->context, &op_entry->mentry,
                                MLX5_IB_MMAP_TYPE_MEMIC_OP, dm->base.size,
                                op_entry->op_addr & PAGE_MASK);
        if (err) {
                mlx5_cmd_dealloc_memic_op(&dev->dm, dm->base.dev_addr, op);
                kfree(op_entry);
                goto err_unlock;
        }
        /* From this point, entry will be freed by mmap_free */
        kref_get(&dm->ref);

        err = copy_op_to_user(op_entry, attrs);
        if (err)
                goto err_remove;

        err = xa_insert(&dm->ops, op, op_entry, GFP_KERNEL);
        if (err)
                goto err_remove;
        mutex_unlock(&dm->ops_xa_lock);

        return 0;

err_remove:
        rdma_user_mmap_entry_remove(&op_entry->mentry.rdma_entry);
err_unlock:
        mutex_unlock(&dm->ops_xa_lock);

        return err;
}

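/*
 * Allocate a MEMIC DM object: round the requested length up to the MEMIC
 * allocation granularity, allocate device memory, create the mmap entry
 * and report the page index and start offset to userspace.
 */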
static struct ib_dm *handle_alloc_dm_memic(struct ib_ucontext *ctx,
                                           struct ib_dm_alloc_attr *attr,
                                           struct uverbs_attr_bundle *attrs)
{
        struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
        struct mlx5_ib_dm_memic *dm;
        u64 start_offset;
        u16 page_idx;
        int err;
        u64 address;

        if (!MLX5_CAP_DEV_MEM(dm_db->dev, memic))
                return ERR_PTR(-EOPNOTSUPP);

        dm = kzalloc(sizeof(*dm), GFP_KERNEL);
        if (!dm)
                return ERR_PTR(-ENOMEM);

        dm->base.type = MLX5_IB_UAPI_DM_TYPE_MEMIC;
        dm->base.size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);
        dm->base.ibdm.device = ctx->device;

        kref_init(&dm->ref);
        xa_init(&dm->ops);
        mutex_init(&dm->ops_xa_lock);
        dm->req_length = attr->length;

        err = mlx5_cmd_alloc_memic(dm_db, &dm->base.dev_addr,
                                   dm->base.size, attr->alignment);
        if (err) {
                kfree(dm);
                return ERR_PTR(err);
        }

        address = dm->base.dev_addr & PAGE_MASK;
        err = add_dm_mmap_entry(ctx, &dm->mentry, MLX5_IB_MMAP_TYPE_MEMIC,
                                dm->base.size, address);
        if (err) {
                mlx5_cmd_dealloc_memic(dm_db, dm->base.dev_addr, dm->base.size);
                kfree(dm);
                return ERR_PTR(err);
        }

        page_idx = dm->mentry.rdma_entry.start_pgoff & 0xFFFF;
        err = uverbs_copy_to(attrs, MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
                             &page_idx, sizeof(page_idx));
        if (err)
                goto err_copy;

        start_offset = dm->base.dev_addr & ~PAGE_MASK;
        err = uverbs_copy_to(attrs,
                             MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
                             &start_offset, sizeof(start_offset));
        if (err)
                goto err_copy;

        return &dm->base.ibdm;

err_copy:
        rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
        return ERR_PTR(err);
}

static enum mlx5_sw_icm_type get_icm_type(int uapi_type)
{
        return uapi_type == MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM ?
                       MLX5_SW_ICM_TYPE_STEERING :
                       MLX5_SW_ICM_TYPE_HEADER_MODIFY;
}

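/*
 * Allocate a SW ICM DM object (steering or header-modify).  The caller
 * must have both CAP_SYS_RAWIO and CAP_NET_RAW, and the device must expose
 * a SW-owned flow table (sw_owner or sw_owner_v2).  The size is rounded up
 * to the ICM block size and to a power of two before the allocation.
 */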
static struct ib_dm *handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
                                            struct ib_dm_alloc_attr *attr,
                                            struct uverbs_attr_bundle *attrs,
                                            int type)
{
        struct mlx5_core_dev *dev = to_mdev(ctx->device)->mdev;
        enum mlx5_sw_icm_type icm_type = get_icm_type(type);
        struct mlx5_ib_dm_icm *dm;
        u64 act_size;
        int err;

        dm = kzalloc(sizeof(*dm), GFP_KERNEL);
        if (!dm)
                return ERR_PTR(-ENOMEM);

        dm->base.type = type;
        dm->base.ibdm.device = ctx->device;

        if (!capable(CAP_SYS_RAWIO) || !capable(CAP_NET_RAW)) {
                err = -EPERM;
                goto free;
        }

        if (!(MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner) ||
              MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner) ||
              MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner_v2) ||
              MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner_v2))) {
                err = -EOPNOTSUPP;
                goto free;
        }

        /* Allocation size must be a multiple of the basic block size
         * and a power of 2.
         */
        act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dev));
        act_size = roundup_pow_of_two(act_size);

        dm->base.size = act_size;
        err = mlx5_dm_sw_icm_alloc(dev, icm_type, act_size, attr->alignment,
                                   to_mucontext(ctx)->devx_uid,
                                   &dm->base.dev_addr, &dm->obj_id);
        if (err)
                goto free;

        err = uverbs_copy_to(attrs, MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
                             &dm->base.dev_addr, sizeof(dm->base.dev_addr));
        if (err) {
                mlx5_dm_sw_icm_dealloc(dev, icm_type, dm->base.size,
                                       to_mucontext(ctx)->devx_uid,
                                       dm->base.dev_addr, dm->obj_id);
                goto free;
        }
        return &dm->base.ibdm;
free:
        kfree(dm);
        return ERR_PTR(err);
}

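/*
 * Device memory allocation entry point.  The DM type attribute (defaulting
 * to MEMIC) selects between MEMIC and the SW ICM flavours.
 */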
struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
                               struct ib_ucontext *context,
                               struct ib_dm_alloc_attr *attr,
                               struct uverbs_attr_bundle *attrs)
{
        enum mlx5_ib_uapi_dm_type type;
        int err;

        err = uverbs_get_const_default(&type, attrs,
                                       MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
                                       MLX5_IB_UAPI_DM_TYPE_MEMIC);
        if (err)
                return ERR_PTR(err);

        mlx5_ib_dbg(to_mdev(ibdev), "alloc_dm req: dm_type=%d user_length=0x%llx log_alignment=%d\n",
                    type, attr->length, attr->alignment);

        switch (type) {
        case MLX5_IB_UAPI_DM_TYPE_MEMIC:
                return handle_alloc_dm_memic(context, attr, attrs);
        case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
                return handle_alloc_dm_sw_icm(context, attr, attrs, type);
        case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
                return handle_alloc_dm_sw_icm(context, attr, attrs, type);
        default:
                return ERR_PTR(-EOPNOTSUPP);
        }
}

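/*
 * Drop all operation mappings of a MEMIC DM.  Removing the mmap entries
 * eventually releases the per-operation resources and the DM references in
 * mlx5_ib_dm_mmap_free().
 */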
static void dm_memic_remove_ops(struct mlx5_ib_dm_memic *dm)
{
        struct mlx5_ib_dm_op_entry *entry;
        unsigned long idx;

        mutex_lock(&dm->ops_xa_lock);
        xa_for_each(&dm->ops, idx, entry) {
                xa_erase(&dm->ops, idx);
                rdma_user_mmap_entry_remove(&entry->mentry.rdma_entry);
        }
        mutex_unlock(&dm->ops_xa_lock);
}

static void mlx5_dm_memic_dealloc(struct mlx5_ib_dm_memic *dm)
{
        dm_memic_remove_ops(dm);
        rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
}

static int mlx5_dm_icm_dealloc(struct mlx5_ib_ucontext *ctx,
                               struct mlx5_ib_dm_icm *dm)
{
        enum mlx5_sw_icm_type type = get_icm_type(dm->base.type);
        struct mlx5_core_dev *dev = to_mdev(dm->base.ibdm.device)->mdev;
        int err;

        err = mlx5_dm_sw_icm_dealloc(dev, type, dm->base.size, ctx->devx_uid,
                                     dm->base.dev_addr, dm->obj_id);
        if (!err)
                kfree(dm);
        return 0;
}

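/*
 * Deallocation entry point.  MEMIC memory is returned to firmware only once
 * the last mmap entry reference is dropped; SW ICM is released directly.
 */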
static int mlx5_ib_dealloc_dm(struct ib_dm *ibdm,
                              struct uverbs_attr_bundle *attrs)
{
        struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context(
                &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
        struct mlx5_ib_dm *dm = to_mdm(ibdm);

        switch (dm->type) {
        case MLX5_IB_UAPI_DM_TYPE_MEMIC:
                mlx5_dm_memic_dealloc(to_memic(ibdm));
                return 0;
        case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
        case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
                return mlx5_dm_icm_dealloc(ctx, to_icm(ibdm));
        default:
                return -EOPNOTSUPP;
        }
}

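/*
 * MLX5_IB_METHOD_DM_QUERY: report the mmap page index, start offset and
 * originally requested length of a MEMIC DM object.
 */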
static int UVERBS_HANDLER(MLX5_IB_METHOD_DM_QUERY)(
        struct uverbs_attr_bundle *attrs)
{
        struct ib_dm *ibdm =
                uverbs_attr_get_obj(attrs, MLX5_IB_ATTR_QUERY_DM_REQ_HANDLE);
        struct mlx5_ib_dm *dm = to_mdm(ibdm);
        struct mlx5_ib_dm_memic *memic;
        u64 start_offset;
        u16 page_idx;
        int err;

        if (dm->type != MLX5_IB_UAPI_DM_TYPE_MEMIC)
                return -EOPNOTSUPP;

        memic = to_memic(ibdm);
        page_idx = memic->mentry.rdma_entry.start_pgoff & 0xFFFF;
        err = uverbs_copy_to(attrs, MLX5_IB_ATTR_QUERY_DM_RESP_PAGE_INDEX,
                             &page_idx, sizeof(page_idx));
        if (err)
                return err;

        start_offset = memic->base.dev_addr & ~PAGE_MASK;
        err = uverbs_copy_to(attrs, MLX5_IB_ATTR_QUERY_DM_RESP_START_OFFSET,
                             &start_offset, sizeof(start_offset));
        if (err)
                return err;

        return uverbs_copy_to(attrs, MLX5_IB_ATTR_QUERY_DM_RESP_LENGTH,
                              &memic->req_length,
                              sizeof(memic->req_length));
}

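/*
 * Called when the last reference to a DM-related mmap entry goes away:
 * drop the MEMIC DM reference, or, for operation entries, release the
 * operation address, free the entry and drop its DM reference.
 */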
void mlx5_ib_dm_mmap_free(struct mlx5_ib_dev *dev,
                          struct mlx5_user_mmap_entry *mentry)
{
        struct mlx5_ib_dm_op_entry *op_entry;
        struct mlx5_ib_dm_memic *mdm;

        switch (mentry->mmap_flag) {
        case MLX5_IB_MMAP_TYPE_MEMIC:
                mdm = container_of(mentry, struct mlx5_ib_dm_memic, mentry);
                kref_put(&mdm->ref, mlx5_ib_dm_memic_free);
                break;
        case MLX5_IB_MMAP_TYPE_MEMIC_OP:
                op_entry = container_of(mentry, struct mlx5_ib_dm_op_entry,
                                        mentry);
                mdm = op_entry->dm;
                mlx5_cmd_dealloc_memic_op(&dev->dm, mdm->base.dev_addr,
                                          op_entry->op);
                kfree(op_entry);
                kref_put(&mdm->ref, mlx5_ib_dm_memic_free);
                break;
        default:
                WARN_ON(true);
        }
}

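/* uverbs ioctl interface: the DM query and operation-mapping methods plus
 * the extra request/response attributes for DM allocation.
 */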
DECLARE_UVERBS_NAMED_METHOD(
        MLX5_IB_METHOD_DM_QUERY,
        UVERBS_ATTR_IDR(MLX5_IB_ATTR_QUERY_DM_REQ_HANDLE, UVERBS_OBJECT_DM,
                        UVERBS_ACCESS_READ, UA_MANDATORY),
        UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_DM_RESP_START_OFFSET,
                            UVERBS_ATTR_TYPE(u64), UA_MANDATORY),
        UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_DM_RESP_PAGE_INDEX,
                            UVERBS_ATTR_TYPE(u16), UA_MANDATORY),
        UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_DM_RESP_LENGTH,
                            UVERBS_ATTR_TYPE(u64), UA_MANDATORY));

ADD_UVERBS_ATTRIBUTES_SIMPLE(
        mlx5_ib_dm, UVERBS_OBJECT_DM, UVERBS_METHOD_DM_ALLOC,
        UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
                            UVERBS_ATTR_TYPE(u64), UA_MANDATORY),
        UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
                            UVERBS_ATTR_TYPE(u16), UA_OPTIONAL),
        UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
                             enum mlx5_ib_uapi_dm_type, UA_OPTIONAL));

DECLARE_UVERBS_NAMED_METHOD(
        MLX5_IB_METHOD_DM_MAP_OP_ADDR,
        UVERBS_ATTR_IDR(MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_HANDLE,
                        UVERBS_OBJECT_DM,
                        UVERBS_ACCESS_READ,
                        UA_MANDATORY),
        UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_OP,
                           UVERBS_ATTR_TYPE(u8),
                           UA_MANDATORY),
        UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_START_OFFSET,
                            UVERBS_ATTR_TYPE(u64),
                            UA_MANDATORY),
        UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_PAGE_INDEX,
                            UVERBS_ATTR_TYPE(u16),
                            UA_OPTIONAL));

DECLARE_UVERBS_GLOBAL_METHODS(UVERBS_OBJECT_DM,
                              &UVERBS_METHOD(MLX5_IB_METHOD_DM_MAP_OP_ADDR),
                              &UVERBS_METHOD(MLX5_IB_METHOD_DM_QUERY));

const struct uapi_definition mlx5_ib_dm_defs[] = {
        UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DM, &mlx5_ib_dm),
        UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_DM),
        {},
};

const struct ib_device_ops mlx5_ib_dev_dm_ops = {
        .alloc_dm = mlx5_ib_alloc_dm,
        .dealloc_dm = mlx5_ib_dealloc_dm,
        .reg_dm_mr = mlx5_ib_reg_dm_mr,
};