dpdk/drivers/vdpa/mlx5/mlx5_vdpa.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_VDPA_H_
#define RTE_PMD_MLX5_VDPA_H_

#include <linux/virtio_net.h>
#include <sys/queue.h>

#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <rte_vdpa.h>
#include <vdpa_driver.h>
#include <rte_vhost.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
#include <rte_spinlock.h>
#include <rte_interrupts.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common_devx.h>
#include <mlx5_prm.h>

#define MLX5_VDPA_INTR_RETRIES 256
#define MLX5_VDPA_INTR_RETRIES_USEC 1000
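
/*
 * Note (assumption from the names): interrupt handle registration and
 * unregistration are retried up to MLX5_VDPA_INTR_RETRIES times, sleeping
 * MLX5_VDPA_INTR_RETRIES_USEC microseconds between attempts to tolerate
 * transient -EAGAIN results.
 */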

#ifndef VIRTIO_F_ORDER_PLATFORM
#define VIRTIO_F_ORDER_PLATFORM 36
#endif

#ifndef VIRTIO_F_RING_PACKED
#define VIRTIO_F_RING_PACKED 34
#endif

#define MLX5_VDPA_DEFAULT_TIMER_DELAY_US 0u
#define MLX5_VDPA_DEFAULT_TIMER_STEP_US 1u

struct mlx5_vdpa_cq {
        uint16_t log_desc_n;
        uint32_t cq_ci:24;
        uint32_t arm_sn:2;
        uint32_t armed:1;
        int callfd;
        rte_spinlock_t sl;
        struct mlx5_devx_cq cq_obj;
        uint64_t errors;
};

struct mlx5_vdpa_event_qp {
        struct mlx5_vdpa_cq cq;
        struct mlx5_devx_obj *fw_qp;
        struct mlx5_devx_qp sw_qp;
        uint16_t qp_pi;
};

struct mlx5_vdpa_query_mr {
        union {
                struct ibv_mr *mr;
                struct mlx5_devx_obj *mkey;
        };
        int is_indirect;
};

enum {
        MLX5_VDPA_NOTIFIER_STATE_DISABLED,
        MLX5_VDPA_NOTIFIER_STATE_ENABLED,
        MLX5_VDPA_NOTIFIER_STATE_ERR
};

#define MLX5_VDPA_USED_RING_LEN(size) \
        ((size) * sizeof(struct vring_used_elem) + sizeof(uint16_t) * 3)
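
/*
 * Per the virtio spec, the used ring is: flags (2B) + idx (2B) + one 8B
 * struct vring_used_elem per descriptor + a trailing 2B avail_event field,
 * hence the "sizeof(uint16_t) * 3" term above. For example, a 256-entry
 * ring occupies 256 * 8 + 6 = 2054 bytes.
 */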
#define MLX5_VDPA_MAX_C_THRD 256
#define MLX5_VDPA_MAX_TASKS_PER_THRD 4096
#define MLX5_VDPA_TASKS_PER_DEV 64
#define MLX5_VDPA_MAX_MRS 0xFFFF

/* Vdpa task types. */
enum mlx5_vdpa_task_type {
        MLX5_VDPA_TASK_REG_MR = 1,
        MLX5_VDPA_TASK_SETUP_VIRTQ,
        MLX5_VDPA_TASK_STOP_VIRTQ,
        MLX5_VDPA_TASK_DEV_CLOSE_NOWAIT,
        MLX5_VDPA_TASK_PREPARE_VIRTQ,
};

/* Generic task information; the size must be a multiple of 4B. */
struct mlx5_vdpa_task {
        struct mlx5_vdpa_priv *priv;
        enum mlx5_vdpa_task_type type;
        uint32_t *remaining_cnt;
        uint32_t *err_cnt;
        uint32_t idx;
} __rte_packed __rte_aligned(4);

/* Generic mlx5_vdpa_c_thread information. */
struct mlx5_vdpa_c_thread {
        pthread_t tid;
        struct rte_ring *rng;
        pthread_cond_t c_cond;
};

struct mlx5_vdpa_conf_thread_mng {
        void *initializer_priv;
        uint32_t refcnt;
        uint32_t max_thrds;
        pthread_mutex_t cthrd_lock;
        struct mlx5_vdpa_c_thread cthrd[MLX5_VDPA_MAX_C_THRD];
};
extern struct mlx5_vdpa_conf_thread_mng conf_thread_mng;
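
/*
 * Note (assumption from the fields above): conf_thread_mng is a single
 * process-wide pool of configuration threads shared by all mlx5 vDPA
 * devices; refcnt counts the attached devices and cthrd_lock guards both
 * the counter and the per-thread task rings.
 */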

struct mlx5_vdpa_vmem_info {
        struct rte_vhost_memory *vmem;
        uint32_t entries_num;
        uint64_t gcd;
        uint64_t size;
        uint8_t mode;
};

struct mlx5_vdpa_virtq {
        SLIST_ENTRY(mlx5_vdpa_virtq) next;
        uint16_t index;
        uint16_t vq_size;
        uint8_t notifier_state;
        uint32_t configured:1;
        uint32_t enable:1;
        uint32_t stopped:1;
        uint32_t rx_csum:1;
        uint32_t virtio_version_1_0:1;
        uint32_t event_mode:3;
        uint32_t version;
        pthread_mutex_t virtq_lock;
        struct mlx5_vdpa_priv *priv;
        struct mlx5_devx_obj *virtq;
        struct mlx5_devx_obj *counters;
        struct mlx5_vdpa_event_qp eqp;
        struct {
                struct mlx5dv_devx_umem *obj;
                void *buf;
                uint32_t size;
        } umems[3];
        struct rte_intr_handle *intr_handle;
        uint64_t err_time[3]; /* RDTSC time of recent errors. */
        uint32_t n_retry;
        struct mlx5_devx_virtio_q_couners_attr stats;
        struct mlx5_devx_virtio_q_couners_attr reset;
};

struct mlx5_vdpa_steer {
        struct mlx5_devx_obj *rqt;
        void *domain;
        void *tbl;
        struct {
                struct mlx5dv_flow_matcher *matcher;
                struct mlx5_devx_obj *tir;
                void *tir_action;
                void *flow;
        } rss[7];
};

enum {
        MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER,
        MLX5_VDPA_EVENT_MODE_FIXED_TIMER,
        MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT
};
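
/*
 * Completion processing policy (assumption from the mode names and the
 * timer fields in mlx5_vdpa_priv): DYNAMIC_TIMER polls the CQs with a
 * delay that adapts to traffic, FIXED_TIMER polls at a constant period,
 * and ONLY_INTERRUPT arms the CQs and relies on hardware events alone.
 */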

enum mlx5_dev_state {
        MLX5_VDPA_STATE_PROBED = 0,
        MLX5_VDPA_STATE_CONFIGURED,
        MLX5_VDPA_STATE_IN_PROGRESS /* Shutting down. */
};

struct mlx5_vdpa_priv {
        TAILQ_ENTRY(mlx5_vdpa_priv) next;
        bool connected;
        bool use_c_thread;
        enum mlx5_dev_state state;
        rte_spinlock_t db_lock;
        pthread_mutex_t steer_update_lock;
        uint64_t no_traffic_counter;
        pthread_t timer_tid;
        int event_mode;
        int event_core; /* Event thread cpu affinity core. */
        uint32_t event_us;
        uint32_t timer_delay_us;
        uint32_t no_traffic_max;
        uint8_t hw_latency_mode; /* Hardware CQ moderation mode. */
        uint16_t hw_max_latency_us; /* Hardware CQ moderation period in usec. */
        uint16_t hw_max_pending_comp; /* Hardware CQ moderation counter. */
        uint16_t queue_size; /* virtq depth for pre-creating virtq resource */
        uint16_t queues; /* Max virtq pair for pre-creating virtq resource */
        struct rte_vdpa_device *vdev; /* vDPA device. */
        struct mlx5_common_device *cdev; /* Backend mlx5 device. */
        int vid; /* vhost device id. */
        struct mlx5_hca_vdpa_attr caps;
        uint32_t gpa_mkey_index;
        struct ibv_mr *null_mr;
        struct mlx5_vdpa_vmem_info vmem_info;
        struct mlx5dv_devx_event_channel *eventc;
        struct mlx5dv_devx_event_channel *err_chnl;
        struct mlx5_uar uar;
        struct rte_intr_handle *err_intr_handle;
        struct mlx5_devx_obj *td;
        struct mlx5_devx_obj *tiss[16]; /* TIS list for each LAG port. */
        uint16_t nr_virtqs;
        uint8_t num_lag_ports;
        uint64_t features; /* Negotiated features. */
        uint16_t log_max_rqt_size;
        uint16_t last_c_thrd_idx;
        uint16_t dev_close_progress;
        uint16_t num_mrs; /* Number of memory regions. */
        struct mlx5_vdpa_steer steer;
        struct mlx5dv_var *var;
        void *virtq_db_addr;
        struct mlx5_pmd_wrapped_mr lm_mr;
        struct mlx5_vdpa_query_mr **mrs;
        struct mlx5_vdpa_virtq virtqs[];
};
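
/*
 * virtqs[] is a flexible array member, so the private structure must be
 * allocated with trailing room for all virtqs. A minimal sketch
 * (hypothetical allocation site):
 *
 *   priv = rte_zmalloc(NULL, sizeof(*priv) +
 *                      sizeof(struct mlx5_vdpa_virtq) * max_queues, 0);
 */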

enum {
        MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS,
        MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS,
        MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS,
        MLX5_VDPA_STATS_EXCEED_MAX_CHAIN,
        MLX5_VDPA_STATS_INVALID_BUFFER,
        MLX5_VDPA_STATS_COMPLETION_ERRORS,
        MLX5_VDPA_STATS_MAX
};

/*
 * Check whether a virtq is a receive queue.
 * According to the VIRTIO_NET spec, the virtqueue index identifies its type:
 * 0 receiveq1
 * 1 transmitq1
 * ...
 * 2(N-1) receiveqN
 * 2(N-1)+1 transmitqN
 * 2N controlq
 */
static inline uint8_t
is_virtq_recvq(int virtq_index, int nr_vring)
{
        if (virtq_index % 2 == 0 && virtq_index != nr_vring - 1)
                return 1;
        return 0;
}
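
/*
 * For example, with nr_vring = 5 (two queue pairs plus a control queue),
 * indices 0 and 2 are receive queues, 1 and 3 are transmit queues, and
 * index 4 (nr_vring - 1) is the control queue; its index 2N is always
 * even, so the extra "!= nr_vring - 1" test is what excludes it.
 */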

/**
 * Release all the prepared memory regions and all their related resources.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 */
void mlx5_vdpa_mem_dereg(struct mlx5_vdpa_priv *priv);

/**
 * Register all the memory regions of the virtio device to the HW and allocate
 * all their related resources.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int mlx5_vdpa_mem_register(struct mlx5_vdpa_priv *priv);

/**
 * Create an event QP and all its related resources.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 * @param[in] desc_n
 *   Number of descriptors.
 * @param[in] callfd
 *   The guest notification file descriptor.
 * @param[in,out] virtq
 *   Pointer to the virt-queue structure.
 * @param[in] reset
 *   If true, reset the event QP.
 *
 * @return
 *   0 on success, -1 otherwise and rte_errno is set.
 */
int
mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
        int callfd, struct mlx5_vdpa_virtq *virtq, bool reset);

/**
 * Destroy an event QP and all its related resources.
 *
 * @param[in,out] eqp
 *   Pointer to the event QP structure.
 */
void mlx5_vdpa_event_qp_destroy(struct mlx5_vdpa_event_qp *eqp);

/**
 * Create all the event global resources.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_vdpa_event_qp_global_prepare(struct mlx5_vdpa_priv *priv);

/**
 * Release all the event global resources.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 */
void mlx5_vdpa_event_qp_global_release(struct mlx5_vdpa_priv *priv);

/**
 * Setup CQE event.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv);

/**
 * Unset CQE event.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 */
void mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv);

/**
 * Setup error interrupt handler.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int mlx5_vdpa_err_event_setup(struct mlx5_vdpa_priv *priv);

/**
 * Unset error event handler.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 */
void mlx5_vdpa_err_event_unset(struct mlx5_vdpa_priv *priv);

/**
 * Release virtqs and their resources, except those meant to be reused.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 * @param[in] release_resource
 *   If set, release all resources without preparing any for reuse.
 */
void
mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv,
                bool release_resource);

/**
 * Cleanup cached resources of all virtqs.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 */
void mlx5_vdpa_virtqs_cleanup(struct mlx5_vdpa_priv *priv);

/**
 * Create all the HW virtqs resources and all their related resources.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv);

/**
 * Enable/disable a virtq.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 * @param[in] index
 *   The virtq index.
 * @param[in] enable
 *   Set to enable, otherwise disable.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable);

/**
 * Unset steering - stop traffic.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 */
void mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv);

/**
 * Update steering according to the received queues status.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 * @param[in] is_dummy
 *   If set, steering is updated with a dummy queue as part of resource
 *   preparation.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv, bool is_dummy);

/**
 * Setup steering and all its related resources to enable RSS traffic from the
 * device to all the Rx host queues.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int mlx5_vdpa_steer_setup(struct mlx5_vdpa_priv *priv);

/**
 * Enable/disable live migration logging.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 * @param[in] enable
 *   Set for enable, unset for disable.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int mlx5_vdpa_logging_enable(struct mlx5_vdpa_priv *priv, int enable);

/**
 * Set dirty bitmap logging to allow live migration.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 * @param[in] log_base
 *   Vhost log base.
 * @param[in] log_size
 *   Vhost log size.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
                               uint64_t log_size);

/**
 * Log all virtqs' information for live migration.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv);

/**
 * Modify the virtq state to ready or suspend.
 *
 * @param[in] virtq
 *   The vdpa driver private virtq structure.
 * @param[in] state
 *   Set for ready, otherwise suspend.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int mlx5_vdpa_virtq_modify(struct mlx5_vdpa_virtq *virtq, int state);

/**
 * Stop virtq before destroying it.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 * @param[in] index
 *   The virtq index.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int mlx5_vdpa_virtq_stop(struct mlx5_vdpa_priv *priv, int index);

/**
 * Query virtq information.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 * @param[in] index
 *   The virtq index.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int mlx5_vdpa_virtq_query(struct mlx5_vdpa_priv *priv, int index);

/**
 * Get virtq statistics.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 * @param[in] qid
 *   The virtq index.
 * @param stats
 *   The virtq statistics array to fill.
 * @param n
 *   The number of elements in @p stats array.
 *
 * @return
 *   A negative value on error, otherwise the number of entries filled in the
 *   @p stats array.
 */
int
mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,
                          struct rte_vdpa_stat *stats, unsigned int n);
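
/*
 * A minimal usage sketch (hypothetical caller): fetch every available
 * counter of virtq 0 into a caller-provided array.
 *
 *   struct rte_vdpa_stat stats[MLX5_VDPA_STATS_MAX];
 *   int n = mlx5_vdpa_virtq_stats_get(priv, 0, stats, MLX5_VDPA_STATS_MAX);
 *
 *   if (n < 0)
 *           return n; // query failed
 */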

/**
 * Reset virtq statistics.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 * @param[in] qid
 *   The virtq index.
 *
 * @return
 *   A negative value on error, otherwise 0.
 */
int
mlx5_vdpa_virtq_stats_reset(struct mlx5_vdpa_priv *priv, int qid);

/**
 * Drain the CQEs of all virtq CQs.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 */
void
mlx5_vdpa_drain_cq(struct mlx5_vdpa_priv *priv);

bool
mlx5_vdpa_is_modify_virtq_supported(struct mlx5_vdpa_priv *priv);

/**
 * Create the configuration multi-thread resources.
 *
 * @param[in] cpu_core
 *   CPU core number to set the configuration threads' affinity to.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_vdpa_mult_threads_create(int cpu_core);

/**
 * Destroy the configuration multi-thread resources.
 *
 * @param[in] need_unlock
 *   If set, release the global configuration thread lock on the way out.
 */
void
mlx5_vdpa_mult_threads_destroy(bool need_unlock);

bool
mlx5_vdpa_task_add(struct mlx5_vdpa_priv *priv,
                uint32_t thrd_idx,
                enum mlx5_vdpa_task_type task_type,
                uint32_t *remaining_cnt, uint32_t *err_cnt,
                void **task_data, uint32_t num);
int
mlx5_vdpa_register_mr(struct mlx5_vdpa_priv *priv, uint32_t idx);
bool
mlx5_vdpa_c_thread_wait_bulk_tasks_done(uint32_t *remaining_cnt,
                uint32_t *err_cnt, uint32_t sleep_time);
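
/*
 * A minimal usage sketch (hypothetical values; the boolean conventions are
 * assumptions inferred from the prototypes above): queue a bulk of tasks on
 * one configuration thread, then poll the shared counters for completion.
 *
 *   uint32_t remaining_cnt = 0, err_cnt = 0;
 *   void *task_data[MLX5_VDPA_TASKS_PER_DEV];
 *
 *   // task_data[] is filled per task type, e.g. MR indexes for REG_MR.
 *   if (!mlx5_vdpa_task_add(priv, thrd_idx, MLX5_VDPA_TASK_REG_MR,
 *                           &remaining_cnt, &err_cnt, task_data, num))
 *           return -1; // enqueue failed, fall back to a synchronous path
 *   if (mlx5_vdpa_c_thread_wait_bulk_tasks_done(&remaining_cnt,
 *                                               &err_cnt, 100))
 *           return -1; // at least one task completed with an error
 */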
int
mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index, bool reg_kick);
void
mlx5_vdpa_dev_cache_clean(struct mlx5_vdpa_priv *priv);
void
mlx5_vdpa_virtq_unreg_intr_handle_all(struct mlx5_vdpa_priv *priv);
bool
mlx5_vdpa_virtq_single_resource_prepare(struct mlx5_vdpa_priv *priv,
                int index);
int
mlx5_vdpa_qps2rst2rts(struct mlx5_vdpa_event_qp *eqp);
void
mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq);
void
mlx5_vdpa_prepare_virtq_destroy(struct mlx5_vdpa_priv *priv);
#endif /* RTE_PMD_MLX5_VDPA_H_ */