#ifndef RTE_PMD_MLX5_VDPA_H_
#define RTE_PMD_MLX5_VDPA_H_

#include <linux/virtio_net.h>
#include <sys/queue.h>

#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <rte_vdpa.h>
#include <vdpa_driver.h>
#include <rte_vhost.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
#include <rte_spinlock.h>
#include <rte_interrupts.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common_devx.h>
#include <mlx5_prm.h>

#define MLX5_VDPA_INTR_RETRIES 256
#define MLX5_VDPA_INTR_RETRIES_USEC 1000

#ifndef VIRTIO_F_ORDER_PLATFORM
#define VIRTIO_F_ORDER_PLATFORM 36
#endif

#ifndef VIRTIO_F_RING_PACKED
#define VIRTIO_F_RING_PACKED 34
#endif

#define MLX5_VDPA_DEFAULT_TIMER_DELAY_US 0u
#define MLX5_VDPA_DEFAULT_TIMER_STEP_US 1u

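/* Completion queue used to detect virtq completions and relay guest notifications via callfd. */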
struct mlx5_vdpa_cq {
	uint16_t log_desc_n;
	uint32_t cq_ci:24;
	uint32_t arm_sn:2;
	uint32_t armed:1;
	int callfd;
	rte_spinlock_t sl;
	struct mlx5_devx_cq cq_obj;
	uint64_t errors;
};

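/* Event QP: SW/FW QP pair whose CQ reports virtq events to the driver. */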
struct mlx5_vdpa_event_qp {
	struct mlx5_vdpa_cq cq;
	struct mlx5_devx_obj *fw_qp;
	struct mlx5_devx_qp sw_qp;
	uint16_t qp_pi;
};

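/* Memory region registered for guest memory, either a direct MR or an indirect mkey. */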
struct mlx5_vdpa_query_mr {
	union {
		struct ibv_mr *mr;
		struct mlx5_devx_obj *mkey;
	};
	int is_indirect;
};

enum {
	MLX5_VDPA_NOTIFIER_STATE_DISABLED,
	MLX5_VDPA_NOTIFIER_STATE_ENABLED,
	MLX5_VDPA_NOTIFIER_STATE_ERR
};

#define MLX5_VDPA_USED_RING_LEN(size) \
	((size) * sizeof(struct vring_used_elem) + sizeof(uint16_t) * 3)
#define MLX5_VDPA_MAX_C_THRD 256
#define MLX5_VDPA_MAX_TASKS_PER_THRD 4096
#define MLX5_VDPA_TASKS_PER_DEV 64
#define MLX5_VDPA_MAX_MRS 0xFFFF

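/* vDPA configuration task types handled by the configuration threads. */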
enum mlx5_vdpa_task_type {
	MLX5_VDPA_TASK_REG_MR = 1,
	MLX5_VDPA_TASK_SETUP_VIRTQ,
	MLX5_VDPA_TASK_STOP_VIRTQ,
	MLX5_VDPA_TASK_DEV_CLOSE_NOWAIT,
	MLX5_VDPA_TASK_PREPARE_VIRTQ,
};

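/* Generic task information; the size must be a multiple of 4B. */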
struct mlx5_vdpa_task {
	struct mlx5_vdpa_priv *priv;
	enum mlx5_vdpa_task_type type;
	uint32_t *remaining_cnt;
	uint32_t *err_cnt;
	uint32_t idx;
} __rte_packed __rte_aligned(4);

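/* Per configuration thread context. */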
struct mlx5_vdpa_c_thread {
	pthread_t tid;
	struct rte_ring *rng;
	pthread_cond_t c_cond;
};

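/* Global management of the configuration thread pool, shared by all devices. */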
struct mlx5_vdpa_conf_thread_mng {
	void *initializer_priv;
	uint32_t refcnt;
	uint32_t max_thrds;
	pthread_mutex_t cthrd_lock;
	struct mlx5_vdpa_c_thread cthrd[MLX5_VDPA_MAX_C_THRD];
};
extern struct mlx5_vdpa_conf_thread_mng conf_thread_mng;

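/* Cached guest memory layout used for memory registration. */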
struct mlx5_vdpa_vmem_info {
	struct rte_vhost_memory *vmem;
	uint32_t entries_num;
	uint64_t gcd;
	uint64_t size;
	uint8_t mode;
};

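/* Per virtq state: HW virtq object, event QP, umems and counters. */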
struct mlx5_vdpa_virtq {
	SLIST_ENTRY(mlx5_vdpa_virtq) next;
	uint16_t index;
	uint16_t vq_size;
	uint8_t notifier_state;
	uint32_t configured:1;
	uint32_t enable:1;
	uint32_t stopped:1;
	uint32_t rx_csum:1;
	uint32_t virtio_version_1_0:1;
	uint32_t event_mode:3;
	uint32_t version;
	pthread_mutex_t virtq_lock;
	struct mlx5_vdpa_priv *priv;
	struct mlx5_devx_obj *virtq;
	struct mlx5_devx_obj *counters;
	struct mlx5_vdpa_event_qp eqp;
	struct {
		struct mlx5dv_devx_umem *obj;
		void *buf;
		uint32_t size;
	} umems[3];
	struct rte_intr_handle *intr_handle;
	uint64_t err_time[3];
	uint32_t n_retry;
	struct mlx5_devx_virtio_q_couners_attr stats;
	struct mlx5_devx_virtio_q_couners_attr reset;
};

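/* Steering resources used to spread device traffic to the enabled RX virtqs. */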
struct mlx5_vdpa_steer {
	struct mlx5_devx_obj *rqt;
	void *domain;
	void *tbl;
	struct {
		struct mlx5dv_flow_matcher *matcher;
		struct mlx5_devx_obj *tir;
		void *tir_action;
		void *flow;
	} rss[7];
};

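/* Completion event modes used to relay HW completions to the guest. */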
enum {
	MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER,
	MLX5_VDPA_EVENT_MODE_FIXED_TIMER,
	MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT
};

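/* vDPA device states. */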
enum mlx5_dev_state {
	MLX5_VDPA_STATE_PROBED = 0,
	MLX5_VDPA_STATE_CONFIGURED,
	MLX5_VDPA_STATE_IN_PROGRESS
};

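/* Per device private context, followed by the flexible virtqs array. */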
struct mlx5_vdpa_priv {
	TAILQ_ENTRY(mlx5_vdpa_priv) next;
	bool connected;
	bool use_c_thread;
	enum mlx5_dev_state state;
	rte_spinlock_t db_lock;
	pthread_mutex_t steer_update_lock;
	uint64_t no_traffic_counter;
	pthread_t timer_tid;
	int event_mode;
	int event_core;
	uint32_t event_us;
	uint32_t timer_delay_us;
	uint32_t no_traffic_max;
	uint8_t hw_latency_mode;
	uint16_t hw_max_latency_us;
	uint16_t hw_max_pending_comp;
	uint16_t queue_size;
	uint16_t queues;
	struct rte_vdpa_device *vdev;
	struct mlx5_common_device *cdev;
	int vid;
	struct mlx5_hca_vdpa_attr caps;
	uint32_t gpa_mkey_index;
	struct ibv_mr *null_mr;
	struct mlx5_vdpa_vmem_info vmem_info;
	struct mlx5dv_devx_event_channel *eventc;
	struct mlx5dv_devx_event_channel *err_chnl;
	struct mlx5_uar uar;
	struct rte_intr_handle *err_intr_handle;
	struct mlx5_devx_obj *td;
	struct mlx5_devx_obj *tiss[16];
	uint16_t nr_virtqs;
	uint8_t num_lag_ports;
	uint64_t features;
	uint16_t log_max_rqt_size;
	uint16_t last_c_thrd_idx;
	uint16_t dev_close_progress;
	uint16_t num_mrs;
	struct mlx5_vdpa_steer steer;
	struct mlx5dv_var *var;
	void *virtq_db_addr;
	struct mlx5_pmd_wrapped_mr lm_mr;
	struct mlx5_vdpa_query_mr **mrs;
	struct mlx5_vdpa_virtq virtqs[];
};

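/* Indexes of the per virtq statistics reported by the vDPA stats API. */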
enum {
	MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS,
	MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS,
	MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS,
	MLX5_VDPA_STATS_EXCEED_MAX_CHAIN,
	MLX5_VDPA_STATS_INVALID_BUFFER,
	MLX5_VDPA_STATS_COMPLETION_ERRORS,
	MLX5_VDPA_STATS_MAX
};

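/*
 * Check whether a virtq index denotes a receive queue.
 * Per the virtio-net spec, queue indexes alternate as receiveq1, transmitq1,
 * receiveq2, transmitq2, ..., so even indexes are receive queues, except for
 * the last queue, which is the control queue when present.
 */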
static inline uint8_t
is_virtq_recvq(int virtq_index, int nr_vring)
{
	if (virtq_index % 2 == 0 && virtq_index != nr_vring - 1)
		return 1;
	return 0;
}

/* Release all registered guest memory regions and their related resources. */
void mlx5_vdpa_mem_dereg(struct mlx5_vdpa_priv *priv);

/*
 * Register all the memory regions of the virtio device to the HW and allocate
 * all their related resources. Returns 0 on success, a negative errno value
 * otherwise and rte_errno is set.
 */
int mlx5_vdpa_mem_register(struct mlx5_vdpa_priv *priv);

/*
 * Create an event QP and all its related resources for a virtq, or reset the
 * existing event QP when reset is true. Returns 0 on success, a negative
 * value otherwise.
 */
int
mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
	int callfd, struct mlx5_vdpa_virtq *virtq, bool reset);

/* Destroy an event QP and all its related resources. */
void mlx5_vdpa_event_qp_destroy(struct mlx5_vdpa_event_qp *eqp);

/* Create the global resources needed by all event QPs. */
int
mlx5_vdpa_event_qp_global_prepare(struct mlx5_vdpa_priv *priv);

/* Release the global event QP resources. */
void mlx5_vdpa_event_qp_global_release(struct mlx5_vdpa_priv *priv);

/*
 * Set up the CQE event handling used to relay HW completions to the guest.
 * Returns 0 on success, a negative value otherwise.
 */
int mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv);

/* Unset the CQE event handling. */
void mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv);

/*
 * Set up the virtq error event handler. Returns 0 on success, a negative
 * value otherwise.
 */
int mlx5_vdpa_err_event_setup(struct mlx5_vdpa_priv *priv);

/* Unset the virtq error event handler. */
void mlx5_vdpa_err_event_unset(struct mlx5_vdpa_priv *priv);

/*
 * Release the virtqs and their resources; resources that can be reused are
 * kept unless release_resource is set.
 */
void
mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv,
		bool release_resource);

/* Clean up the cached resources of all virtqs. */
void mlx5_vdpa_virtqs_cleanup(struct mlx5_vdpa_priv *priv);

/*
 * Create the HW virtq objects and all their related resources. Returns 0 on
 * success, a negative value otherwise.
 */
int mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv);

/*
 * Enable or disable a single virtq. Returns 0 on success, a negative value
 * otherwise.
 */
int mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable);

/* Unset the steering rules and stop the traffic. */
void mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv);

/*
 * Update the steering rules according to the virtqs state; when is_dummy is
 * set, a dummy queue layout is used to pre-create the resources. Returns 0 on
 * success, a negative value otherwise.
 */
int mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv, bool is_dummy);

/*
 * Set up the steering and all its related resources to allow RSS traffic from
 * the device to all the Rx host queues. Returns 0 on success, a negative
 * value otherwise.
 */
int mlx5_vdpa_steer_setup(struct mlx5_vdpa_priv *priv);

/*
 * Enable or disable dirty page logging for live migration. Returns 0 on
 * success, a negative value otherwise.
 */
int mlx5_vdpa_logging_enable(struct mlx5_vdpa_priv *priv, int enable);

/*
 * Set the dirty bitmap (vhost log base and size) used for live migration.
 * Returns 0 on success, a negative value otherwise.
 */
int mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
		uint64_t log_size);

/*
 * Log the virtqs information for live migration. Returns 0 on success, a
 * negative value otherwise.
 */
int mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv);

/*
 * Modify the virtq state to ready or suspend. Returns 0 on success, a
 * negative value otherwise.
 */
int mlx5_vdpa_virtq_modify(struct mlx5_vdpa_virtq *virtq, int state);

/*
 * Stop a virtq before destroying it. Returns 0 on success, a negative value
 * otherwise.
 */
int mlx5_vdpa_virtq_stop(struct mlx5_vdpa_priv *priv, int index);

/*
 * Query the virtq information from the HW object. Returns 0 on success, a
 * negative value otherwise.
 */
int mlx5_vdpa_virtq_query(struct mlx5_vdpa_priv *priv, int index);

/*
 * Get the virtq statistics. Returns the number of entries filled in the
 * stats array, or a negative value on error.
 */
int
mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,
		struct rte_vdpa_stat *stats, unsigned int n);

/* Reset the virtq statistics. Returns 0 on success, a negative value on error. */
int
mlx5_vdpa_virtq_stats_reset(struct mlx5_vdpa_priv *priv, int qid);

/* Drain the pending CQEs of all virtq CQs. */
void
mlx5_vdpa_drain_cq(struct mlx5_vdpa_priv *priv);

/* Check whether the device supports modifying an existing virtq object. */
bool
mlx5_vdpa_is_modify_virtq_supported(struct mlx5_vdpa_priv *priv);

/*
 * Create the configuration thread pool and set its CPU affinity to cpu_core.
 * Returns 0 on success, a negative value otherwise.
 */
int
mlx5_vdpa_mult_threads_create(int cpu_core);

/* Destroy the configuration thread pool. */
void
mlx5_vdpa_mult_threads_destroy(bool need_unlock);

/* Add tasks to a configuration thread ring for asynchronous processing. */
bool
mlx5_vdpa_task_add(struct mlx5_vdpa_priv *priv,
		uint32_t thrd_idx,
		enum mlx5_vdpa_task_type task_type,
		uint32_t *remaining_cnt, uint32_t *err_cnt,
		void **task_data, uint32_t num);
/* Register a single guest memory region to the HW. */
int
mlx5_vdpa_register_mr(struct mlx5_vdpa_priv *priv, uint32_t idx);
/* Wait until all the tasks tracked by remaining_cnt complete or the wait times out. */
bool
mlx5_vdpa_c_thread_wait_bulk_tasks_done(uint32_t *remaining_cnt,
		uint32_t *err_cnt, uint32_t sleep_time);
/* Set up a single virtq and all its related resources. */
int
mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index, bool reg_kick);
/* Release the device cached resources kept for reuse. */
void
mlx5_vdpa_dev_cache_clean(struct mlx5_vdpa_priv *priv);
/* Unregister the kick interrupt handlers of all virtqs. */
void
mlx5_vdpa_virtq_unreg_intr_handle_all(struct mlx5_vdpa_priv *priv);
/* Prepare the reusable resources of a single virtq. */
bool
mlx5_vdpa_virtq_single_resource_prepare(struct mlx5_vdpa_priv *priv,
		int index);
/* Move an event QP through RESET to RTS state. */
int
mlx5_vdpa_qps2rst2rts(struct mlx5_vdpa_event_qp *eqp);
/* Destroy the HW objects of a single virtq. */
void
mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq);
/* Destroy the pre-created virtq resources of the device. */
void
mlx5_vdpa_prepare_virtq_destroy(struct mlx5_vdpa_priv *priv);
#endif /* RTE_PMD_MLX5_VDPA_H_ */