1
2
3
4
5#include "mlx5_flow_os.h"
6#include "mlx5_win_ext.h"
7
8#include <rte_thread.h>
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28int
29mlx5_flow_os_validate_flow_attributes(struct rte_eth_dev *dev,
30 const struct rte_flow_attr *attributes,
31 bool external,
32 struct rte_flow_error *error)
33{
34 int ret = 1;
35
36 RTE_SET_USED(dev);
37 RTE_SET_USED(external);
38 if (attributes->group)
39 return rte_flow_error_set(error, ENOTSUP,
40 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
41 NULL,
42 "groups are not supported");
43 if (attributes->priority)
44 return rte_flow_error_set(error, ENOTSUP,
45 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
46 NULL,
47 "priorities are not supported");
48 if (attributes->transfer)
49 return rte_flow_error_set(error, ENOTSUP,
50 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
51 NULL,
52 "transfer not supported");
53 if (!(attributes->ingress))
54 return rte_flow_error_set(error, ENOTSUP,
55 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
56 NULL, "must specify ingress only");
57 return ret;
58}
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75int
76mlx5_flow_os_create_flow_matcher(void *ctx,
77 void *attr,
78 void *table,
79 void **matcher)
80{
81 struct mlx5dv_flow_matcher_attr *mattr;
82
83 RTE_SET_USED(table);
84 *matcher = NULL;
85 mattr = attr;
86 if (mattr->type != IBV_FLOW_ATTR_NORMAL) {
87 rte_errno = ENOTSUP;
88 return -rte_errno;
89 }
90 struct mlx5_matcher *mlx5_matcher =
91 mlx5_malloc(MLX5_MEM_ZERO,
92 sizeof(struct mlx5_matcher) +
93 MLX5_ST_SZ_BYTES(fte_match_param),
94 0, SOCKET_ID_ANY);
95 if (!mlx5_matcher) {
96 rte_errno = ENOMEM;
97 return -rte_errno;
98 }
99 mlx5_matcher->ctx = ctx;
100 memcpy(&mlx5_matcher->attr, attr, sizeof(mlx5_matcher->attr));
101 memcpy(&mlx5_matcher->match_buf,
102 mattr->match_mask->match_buf,
103 MLX5_ST_SZ_BYTES(fte_match_param));
104 *matcher = mlx5_matcher;
105 return 0;
106}
107
108
109
110
111
112
113
114
115
116
/**
 * Destroy a flow matcher created by mlx5_flow_os_create_flow_matcher()
 * and release its memory.
 *
 * @param matcher
 *   Matcher to free (presumably NULL-tolerant like free() — depends on
 *   mlx5_free(), not visible here).
 *
 * @return
 *   Always 0.
 */
int
mlx5_flow_os_destroy_flow_matcher(void *matcher)
{
	mlx5_free(matcher);
	return 0;
}
123
124
125
126
127
128
129
130
131
132
133
134
135int
136mlx5_flow_os_create_flow_action_dest_devx_tir(struct mlx5_devx_obj *tir,
137 void **action)
138{
139 struct mlx5_action *mlx5_action =
140 mlx5_malloc(MLX5_MEM_ZERO,
141 sizeof(struct mlx5_action),
142 0, SOCKET_ID_ANY);
143
144 if (!mlx5_action) {
145 rte_errno = ENOMEM;
146 return -rte_errno;
147 }
148 mlx5_action->type = MLX5_FLOW_CONTEXT_DEST_TYPE_TIR;
149 mlx5_action->dest_tir.id = tir->id;
150 *action = mlx5_action;
151 return 0;
152}
153
154
155
156
157
158
159
160
161
162
/**
 * Destroy a flow action created by
 * mlx5_flow_os_create_flow_action_dest_devx_tir() and free its memory.
 *
 * @param action
 *   Action to free.
 *
 * @return
 *   Always 0.
 */
int
mlx5_flow_os_destroy_flow_action(void *action)
{
	mlx5_free(action);
	return 0;
}
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187int
188mlx5_flow_os_create_flow(void *matcher, void *match_value,
189 size_t num_actions,
190 void *actions[], void **flow)
191{
192 struct mlx5_action *action;
193 size_t i;
194 struct mlx5_matcher *mlx5_matcher = matcher;
195 struct mlx5_flow_dv_match_params *mlx5_match_value = match_value;
196 uint32_t in[MLX5_ST_SZ_DW(devx_fs_rule_add_in)] = {0};
197 void *matcher_c = MLX5_ADDR_OF(devx_fs_rule_add_in, in,
198 match_criteria);
199 void *matcher_v = MLX5_ADDR_OF(devx_fs_rule_add_in, in,
200 match_value);
201
202 MLX5_ASSERT(mlx5_matcher->ctx);
203 memcpy(matcher_c, mlx5_matcher->match_buf,
204 mlx5_match_value->size);
205
206 memcpy(matcher_v, mlx5_match_value->buf,
207 mlx5_match_value->size);
208 for (i = 0; i < num_actions; i++) {
209 action = actions[i];
210 switch (action->type) {
211 case MLX5_FLOW_CONTEXT_DEST_TYPE_TIR:
212 MLX5_SET(devx_fs_rule_add_in, in,
213 dest.destination_type,
214 MLX5_FLOW_CONTEXT_DEST_TYPE_TIR);
215 MLX5_SET(devx_fs_rule_add_in, in,
216 dest.destination_id,
217 action->dest_tir.id);
218 break;
219 default:
220 break;
221 }
222 MLX5_SET(devx_fs_rule_add_in, in, match_criteria_enable,
223 MLX5_MATCH_OUTER_HEADERS);
224 }
225 *flow = mlx5_glue->devx_fs_rule_add(mlx5_matcher->ctx, in, sizeof(in));
226 return (*flow) ? 0 : -1;
227}
228
229
230
231
232
233
234
235
236
237
/**
 * Remove a DevX flow steering rule created by mlx5_flow_os_create_flow().
 *
 * @param drv_flow_ptr
 *   Rule handle returned by devx_fs_rule_add().
 *
 * @return
 *   Whatever devx_fs_rule_del() returns (0 on success).
 */
int
mlx5_flow_os_destroy_flow(void *drv_flow_ptr)
{
	return mlx5_glue->devx_fs_rule_del(drv_flow_ptr);
}
243
/*
 * Node of the per-thread flow workspace tracking list: binds a duplicated
 * thread handle to the flow workspace allocated for that thread so the
 * workspace can be reclaimed once the thread terminates.
 */
struct mlx5_workspace_thread {
	HANDLE thread_handle; /* Duplicated handle of the owning thread. */
	struct mlx5_flow_workspace *mlx5_ws; /* Workspace of that thread. */
	struct mlx5_workspace_thread *next; /* Next node, NULL at tail. */
};
249
250
251
252
/* Tail of the thread workspace list (last inserted node). */
static struct mlx5_workspace_thread *curr;
/* Head of the thread workspace list. */
static struct mlx5_workspace_thread *first;
/* TLS key holding the calling thread's flow workspace pointer. */
rte_thread_key ws_tls_index;
/* Serializes insertions/cleanup of the thread workspace list. */
static pthread_mutex_t lock_thread_list;
257
258static bool
259mlx5_is_thread_alive(HANDLE thread_handle)
260{
261 DWORD result = WaitForSingleObject(thread_handle, 0);
262
263 if (result == WAIT_OBJECT_0)
264 return false;
265 return false;
266}
267
268static int
269mlx5_get_current_thread(HANDLE *p_handle)
270{
271 BOOL ret = DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
272 GetCurrentProcess(), p_handle, 0, 0, DUPLICATE_SAME_ACCESS);
273
274 if (!ret) {
275 RTE_LOG_WIN32_ERR("DuplicateHandle()");
276 return -1;
277 }
278 return 0;
279}
280
/*
 * Walk the thread workspace list and release entries whose owning thread
 * has terminated, skipping the caller's own entry. Repairs the global
 * first/curr pointers when a removed node was one of them.
 *
 * NOTE(review): mlx5_add_workspace_to_list() calls this under
 * lock_thread_list, but mlx5_flow_os_release_workspace() calls it without
 * the lock — presumably only at single-threaded teardown; confirm.
 */
static void
mlx5_clear_thread_list(void)
{
	struct mlx5_workspace_thread *temp = first;
	struct mlx5_workspace_thread *next, *prev = NULL;
	HANDLE curr_thread;

	if (!temp)
		return;
	/* Duplicate our own handle to recognize our entry below. */
	if (mlx5_get_current_thread(&curr_thread)) {
		DRV_LOG(ERR, "Failed to get current thread "
			"handle.");
		return;
	}
	while (temp) {
		next = temp->next;
		/*
		 * Free nodes of dead threads only; the handle comparison
		 * guards the caller's own entry (NOTE(review): duplicated
		 * handle values for the same thread may differ — verify
		 * this comparison actually matches).
		 */
		if (temp->thread_handle != curr_thread &&
		    !mlx5_is_thread_alive(temp->thread_handle)) {
			if (temp == first) {
				/* Removing the head: advance first/curr. */
				if (curr == temp)
					curr = temp->next;
				first = temp->next;
			} else if (temp == curr) {
				/* Removing the tail: curr falls back. */
				curr = prev;
			}
			flow_release_workspace(temp->mlx5_ws);
			CloseHandle(temp->thread_handle);
			free(temp);
			if (prev)
				prev->next = next;
			temp = next;
			continue;
		}
		prev = temp;
		temp = temp->next;
	}
	CloseHandle(curr_thread);
}
319
320
321
322
323void
324mlx5_flow_os_release_workspace(void)
325{
326 mlx5_clear_thread_list();
327 if (first) {
328 MLX5_ASSERT(!first->next);
329 flow_release_workspace(first->mlx5_ws);
330 free(first);
331 }
332 rte_thread_key_delete(ws_tls_index);
333 pthread_mutex_destroy(&lock_thread_list);
334}
335
336static int
337mlx5_add_workspace_to_list(struct mlx5_flow_workspace *data)
338{
339 HANDLE curr_thread;
340 struct mlx5_workspace_thread *temp = calloc(1, sizeof(*temp));
341
342 if (!temp) {
343 DRV_LOG(ERR, "Failed to allocate thread workspace "
344 "memory.");
345 return -1;
346 }
347 if (mlx5_get_current_thread(&curr_thread)) {
348 DRV_LOG(ERR, "Failed to get current thread "
349 "handle.");
350 free(temp);
351 return -1;
352 }
353 temp->mlx5_ws = data;
354 temp->thread_handle = curr_thread;
355 pthread_mutex_lock(&lock_thread_list);
356 mlx5_clear_thread_list();
357 if (!first) {
358 first = temp;
359 curr = temp;
360 } else {
361 curr->next = temp;
362 curr = curr->next;
363 }
364 pthread_mutex_unlock(&lock_thread_list);
365 return 0;
366}
367
368int
369mlx5_flow_os_init_workspace_once(void)
370{
371 int err = rte_thread_key_create(&ws_tls_index, NULL);
372
373 if (err) {
374 DRV_LOG(ERR, "Can't create flow workspace data thread key.");
375 return err;
376 }
377 pthread_mutex_init(&lock_thread_list, NULL);
378 return 0;
379}
380
/**
 * Get the calling thread's flow workspace from TLS.
 *
 * @return
 *   Pointer stored by mlx5_flow_os_set_specific_workspace(), or NULL if
 *   none was set (or the TLS lookup failed).
 */
void *
mlx5_flow_os_get_specific_workspace(void)
{
	return rte_thread_value_get(ws_tls_index);
}
386
/**
 * Bind @p data as the calling thread's flow workspace.
 *
 * On a thread's first call the workspace is also linked into the global
 * tracking list so it can be reclaimed after the thread dies. rte_errno
 * is saved and restored: it is cleared only to tell a genuine TLS lookup
 * failure apart from "no value stored yet".
 *
 * @param data
 *   Workspace to associate with the calling thread.
 *
 * @return
 *   0 on success, -1 on list-insertion or TLS-set failure.
 */
int
mlx5_flow_os_set_specific_workspace(struct mlx5_flow_workspace *data)
{
	int err = 0;
	int old_err = rte_errno;

	rte_errno = 0;
	if (!rte_thread_value_get(ws_tls_index)) {
		/* NULL + rte_errno set means the TLS read itself failed. */
		if (rte_errno) {
			DRV_LOG(ERR, "Failed checking specific workspace.");
			rte_errno = old_err;
			return -1;
		}
		/* First workspace for this thread: track it globally. */
		err = mlx5_add_workspace_to_list(data);
		if (err) {
			DRV_LOG(ERR, "Failed adding workspace to list.");
			rte_errno = old_err;
			return -1;
		}
	}
	if (rte_thread_value_set(ws_tls_index, data)) {
		DRV_LOG(ERR, "Failed setting specific workspace.");
		err = -1;
	}
	rte_errno = old_err;
	return err;
}
419