#include <linux/rhashtable.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs_helpers.h>
#include <linux/mlx5/fs.h>
#include <linux/rbtree.h>

#include "mlx5_core.h"
#include "fs_cmd.h"
#include "fpga/ipsec.h"
#include "fpga/sdk.h"
#include "fpga/core.h"

#define SBU_QP_QUEUE_SIZE 8
#define MLX5_FPGA_IPSEC_CMD_TIMEOUT_MSEC (60 * 1000)

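/* Command protocol: IPSec commands are sent to the FPGA over the SBU QP
 * connection and queued on a pending list; the driver matches each
 * response to the oldest pending command context, relying on the FPGA
 * answering commands in submission order.
 */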
enum mlx5_fpga_ipsec_cmd_status {
	MLX5_FPGA_IPSEC_CMD_PENDING,
	MLX5_FPGA_IPSEC_CMD_SEND_FAIL,
	MLX5_FPGA_IPSEC_CMD_COMPLETE,
};

struct mlx5_fpga_ipsec_cmd_context {
	struct mlx5_fpga_dma_buf buf;
	enum mlx5_fpga_ipsec_cmd_status status;
	struct mlx5_ifc_fpga_ipsec_cmd_resp resp;
	int status_code;
	struct completion complete;
	struct mlx5_fpga_device *dev;
	struct list_head list; /* Item in pending_cmds */
	u8 command[];
};

struct mlx5_fpga_esp_xfrm;

struct mlx5_fpga_ipsec_sa_ctx {
	struct rhash_head hash;
	struct mlx5_ifc_fpga_ipsec_sa hw_sa;
	struct mlx5_core_dev *dev;
	struct mlx5_fpga_esp_xfrm *fpga_xfrm;
};

struct mlx5_fpga_esp_xfrm {
	unsigned int num_rules;
	struct mlx5_fpga_ipsec_sa_ctx *sa_ctx;
	struct mutex lock; /* xfrm lock */
	struct mlx5_accel_esp_xfrm accel_xfrm;
};

struct mlx5_fpga_ipsec_rule {
	struct rb_node node;
	struct fs_fte *fte;
	struct mlx5_fpga_ipsec_sa_ctx *ctx;
};

static const struct rhashtable_params rhash_sa = {
	/* Keying on the whole hw_sa allows identical SAs to be detected. */
	.key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa),
	.key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa),
	.head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};

struct mlx5_fpga_ipsec {
	struct mlx5_fpga_device *fdev;
	struct list_head pending_cmds;
	spinlock_t pending_cmds_lock;
	u32 caps[MLX5_ST_SZ_DW(ipsec_extended_cap)];
	struct mlx5_fpga_conn *conn;

	struct notifier_block fs_notifier_ingress_bypass;
	struct notifier_block fs_notifier_egress;

	/* Map a hardware SA (mlx5_ifc_fpga_ipsec_sa) to its SA context
	 * (mlx5_fpga_ipsec_sa_ctx); used to detect and refuse duplicate
	 * SAs, which the FPGA does not allow.
	 */
	struct rhashtable sa_hash;	/* hw_sa -> mlx5_fpga_ipsec_sa_ctx */
	struct mutex sa_hash_lock;

	/* Tree holding all rules offloaded to this FPGA device,
	 * keyed by the rule's fs_fte pointer.
	 */
	struct rb_root rules_rb;
	struct mutex rules_rb_lock;
};

static bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
{
	if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga))
		return false;

	if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) !=
	    MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX)
		return false;

	if (MLX5_CAP_FPGA(mdev, sandbox_product_id) !=
	    MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC)
		return false;

	return true;
}

static void mlx5_fpga_ipsec_send_complete(struct mlx5_fpga_conn *conn,
					  struct mlx5_fpga_device *fdev,
					  struct mlx5_fpga_dma_buf *buf,
					  u8 status)
{
	struct mlx5_fpga_ipsec_cmd_context *context;

	/* Only send failures complete here; on success the context is
	 * completed by the response path in mlx5_fpga_ipsec_recv().
	 */
	if (status) {
		context = container_of(buf, struct mlx5_fpga_ipsec_cmd_context,
				       buf);
		mlx5_fpga_warn(fdev, "IPSec command send failed with status %u\n",
			       status);
		context->status = MLX5_FPGA_IPSEC_CMD_SEND_FAIL;
		complete(&context->complete);
	}
}

static inline
int syndrome_to_errno(enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome)
{
	switch (syndrome) {
	case MLX5_FPGA_IPSEC_RESPONSE_SUCCESS:
		return 0;
	case MLX5_FPGA_IPSEC_RESPONSE_SADB_ISSUE:
		return -EEXIST;
	case MLX5_FPGA_IPSEC_RESPONSE_ILLEGAL_REQUEST:
		return -EINVAL;
	case MLX5_FPGA_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE:
		return -EIO;
	}
	return -EIO;
}

static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf)
{
	struct mlx5_ifc_fpga_ipsec_cmd_resp *resp = buf->sg[0].data;
	struct mlx5_fpga_ipsec_cmd_context *context;
	enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome;
	struct mlx5_fpga_device *fdev = cb_arg;
	unsigned long flags;

	if (buf->sg[0].size < sizeof(*resp)) {
		mlx5_fpga_warn(fdev, "Short receive from FPGA IPSec: %u < %zu bytes\n",
			       buf->sg[0].size, sizeof(*resp));
		return;
	}

	mlx5_fpga_dbg(fdev, "mlx5_ipsec recv_cb syndrome %08x\n",
		      ntohl(resp->syndrome));

	spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
	context = list_first_entry_or_null(&fdev->ipsec->pending_cmds,
					   struct mlx5_fpga_ipsec_cmd_context,
					   list);
	if (context)
		list_del(&context->list);
	spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);

	if (!context) {
		mlx5_fpga_warn(fdev, "Received IPSec offload response without pending command request\n");
		return;
	}
	mlx5_fpga_dbg(fdev, "Handling response for %p\n", context);

	syndrome = ntohl(resp->syndrome);
	context->status_code = syndrome_to_errno(syndrome);
	context->status = MLX5_FPGA_IPSEC_CMD_COMPLETE;
	memcpy(&context->resp, resp, sizeof(*resp));

	if (context->status_code)
		mlx5_fpga_warn(fdev, "IPSec command failed with syndrome %08x\n",
			       syndrome);

	complete(&context->complete);
}

static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev,
				      const void *cmd, int cmd_size)
{
	struct mlx5_fpga_ipsec_cmd_context *context;
	struct mlx5_fpga_device *fdev = mdev->fpga;
	unsigned long flags;
	int res;

	if (!fdev || !fdev->ipsec)
		return ERR_PTR(-EOPNOTSUPP);

	if (cmd_size & 3)
		return ERR_PTR(-EINVAL);

	context = kzalloc(sizeof(*context) + cmd_size, GFP_ATOMIC);
	if (!context)
		return ERR_PTR(-ENOMEM);

	context->status = MLX5_FPGA_IPSEC_CMD_PENDING;
	context->dev = fdev;
	context->buf.complete = mlx5_fpga_ipsec_send_complete;
	init_completion(&context->complete);
	memcpy(&context->command, cmd, cmd_size);
	context->buf.sg[0].size = cmd_size;
	context->buf.sg[0].data = &context->command;

	spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
	res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf);
	if (!res)
		list_add_tail(&context->list, &fdev->ipsec->pending_cmds);
	spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);

	if (res) {
		mlx5_fpga_warn(fdev, "Failed to send IPSec command: %d\n", res);
		kfree(context);
		return ERR_PTR(res);
	}

	/* Context should be freed by the caller after completion. */
	return context;
}

static int mlx5_fpga_ipsec_cmd_wait(void *ctx)
{
	struct mlx5_fpga_ipsec_cmd_context *context = ctx;
	unsigned long timeout =
		msecs_to_jiffies(MLX5_FPGA_IPSEC_CMD_TIMEOUT_MSEC);
	int res;

	res = wait_for_completion_timeout(&context->complete, timeout);
	if (!res) {
		mlx5_fpga_warn(context->dev, "Failure waiting for IPSec command response\n");
		return -ETIMEDOUT;
	}

	if (context->status == MLX5_FPGA_IPSEC_CMD_COMPLETE)
		res = context->status_code;
	else
		res = -EIO;

	return res;
}

static inline bool is_v2_sadb_supported(struct mlx5_fpga_ipsec *fipsec)
{
	return MLX5_GET(ipsec_extended_cap, fipsec->caps, v2_command);
}

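/* Push an SA command to the FPGA. Devices with the v2 SADB take the full
 * mlx5_ifc_fpga_ipsec_sa; older devices only take the ipsec_sa_v1 prefix.
 */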
static int mlx5_fpga_ipsec_update_hw_sa(struct mlx5_fpga_device *fdev,
					struct mlx5_ifc_fpga_ipsec_sa *hw_sa,
					int opcode)
{
	struct mlx5_core_dev *dev = fdev->mdev;
	struct mlx5_ifc_fpga_ipsec_sa *sa;
	struct mlx5_fpga_ipsec_cmd_context *cmd_context;
	size_t sa_cmd_size;
	int err;

	hw_sa->ipsec_sa_v1.cmd = htonl(opcode);
	if (is_v2_sadb_supported(fdev->ipsec))
		sa_cmd_size = sizeof(*hw_sa);
	else
		sa_cmd_size = sizeof(hw_sa->ipsec_sa_v1);

	cmd_context = (struct mlx5_fpga_ipsec_cmd_context *)
			mlx5_fpga_ipsec_cmd_exec(dev, hw_sa, sa_cmd_size);
	if (IS_ERR(cmd_context))
		return PTR_ERR(cmd_context);

	err = mlx5_fpga_ipsec_cmd_wait(cmd_context);
	if (err)
		goto out;

	sa = (struct mlx5_ifc_fpga_ipsec_sa *)&cmd_context->command;
	if (sa->ipsec_sa_v1.sw_sa_handle != cmd_context->resp.sw_sa_handle) {
		mlx5_fpga_err(fdev, "mismatch SA handle. cmd 0x%08x vs resp 0x%08x\n",
			      ntohl(sa->ipsec_sa_v1.sw_sa_handle),
			      ntohl(cmd_context->resp.sw_sa_handle));
		err = -EIO;
	}

out:
	kfree(cmd_context);
	return err;
}

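/**
 * mlx5_fpga_ipsec_device_caps() - Report IPSec offload capabilities
 * @mdev: mlx5 core device
 *
 * Returns a bitmask of MLX5_ACCEL_IPSEC_CAP_* flags, or 0 when the
 * device is not an FPGA IPSec device.
 */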
u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
	struct mlx5_fpga_device *fdev = mdev->fpga;
	u32 ret = 0;

	if (!mlx5_fpga_is_ipsec_device(mdev))
		return ret;

	ret |= MLX5_ACCEL_IPSEC_CAP_DEVICE;
	ret |= MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA;

	if (!fdev->ipsec)
		return ret;

	if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esp))
		ret |= MLX5_ACCEL_IPSEC_CAP_ESP;

	if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, ipv6))
		ret |= MLX5_ACCEL_IPSEC_CAP_IPV6;

	if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, lso))
		ret |= MLX5_ACCEL_IPSEC_CAP_LSO;

	if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, rx_no_trailer))
		ret |= MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER;

	if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esn)) {
		ret |= MLX5_ACCEL_IPSEC_CAP_ESN;
		ret |= MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN;
	}

	return ret;
}

unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev)
{
	struct mlx5_fpga_device *fdev = mdev->fpga;

	if (!fdev || !fdev->ipsec)
		return 0;

	return MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
			number_of_ipsec_counters);
}

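/**
 * mlx5_fpga_ipsec_counters_read() - Read IPSec counters from the FPGA
 * @mdev: mlx5 core device
 * @counters: output array of 64-bit counter values
 * @counters_count: capacity of @counters
 *
 * Returns 0 on success or a negative errno. At most the number of
 * counters reported by mlx5_fpga_ipsec_counters_count() is written.
 */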
int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
				  unsigned int counters_count)
{
	struct mlx5_fpga_device *fdev = mdev->fpga;
	unsigned int i;
	__be32 *data;
	u32 count;
	u64 addr;
	int ret;

	if (!fdev || !fdev->ipsec)
		return 0;

	addr = (u64)MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
			     ipsec_counters_addr_low) +
	       ((u64)MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
			      ipsec_counters_addr_high) << 32);

	count = mlx5_fpga_ipsec_counters_count(mdev);

	data = kcalloc(count * 2, sizeof(*data), GFP_KERNEL);
	if (!data) {
		ret = -ENOMEM;
		goto out;
	}

	ret = mlx5_fpga_mem_read(fdev, count * sizeof(u64), addr, data,
				 MLX5_FPGA_ACCESS_TYPE_DONTCARE);
	if (ret < 0) {
		mlx5_fpga_err(fdev, "Failed to read IPSec counters from HW: %d\n",
			      ret);
		goto out;
	}
	ret = 0;

	if (count > counters_count)
		count = counters_count;

	/* Each counter is low word, then high. But each word is big-endian */
	for (i = 0; i < count; i++)
		counters[i] = (u64)ntohl(data[i * 2]) |
			      ((u64)ntohl(data[i * 2 + 1]) << 32);

out:
	kfree(data);
	return ret;
}

static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags)
{
	struct mlx5_fpga_ipsec_cmd_context *context;
	struct mlx5_ifc_fpga_ipsec_cmd_cap cmd = {0};
	int err;

	cmd.cmd = htonl(MLX5_FPGA_IPSEC_CMD_OP_SET_CAP);
	cmd.flags = htonl(flags);
	context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd));
	if (IS_ERR(context))
		return PTR_ERR(context);

	err = mlx5_fpga_ipsec_cmd_wait(context);
	if (err)
		goto out;

	if ((context->resp.flags & cmd.flags) != cmd.flags) {
		mlx5_fpga_err(context->dev, "Failed to set capabilities. cmd 0x%08x vs resp 0x%08x\n",
			      cmd.flags,
			      context->resp.flags);
		err = -EIO;
	}

out:
	kfree(context);
	return err;
}

static int mlx5_fpga_ipsec_enable_supported_caps(struct mlx5_core_dev *mdev)
{
	u32 dev_caps = mlx5_fpga_ipsec_device_caps(mdev);
	u32 flags = 0;

	if (dev_caps & MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER)
		flags |= MLX5_FPGA_IPSEC_CAP_NO_TRAILER;

	return mlx5_fpga_ipsec_set_caps(mdev, flags);
}

static void
mlx5_fpga_ipsec_build_hw_xfrm(struct mlx5_core_dev *mdev,
			      const struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs,
			      struct mlx5_ifc_fpga_ipsec_sa *hw_sa)
{
	const struct aes_gcm_keymat *aes_gcm = &xfrm_attrs->keymat.aes_gcm;

	/* key */
	memcpy(&hw_sa->ipsec_sa_v1.key_enc, aes_gcm->aes_key,
	       aes_gcm->key_len / 8);
	/* Duplicate 128 bit key twice according to HW layout */
	if (aes_gcm->key_len == 128)
		memcpy(&hw_sa->ipsec_sa_v1.key_enc[16],
		       aes_gcm->aes_key, aes_gcm->key_len / 8);

	/* salt and seq_iv */
	memcpy(&hw_sa->ipsec_sa_v1.gcm.salt_iv, &aes_gcm->seq_iv,
	       sizeof(aes_gcm->seq_iv));
	memcpy(&hw_sa->ipsec_sa_v1.gcm.salt, &aes_gcm->salt,
	       sizeof(aes_gcm->salt));

	/* esn */
	if (xfrm_attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) {
		hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_ESN_EN;
		hw_sa->ipsec_sa_v1.flags |=
				(xfrm_attrs->flags &
				 MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) ?
				MLX5_FPGA_IPSEC_SA_ESN_OVERLAP : 0;
		hw_sa->esn = htonl(xfrm_attrs->esn);
	} else {
		hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_ESN_EN;
		hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_ESN_OVERLAP;
		hw_sa->esn = 0;
	}

	/* rx handle */
	hw_sa->ipsec_sa_v1.sw_sa_handle = htonl(xfrm_attrs->sa_handle);

	/* enc mode */
	switch (aes_gcm->key_len) {
	case 128:
		hw_sa->ipsec_sa_v1.enc_mode =
			MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_128_AUTH_128;
		break;
	case 256:
		hw_sa->ipsec_sa_v1.enc_mode =
			MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_256_AUTH_128;
		break;
	}

	/* flags */
	hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_SA_VALID |
				    MLX5_FPGA_IPSEC_SA_SPI_EN |
				    MLX5_FPGA_IPSEC_SA_IP_ESP;

	if (xfrm_attrs->action & MLX5_ACCEL_ESP_ACTION_ENCRYPT)
		hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_DIR_SX;
	else
		hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_DIR_SX;
}

static void
mlx5_fpga_ipsec_build_hw_sa(struct mlx5_core_dev *mdev,
			    struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs,
			    const __be32 saddr[4],
			    const __be32 daddr[4],
			    const __be32 spi, bool is_ipv6,
			    struct mlx5_ifc_fpga_ipsec_sa *hw_sa)
{
	mlx5_fpga_ipsec_build_hw_xfrm(mdev, xfrm_attrs, hw_sa);

	/* IPs */
	memcpy(hw_sa->ipsec_sa_v1.sip, saddr, sizeof(hw_sa->ipsec_sa_v1.sip));
	memcpy(hw_sa->ipsec_sa_v1.dip, daddr, sizeof(hw_sa->ipsec_sa_v1.dip));

	/* SPI */
	hw_sa->ipsec_sa_v1.spi = spi;

	/* flags */
	if (is_ipv6)
		hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_IPV6;
}

static bool is_full_mask(const void *p, size_t len)
{
	WARN_ON(len % 4);

	return !memchr_inv(p, 0xff, len);
}

static bool validate_fpga_full_mask(struct mlx5_core_dev *dev,
				    const u32 *match_c,
				    const u32 *match_v)
{
	const void *misc_params_c = MLX5_ADDR_OF(fte_match_param,
						 match_c,
						 misc_parameters);
	const void *headers_c = MLX5_ADDR_OF(fte_match_param,
					     match_c,
					     outer_headers);
	const void *headers_v = MLX5_ADDR_OF(fte_match_param,
					     match_v,
					     outer_headers);

	if (mlx5_fs_is_outer_ipv4_flow(dev, headers_c, headers_v)) {
		const void *s_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
						    headers_c,
						    src_ipv4_src_ipv6.ipv4_layout.ipv4);
		const void *d_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
						    headers_c,
						    dst_ipv4_dst_ipv6.ipv4_layout.ipv4);

		if (!is_full_mask(s_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout,
							      ipv4)) ||
		    !is_full_mask(d_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout,
							      ipv4)))
			return false;
	} else {
		const void *s_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
						    headers_c,
						    src_ipv4_src_ipv6.ipv6_layout.ipv6);
		const void *d_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
						    headers_c,
						    dst_ipv4_dst_ipv6.ipv6_layout.ipv6);

		if (!is_full_mask(s_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout,
							      ipv6)) ||
		    !is_full_mask(d_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout,
							      ipv6)))
			return false;
	}

	if (!is_full_mask(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c,
				       outer_esp_spi),
			  MLX5_FLD_SZ_BYTES(fte_match_set_misc, outer_esp_spi)))
		return false;

	return true;
}

static bool mlx5_is_fpga_ipsec_rule(struct mlx5_core_dev *dev,
				    u8 match_criteria_enable,
				    const u32 *match_c,
				    const u32 *match_v)
{
	u32 ipsec_dev_caps = mlx5_accel_ipsec_device_caps(dev);
	bool ipv6_flow;

	ipv6_flow = mlx5_fs_is_outer_ipv6_flow(dev, match_c, match_v);

	if (!(match_criteria_enable & MLX5_MATCH_OUTER_HEADERS) ||
	    mlx5_fs_is_outer_udp_flow(match_c, match_v) ||
	    mlx5_fs_is_outer_tcp_flow(match_c, match_v) ||
	    mlx5_fs_is_vxlan_flow(match_c) ||
	    !(mlx5_fs_is_outer_ipv4_flow(dev, match_c, match_v) ||
	      ipv6_flow))
		return false;

	if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_DEVICE))
		return false;

	if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_ESP) &&
	    mlx5_fs_is_outer_ipsec_flow(match_c))
		return false;

	if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_IPV6) &&
	    ipv6_flow)
		return false;

	if (!validate_fpga_full_mask(dev, match_c, match_v))
		return false;

	return true;
}

static bool mlx5_is_fpga_egress_ipsec_rule(struct mlx5_core_dev *dev,
					   u8 match_criteria_enable,
					   const u32 *match_c,
					   const u32 *match_v,
					   struct mlx5_flow_act *flow_act)
{
	const void *outer_c = MLX5_ADDR_OF(fte_match_param, match_c,
					   outer_headers);
	bool is_dmac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_47_16) ||
			MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_15_0);
	bool is_smac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_47_16) ||
			MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_15_0);
	int ret;

	ret = mlx5_is_fpga_ipsec_rule(dev, match_criteria_enable, match_c,
				      match_v);
	if (!ret)
		return ret;

	if (is_dmac || is_smac ||
	    (match_criteria_enable &
	     ~(MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS)) ||
	    (flow_act->action & ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
				  MLX5_FLOW_CONTEXT_ACTION_ALLOW)) ||
	    flow_act->has_flow_tag)
		return false;

	return true;
}

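/**
 * mlx5_fpga_ipsec_create_sa_ctx() - Bind an accel xfrm to a hardware SA
 * @mdev: mlx5 core device
 * @accel_xfrm: the xfrm to bind
 * @saddr: source address as four big-endian words (IPv4 uses word [3])
 * @daddr: destination address, in the same layout
 * @spi: ESP SPI, big-endian
 * @is_ipv6: address family selector
 *
 * Reuses the xfrm's existing SA context when the rebuilt hardware SA is
 * identical; otherwise installs a new SA on the FPGA. Returns the SA
 * context or an ERR_PTR (-EEXIST when an equal SA is already bound to a
 * different xfrm).
 */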
void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
				    struct mlx5_accel_esp_xfrm *accel_xfrm,
				    const __be32 saddr[4],
				    const __be32 daddr[4],
				    const __be32 spi, bool is_ipv6)
{
	struct mlx5_fpga_ipsec_sa_ctx *sa_ctx;
	struct mlx5_fpga_esp_xfrm *fpga_xfrm =
			container_of(accel_xfrm, typeof(*fpga_xfrm),
				     accel_xfrm);
	struct mlx5_fpga_device *fdev = mdev->fpga;
	struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
	int opcode, err;
	void *context;

	/* alloc SA */
	sa_ctx = kzalloc(sizeof(*sa_ctx), GFP_KERNEL);
	if (!sa_ctx)
		return ERR_PTR(-ENOMEM);

	sa_ctx->dev = mdev;

	/* build candidate SA */
	mlx5_fpga_ipsec_build_hw_sa(mdev, &accel_xfrm->attrs,
				    saddr, daddr, spi, is_ipv6,
				    &sa_ctx->hw_sa);

	mutex_lock(&fpga_xfrm->lock);

	if (fpga_xfrm->sa_ctx) { /* multiple rules for same accel_xfrm */
		if (memcmp(&sa_ctx->hw_sa, &fpga_xfrm->sa_ctx->hw_sa,
			   sizeof(sa_ctx->hw_sa))) {
			context = ERR_PTR(-EINVAL);
			goto exists;
		}

		++fpga_xfrm->num_rules;
		context = fpga_xfrm->sa_ctx;
		goto exists;
	}

	/* This is an unbound fpga_xfrm, try to add it to the hash */
	mutex_lock(&fipsec->sa_hash_lock);

	err = rhashtable_lookup_insert_fast(&fipsec->sa_hash, &sa_ctx->hash,
					    rhash_sa);
	if (err) {
		/* Can't bind a different accel_xfrm to an already existing
		 * sa_ctx. This condition means that two different rules
		 * share the same IPs + SPI and SA, which is not allowed.
		 */
		context = ERR_PTR(-EEXIST);
		goto unlock_hash;
	}

	/* Bind accel_xfrm to sa_ctx */
	opcode = is_v2_sadb_supported(fdev->ipsec) ?
			MLX5_FPGA_IPSEC_CMD_OP_ADD_SA_V2 :
			MLX5_FPGA_IPSEC_CMD_OP_ADD_SA;
	err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode);
	sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
	if (err) {
		context = ERR_PTR(err);
		goto delete_hash;
	}

	mutex_unlock(&fipsec->sa_hash_lock);

	++fpga_xfrm->num_rules;
	fpga_xfrm->sa_ctx = sa_ctx;
	sa_ctx->fpga_xfrm = fpga_xfrm;

	mutex_unlock(&fpga_xfrm->lock);

	return sa_ctx;

delete_hash:
	WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
				       rhash_sa));
unlock_hash:
	mutex_unlock(&fipsec->sa_hash_lock);

exists:
	mutex_unlock(&fpga_xfrm->lock);
	kfree(sa_ctx);
	return context;
}

static void *
mlx5_fpga_ipsec_fs_create_sa_ctx(struct mlx5_core_dev *mdev,
				 struct fs_fte *fte,
				 bool is_egress)
{
	struct mlx5_accel_esp_xfrm *accel_xfrm;
	__be32 saddr[4], daddr[4], spi;
	struct mlx5_flow_group *fg;
	bool is_ipv6 = false;

	fs_get_obj(fg, fte->node.parent);

	/* validate */
	if (is_egress &&
	    !mlx5_is_fpga_egress_ipsec_rule(mdev,
					    fg->mask.match_criteria_enable,
					    fg->mask.match_criteria,
					    fte->val,
					    &fte->action))
		return ERR_PTR(-EINVAL);
	else if (!mlx5_is_fpga_ipsec_rule(mdev,
					  fg->mask.match_criteria_enable,
					  fg->mask.match_criteria,
					  fte->val))
		return ERR_PTR(-EINVAL);

	/* get accel_xfrm */
	accel_xfrm =
		(struct mlx5_accel_esp_xfrm *)fte->action.esp_id;

	/* IPs */
	if (mlx5_fs_is_outer_ipv4_flow(mdev, fg->mask.match_criteria,
				       fte->val)) {
		memcpy(&saddr[3],
		       MLX5_ADDR_OF(fte_match_set_lyr_2_4,
				    fte->val,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       sizeof(saddr[3]));
		memcpy(&daddr[3],
		       MLX5_ADDR_OF(fte_match_set_lyr_2_4,
				    fte->val,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       sizeof(daddr[3]));
	} else {
		memcpy(saddr,
		       MLX5_ADDR_OF(fte_match_param,
				    fte->val,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       sizeof(saddr));
		memcpy(daddr,
		       MLX5_ADDR_OF(fte_match_param,
				    fte->val,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       sizeof(daddr));
		is_ipv6 = true;
	}

	/* SPI */
	spi = MLX5_GET_BE(typeof(spi),
			  fte_match_param, fte->val,
			  misc_parameters.outer_esp_spi);

	/* create */
	return mlx5_fpga_ipsec_create_sa_ctx(mdev, accel_xfrm,
					     saddr, daddr,
					     spi, is_ipv6);
}

static void
mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx)
{
	struct mlx5_fpga_device *fdev = sa_ctx->dev->fpga;
	struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
	int opcode = is_v2_sadb_supported(fdev->ipsec) ?
			MLX5_FPGA_IPSEC_CMD_OP_DEL_SA_V2 :
			MLX5_FPGA_IPSEC_CMD_OP_DEL_SA;
	int err;

	err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode);
	sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
	if (WARN_ON(err))
		return;

	mutex_lock(&fipsec->sa_hash_lock);
	WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
				       rhash_sa));
	mutex_unlock(&fipsec->sa_hash_lock);
}

void mlx5_fpga_ipsec_delete_sa_ctx(void *context)
{
	struct mlx5_fpga_esp_xfrm *fpga_xfrm =
			((struct mlx5_fpga_ipsec_sa_ctx *)context)->fpga_xfrm;

	mutex_lock(&fpga_xfrm->lock);
	/* Release the HW SA once the last rule bound to this xfrm is gone */
	if (!--fpga_xfrm->num_rules) {
		mlx5_fpga_ipsec_release_sa_ctx(fpga_xfrm->sa_ctx);
		fpga_xfrm->sa_ctx = NULL;
	}
	mutex_unlock(&fpga_xfrm->lock);
}

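/* Offloaded rules are kept in an rb-tree ordered by their fs_fte pointer,
 * so the FTE hooks below can find the SA context created for a given
 * flow table entry.
 */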
static inline struct mlx5_fpga_ipsec_rule *
_rule_search(struct rb_root *root, struct fs_fte *fte)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct mlx5_fpga_ipsec_rule *rule =
				container_of(node, struct mlx5_fpga_ipsec_rule,
					     node);

		/* Same ordering as _rule_insert(): smaller keys to the left */
		if (fte < rule->fte)
			node = node->rb_left;
		else if (fte > rule->fte)
			node = node->rb_right;
		else
			return rule;
	}
	return NULL;
}

static struct mlx5_fpga_ipsec_rule *
rule_search(struct mlx5_fpga_ipsec *ipsec_dev, struct fs_fte *fte)
{
	struct mlx5_fpga_ipsec_rule *rule;

	mutex_lock(&ipsec_dev->rules_rb_lock);
	rule = _rule_search(&ipsec_dev->rules_rb, fte);
	mutex_unlock(&ipsec_dev->rules_rb_lock);

	return rule;
}

static inline int _rule_insert(struct rb_root *root,
			       struct mlx5_fpga_ipsec_rule *rule)
{
	struct rb_node **new = &root->rb_node, *parent = NULL;

	/* Figure out where to put the new node */
	while (*new) {
		struct mlx5_fpga_ipsec_rule *this =
				container_of(*new, struct mlx5_fpga_ipsec_rule,
					     node);

		parent = *new;
		if (rule->fte < this->fte)
			new = &((*new)->rb_left);
		else if (rule->fte > this->fte)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree */
	rb_link_node(&rule->node, parent, new);
	rb_insert_color(&rule->node, root);

	return 0;
}

static int rule_insert(struct mlx5_fpga_ipsec *ipsec_dev,
		       struct mlx5_fpga_ipsec_rule *rule)
{
	int ret;

	mutex_lock(&ipsec_dev->rules_rb_lock);
	ret = _rule_insert(&ipsec_dev->rules_rb, rule);
	mutex_unlock(&ipsec_dev->rules_rb_lock);

	return ret;
}

static inline void _rule_delete(struct mlx5_fpga_ipsec *ipsec_dev,
				struct mlx5_fpga_ipsec_rule *rule)
{
	struct rb_root *root = &ipsec_dev->rules_rb;

	mutex_lock(&ipsec_dev->rules_rb_lock);
	rb_erase(&rule->node, root);
	mutex_unlock(&ipsec_dev->rules_rb_lock);
}

static void rule_delete(struct mlx5_fpga_ipsec *ipsec_dev,
			struct mlx5_fpga_ipsec_rule *rule)
{
	_rule_delete(ipsec_dev, rule);
	kfree(rule);
}

struct mailbox_mod {
	uintptr_t saved_esp_id;
	u32 saved_action;
	u32 saved_outer_esp_spi_value;
};

/* The IPSec-specific FTE fields must not reach the firmware:
 * modify_spec_mailbox() strips them (saving the originals here) before a
 * mailbox is sent, and restore_spec_mailbox() puts them back afterwards.
 */
static void restore_spec_mailbox(struct fs_fte *fte,
				 struct mailbox_mod *mbox_mod)
{
	char *misc_params_v = MLX5_ADDR_OF(fte_match_param,
					   fte->val,
					   misc_parameters);

	MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi,
		 mbox_mod->saved_outer_esp_spi_value);
	fte->action.action |= mbox_mod->saved_action;
	fte->action.esp_id = (uintptr_t)mbox_mod->saved_esp_id;
}

static void modify_spec_mailbox(struct mlx5_core_dev *mdev,
				struct fs_fte *fte,
				struct mailbox_mod *mbox_mod)
{
	char *misc_params_v = MLX5_ADDR_OF(fte_match_param,
					   fte->val,
					   misc_parameters);

	mbox_mod->saved_esp_id = fte->action.esp_id;
	mbox_mod->saved_action = fte->action.action &
			(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
			 MLX5_FLOW_CONTEXT_ACTION_DECRYPT);
	mbox_mod->saved_outer_esp_spi_value =
			MLX5_GET(fte_match_set_misc, misc_params_v,
				 outer_esp_spi);

	fte->action.esp_id = 0;
	fte->action.action &= ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
				MLX5_FLOW_CONTEXT_ACTION_DECRYPT);
	if (!MLX5_CAP_FLOWTABLE(mdev,
				flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
		MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi, 0);
}

static enum fs_flow_table_type egress_to_fs_ft(bool egress)
{
	return egress ? FS_FT_NIC_TX : FS_FT_NIC_RX;
}

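/* The fpga_ipsec_fs_* helpers below wrap the default flow-steering
 * commands for the NIC RX/TX tables: IPSec rules get an SA context and a
 * sanitized mailbox; everything else falls straight through to the
 * default command.
 */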
static int fpga_ipsec_fs_create_flow_group(struct mlx5_core_dev *dev,
					   struct mlx5_flow_table *ft,
					   u32 *in,
					   unsigned int *group_id,
					   bool is_egress)
{
	int (*create_flow_group)(struct mlx5_core_dev *dev,
				 struct mlx5_flow_table *ft, u32 *in,
				 unsigned int *group_id) =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_flow_group;
	char *misc_params_c = MLX5_ADDR_OF(create_flow_group_in, in,
					   match_criteria.misc_parameters);
	u32 saved_outer_esp_spi_mask;
	u8 match_criteria_enable;
	int ret;

	if (MLX5_CAP_FLOWTABLE(dev,
			       flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
		return create_flow_group(dev, ft, in, group_id);

	match_criteria_enable =
		MLX5_GET(create_flow_group_in, in, match_criteria_enable);
	saved_outer_esp_spi_mask =
		MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi);
	if (!match_criteria_enable || !saved_outer_esp_spi_mask)
		return create_flow_group(dev, ft, in, group_id);

	/* The device cannot match on outer_esp_spi: mask it out of the
	 * group criteria for the firmware call and restore it afterwards.
	 */
	MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, 0);

	if (!(*misc_params_c) &&
	    !memcmp(misc_params_c, misc_params_c + 1,
		    MLX5_ST_SZ_BYTES(fte_match_set_misc) - 1))
		MLX5_SET(create_flow_group_in, in, match_criteria_enable,
			 match_criteria_enable & ~MLX5_MATCH_MISC_PARAMETERS);

	ret = create_flow_group(dev, ft, in, group_id);

	MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi,
		 saved_outer_esp_spi_mask);
	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 match_criteria_enable);

	return ret;
}

static int fpga_ipsec_fs_create_fte(struct mlx5_core_dev *dev,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *fg,
				    struct fs_fte *fte,
				    bool is_egress)
{
	int (*create_fte)(struct mlx5_core_dev *dev,
			  struct mlx5_flow_table *ft,
			  struct mlx5_flow_group *fg,
			  struct fs_fte *fte) =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_fte;
	struct mlx5_fpga_device *fdev = dev->fpga;
	struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
	struct mlx5_fpga_ipsec_rule *rule;
	bool is_esp = fte->action.esp_id;
	struct mailbox_mod mbox_mod;
	int ret;

	if (!is_esp ||
	    !(fte->action.action &
	      (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
	       MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
		return create_fte(dev, ft, fg, fte);

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	rule->ctx = mlx5_fpga_ipsec_fs_create_sa_ctx(dev, fte, is_egress);
	if (IS_ERR(rule->ctx)) {
		int err = PTR_ERR(rule->ctx);

		kfree(rule);
		return err;
	}

	rule->fte = fte;
	WARN_ON(rule_insert(fipsec, rule));

	modify_spec_mailbox(dev, fte, &mbox_mod);
	ret = create_fte(dev, ft, fg, fte);
	restore_spec_mailbox(fte, &mbox_mod);
	if (ret) {
		_rule_delete(fipsec, rule);
		mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx);
		kfree(rule);
	}

	return ret;
}

static int fpga_ipsec_fs_update_fte(struct mlx5_core_dev *dev,
				    struct mlx5_flow_table *ft,
				    unsigned int group_id,
				    int modify_mask,
				    struct fs_fte *fte,
				    bool is_egress)
{
	int (*update_fte)(struct mlx5_core_dev *dev,
			  struct mlx5_flow_table *ft,
			  unsigned int group_id,
			  int modify_mask,
			  struct fs_fte *fte) =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->update_fte;
	bool is_esp = fte->action.esp_id;
	struct mailbox_mod mbox_mod;
	int ret;

	if (!is_esp ||
	    !(fte->action.action &
	      (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
	       MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
		return update_fte(dev, ft, group_id, modify_mask, fte);

	modify_spec_mailbox(dev, fte, &mbox_mod);
	ret = update_fte(dev, ft, group_id, modify_mask, fte);
	restore_spec_mailbox(fte, &mbox_mod);

	return ret;
}

static int fpga_ipsec_fs_delete_fte(struct mlx5_core_dev *dev,
				    struct mlx5_flow_table *ft,
				    struct fs_fte *fte,
				    bool is_egress)
{
	int (*delete_fte)(struct mlx5_core_dev *dev,
			  struct mlx5_flow_table *ft,
			  struct fs_fte *fte) =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->delete_fte;
	struct mlx5_fpga_device *fdev = dev->fpga;
	struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
	struct mlx5_fpga_ipsec_rule *rule;
	bool is_esp = fte->action.esp_id;
	struct mailbox_mod mbox_mod;
	int ret;

	if (!is_esp ||
	    !(fte->action.action &
	      (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
	       MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
		return delete_fte(dev, ft, fte);

	rule = rule_search(fipsec, fte);
	if (!rule)
		return -ENOENT;

	mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx);
	rule_delete(fipsec, rule);

	modify_spec_mailbox(dev, fte, &mbox_mod);
	ret = delete_fte(dev, ft, fte);
	restore_spec_mailbox(fte, &mbox_mod);

	return ret;
}

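/* Thin per-direction wrappers, so ingress and egress can each be plugged
 * into their own mlx5_flow_cmds table.
 */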
static int
mlx5_fpga_ipsec_fs_create_flow_group_egress(struct mlx5_core_dev *dev,
					    struct mlx5_flow_table *ft,
					    u32 *in,
					    unsigned int *group_id)
{
	return fpga_ipsec_fs_create_flow_group(dev, ft, in, group_id, true);
}

static int
mlx5_fpga_ipsec_fs_create_fte_egress(struct mlx5_core_dev *dev,
				     struct mlx5_flow_table *ft,
				     struct mlx5_flow_group *fg,
				     struct fs_fte *fte)
{
	return fpga_ipsec_fs_create_fte(dev, ft, fg, fte, true);
}

static int
mlx5_fpga_ipsec_fs_update_fte_egress(struct mlx5_core_dev *dev,
				     struct mlx5_flow_table *ft,
				     unsigned int group_id,
				     int modify_mask,
				     struct fs_fte *fte)
{
	return fpga_ipsec_fs_update_fte(dev, ft, group_id, modify_mask, fte,
					true);
}

static int
mlx5_fpga_ipsec_fs_delete_fte_egress(struct mlx5_core_dev *dev,
				     struct mlx5_flow_table *ft,
				     struct fs_fte *fte)
{
	return fpga_ipsec_fs_delete_fte(dev, ft, fte, true);
}

static int
mlx5_fpga_ipsec_fs_create_flow_group_ingress(struct mlx5_core_dev *dev,
					     struct mlx5_flow_table *ft,
					     u32 *in,
					     unsigned int *group_id)
{
	return fpga_ipsec_fs_create_flow_group(dev, ft, in, group_id, false);
}

static int
mlx5_fpga_ipsec_fs_create_fte_ingress(struct mlx5_core_dev *dev,
				      struct mlx5_flow_table *ft,
				      struct mlx5_flow_group *fg,
				      struct fs_fte *fte)
{
	return fpga_ipsec_fs_create_fte(dev, ft, fg, fte, false);
}

static int
mlx5_fpga_ipsec_fs_update_fte_ingress(struct mlx5_core_dev *dev,
				      struct mlx5_flow_table *ft,
				      unsigned int group_id,
				      int modify_mask,
				      struct fs_fte *fte)
{
	return fpga_ipsec_fs_update_fte(dev, ft, group_id, modify_mask, fte,
					false);
}

static int
mlx5_fpga_ipsec_fs_delete_fte_ingress(struct mlx5_core_dev *dev,
				      struct mlx5_flow_table *ft,
				      struct fs_fte *fte)
{
	return fpga_ipsec_fs_delete_fte(dev, ft, fte, false);
}

static struct mlx5_flow_cmds fpga_ipsec_ingress;
static struct mlx5_flow_cmds fpga_ipsec_egress;

const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type)
{
	switch (type) {
	case FS_FT_NIC_RX:
		return &fpga_ipsec_ingress;
	case FS_FT_NIC_TX:
		return &fpga_ipsec_egress;
	default:
		WARN_ON(true);
		return NULL;
	}
}

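/**
 * mlx5_fpga_ipsec_init() - Set up FPGA IPSec offload for a device
 * @mdev: mlx5 core device
 *
 * Queries the SBU capabilities, opens the command QP connection and
 * initializes the SA hash and rule tree. A no-op (returning 0) when the
 * device is not an FPGA IPSec device.
 */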
int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
{
	struct mlx5_fpga_conn_attr init_attr = {0};
	struct mlx5_fpga_device *fdev = mdev->fpga;
	struct mlx5_fpga_conn *conn;
	int err;

	if (!mlx5_fpga_is_ipsec_device(mdev))
		return 0;

	fdev->ipsec = kzalloc(sizeof(*fdev->ipsec), GFP_KERNEL);
	if (!fdev->ipsec)
		return -ENOMEM;

	fdev->ipsec->fdev = fdev;

	err = mlx5_fpga_get_sbu_caps(fdev, sizeof(fdev->ipsec->caps),
				     fdev->ipsec->caps);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to retrieve IPSec extended capabilities: %d\n",
			      err);
		goto error;
	}

	INIT_LIST_HEAD(&fdev->ipsec->pending_cmds);
	spin_lock_init(&fdev->ipsec->pending_cmds_lock);

	init_attr.rx_size = SBU_QP_QUEUE_SIZE;
	init_attr.tx_size = SBU_QP_QUEUE_SIZE;
	init_attr.recv_cb = mlx5_fpga_ipsec_recv;
	init_attr.cb_arg = fdev;
	conn = mlx5_fpga_sbu_conn_create(fdev, &init_attr);
	if (IS_ERR(conn)) {
		err = PTR_ERR(conn);
		mlx5_fpga_err(fdev, "Error creating IPSec command connection %d\n",
			      err);
		goto error;
	}
	fdev->ipsec->conn = conn;

	err = rhashtable_init(&fdev->ipsec->sa_hash, &rhash_sa);
	if (err)
		goto err_destroy_conn;
	mutex_init(&fdev->ipsec->sa_hash_lock);

	fdev->ipsec->rules_rb = RB_ROOT;
	mutex_init(&fdev->ipsec->rules_rb_lock);

	err = mlx5_fpga_ipsec_enable_supported_caps(mdev);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to enable IPSec extended capabilities: %d\n",
			      err);
		goto err_destroy_hash;
	}

	return 0;

err_destroy_hash:
	rhashtable_destroy(&fdev->ipsec->sa_hash);

err_destroy_conn:
	mlx5_fpga_sbu_conn_destroy(conn);

error:
	kfree(fdev->ipsec);
	fdev->ipsec = NULL;
	return err;
}

static void destroy_rules_rb(struct rb_root *root)
{
	struct mlx5_fpga_ipsec_rule *r, *tmp;

	rbtree_postorder_for_each_entry_safe(r, tmp, root, node) {
		rb_erase(&r->node, root);
		mlx5_fpga_ipsec_delete_sa_ctx(r->ctx);
		kfree(r);
	}
}

void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
{
	struct mlx5_fpga_device *fdev = mdev->fpga;

	if (!mlx5_fpga_is_ipsec_device(mdev))
		return;

	destroy_rules_rb(&fdev->ipsec->rules_rb);
	rhashtable_destroy(&fdev->ipsec->sa_hash);

	mlx5_fpga_sbu_conn_destroy(fdev->ipsec->conn);
	kfree(fdev->ipsec);
	fdev->ipsec = NULL;
}

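/* Build the ingress and egress command tables once at module load: the
 * group/FTE operations are overridden with the IPSec-aware hooks above,
 * everything else falls through to the default NIC RX/TX commands.
 */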
void mlx5_fpga_ipsec_build_fs_cmds(void)
{
	/* ingress */
	fpga_ipsec_ingress.create_flow_table =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->create_flow_table;
	fpga_ipsec_ingress.destroy_flow_table =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_table;
	fpga_ipsec_ingress.modify_flow_table =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->modify_flow_table;
	fpga_ipsec_ingress.create_flow_group =
		mlx5_fpga_ipsec_fs_create_flow_group_ingress;
	fpga_ipsec_ingress.destroy_flow_group =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_group;
	fpga_ipsec_ingress.create_fte =
		mlx5_fpga_ipsec_fs_create_fte_ingress;
	fpga_ipsec_ingress.update_fte =
		mlx5_fpga_ipsec_fs_update_fte_ingress;
	fpga_ipsec_ingress.delete_fte =
		mlx5_fpga_ipsec_fs_delete_fte_ingress;
	fpga_ipsec_ingress.update_root_ft =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->update_root_ft;

	/* egress */
	fpga_ipsec_egress.create_flow_table =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->create_flow_table;
	fpga_ipsec_egress.destroy_flow_table =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_table;
	fpga_ipsec_egress.modify_flow_table =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->modify_flow_table;
	fpga_ipsec_egress.create_flow_group =
		mlx5_fpga_ipsec_fs_create_flow_group_egress;
	fpga_ipsec_egress.destroy_flow_group =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_group;
	fpga_ipsec_egress.create_fte =
		mlx5_fpga_ipsec_fs_create_fte_egress;
	fpga_ipsec_egress.update_fte =
		mlx5_fpga_ipsec_fs_update_fte_egress;
	fpga_ipsec_egress.delete_fte =
		mlx5_fpga_ipsec_fs_delete_fte_egress;
	fpga_ipsec_egress.update_root_ft =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->update_root_ft;
}

static int
mlx5_fpga_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev,
				  const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	if (attrs->tfc_pad) {
		mlx5_core_err(mdev, "Cannot offload xfrm states with tfc padding\n");
		return -EOPNOTSUPP;
	}

	if (attrs->replay_type != MLX5_ACCEL_ESP_REPLAY_NONE) {
		mlx5_core_err(mdev, "Cannot offload xfrm states with anti replay\n");
		return -EOPNOTSUPP;
	}

	if (attrs->keymat_type != MLX5_ACCEL_ESP_KEYMAT_AES_GCM) {
		mlx5_core_err(mdev, "Only aes gcm keymat is supported\n");
		return -EOPNOTSUPP;
	}

	if (attrs->keymat.aes_gcm.iv_algo !=
	    MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ) {
		mlx5_core_err(mdev, "Only iv sequence algo is supported\n");
		return -EOPNOTSUPP;
	}

	if (attrs->keymat.aes_gcm.icv_len != 128) {
		mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n");
		return -EOPNOTSUPP;
	}

	if (attrs->keymat.aes_gcm.key_len != 128 &&
	    attrs->keymat.aes_gcm.key_len != 256) {
		mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
		return -EOPNOTSUPP;
	}

	if ((attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) &&
	    (!MLX5_GET(ipsec_extended_cap, mdev->fpga->ipsec->caps,
		       v2_command))) {
		mlx5_core_err(mdev, "Cannot offload xfrm states with ESN triggered\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

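/**
 * mlx5_fpga_esp_create_xfrm() - Allocate an ESP xfrm for FPGA offload
 * @mdev: mlx5 core device
 * @attrs: ESP transform attributes
 * @flags: MLX5_ACCEL_XFRM_FLAG_* flags; metadata is mandatory here
 *
 * Returns the embedded mlx5_accel_esp_xfrm on success or an ERR_PTR.
 * No hardware SA is installed yet; that happens when the first rule is
 * bound via mlx5_fpga_ipsec_create_sa_ctx().
 */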
struct mlx5_accel_esp_xfrm *
mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev,
			  const struct mlx5_accel_esp_xfrm_attrs *attrs,
			  u32 flags)
{
	struct mlx5_fpga_esp_xfrm *fpga_xfrm;

	if (!(flags & MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA)) {
		mlx5_core_warn(mdev, "Tried to create an esp action without metadata\n");
		return ERR_PTR(-EINVAL);
	}

	if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
		mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n");
		return ERR_PTR(-EOPNOTSUPP);
	}

	fpga_xfrm = kzalloc(sizeof(*fpga_xfrm), GFP_KERNEL);
	if (!fpga_xfrm)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fpga_xfrm->lock);
	memcpy(&fpga_xfrm->accel_xfrm.attrs, attrs,
	       sizeof(fpga_xfrm->accel_xfrm.attrs));

	return &fpga_xfrm->accel_xfrm;
}

void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
{
	struct mlx5_fpga_esp_xfrm *fpga_xfrm =
			container_of(xfrm, struct mlx5_fpga_esp_xfrm,
				     accel_xfrm);

	/* assuming no sa_ctx are connected to this xfrm_ctx */
	kfree(fpga_xfrm);
}

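/* Modifying a bound xfrm swaps the hardware SA in place: the original
 * hw_sa is saved, the entry is re-keyed in the SA hash, and the device is
 * updated with MOD_SA_V2; on any failure the original SA is restored.
 */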
int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
			      const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct mlx5_core_dev *mdev = xfrm->mdev;
	struct mlx5_fpga_device *fdev = mdev->fpga;
	struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
	struct mlx5_fpga_esp_xfrm *fpga_xfrm;
	struct mlx5_ifc_fpga_ipsec_sa org_hw_sa;
	int err = 0;

	if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs)))
		return 0;

	if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
		mlx5_core_warn(mdev, "Tried to modify an esp with unsupported attrs\n");
		return -EOPNOTSUPP;
	}

	if (!is_v2_sadb_supported(fipsec)) {
		mlx5_core_warn(mdev, "Modify esp is not supported\n");
		return -EOPNOTSUPP;
	}

	fpga_xfrm = container_of(xfrm, struct mlx5_fpga_esp_xfrm, accel_xfrm);

	mutex_lock(&fpga_xfrm->lock);

	if (!fpga_xfrm->sa_ctx)
		/* Unbound xfrm, change only sw attrs */
		goto change_sw_xfrm_attrs;

	/* copy original hw sa */
	memcpy(&org_hw_sa, &fpga_xfrm->sa_ctx->hw_sa, sizeof(org_hw_sa));
	mutex_lock(&fipsec->sa_hash_lock);

	/* remove original hw sa from hash */
	WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash,
				       &fpga_xfrm->sa_ctx->hash, rhash_sa));

	/* update hw_sa with new xfrm attrs */
	mlx5_fpga_ipsec_build_hw_xfrm(xfrm->mdev, attrs,
				      &fpga_xfrm->sa_ctx->hw_sa);

	/* try to insert new hw_sa to hash */
	err = rhashtable_insert_fast(&fipsec->sa_hash,
				     &fpga_xfrm->sa_ctx->hash, rhash_sa);
	if (err)
		goto rollback_sa;

	/* modify device with new hw_sa */
	err = mlx5_fpga_ipsec_update_hw_sa(fdev, &fpga_xfrm->sa_ctx->hw_sa,
					   MLX5_FPGA_IPSEC_CMD_OP_MOD_SA_V2);
	fpga_xfrm->sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
	if (err)
		WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash,
					       &fpga_xfrm->sa_ctx->hash,
					       rhash_sa));
rollback_sa:
	if (err) {
		/* return original hw_sa to hash */
		memcpy(&fpga_xfrm->sa_ctx->hw_sa, &org_hw_sa,
		       sizeof(org_hw_sa));
		WARN_ON(rhashtable_insert_fast(&fipsec->sa_hash,
					       &fpga_xfrm->sa_ctx->hash,
					       rhash_sa));
	}
	mutex_unlock(&fipsec->sa_hash_lock);

change_sw_xfrm_attrs:
	if (!err)
		memcpy(&xfrm->attrs, attrs, sizeof(xfrm->attrs));
	mutex_unlock(&fpga_xfrm->lock);
	return err;
}