// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include <net/inet6_hashtables.h>
#include "en_accel/en_accel.h"
#include "en_accel/tls.h"
#include "en_accel/ktls_txrx.h"
#include "en_accel/ktls_utils.h"
#include "en_accel/fs_tcp.h"

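/* Flow steering rule for a kTLS RX socket. The rule is installed from
 * work context (see accel_rule_handle_work), not from the completion
 * that schedules it.
 */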
struct accel_rule {
	struct work_struct work;
	struct mlx5e_priv *priv;
	struct mlx5_flow_handle *rule;
};

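/* The device writes progress params in PROGRESS_PARAMS_WRITE_UNIT (64B)
 * units, so the DMA buffer below is padded to a multiple of that size.
 */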
#define PROGRESS_PARAMS_WRITE_UNIT	64
#define PROGRESS_PARAMS_PADDED_SIZE					\
		(ALIGN(sizeof(struct mlx5_wqe_tls_progress_params_seg), \
		       PROGRESS_PARAMS_WRITE_UNIT))

struct mlx5e_ktls_rx_resync_buf {
	union {
		struct mlx5_wqe_tls_progress_params_seg progress;
		u8 pad[PROGRESS_PARAMS_PADDED_SIZE];
	} ____cacheline_aligned_in_smp;
	dma_addr_t dma_addr;
	struct mlx5e_ktls_offload_context_rx *priv_rx;
};

enum {
	MLX5E_PRIV_RX_FLAG_DELETING,
	MLX5E_NUM_PRIV_RX_FLAGS,
};

struct mlx5e_ktls_rx_resync_ctx {
	struct tls_offload_resync_async core;
	struct work_struct work;
	struct mlx5e_priv *priv;
	refcount_t refcnt;
	__be64 sw_rcd_sn_be;
	u32 seq;
};

struct mlx5e_ktls_offload_context_rx {
	struct tls12_crypto_info_aes_gcm_128 crypto_info;
	struct accel_rule rule;
	struct sock *sk;
	struct mlx5e_rq_stats *rq_stats;
	struct mlx5e_tls_sw_stats *sw_stats;
	struct completion add_ctx;
	struct mlx5e_tir tir;
	u32 key_id;
	u32 rxq;
	DECLARE_BITMAP(flags, MLX5E_NUM_PRIV_RX_FLAGS);

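	/* RX resync state */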
	struct mlx5e_ktls_rx_resync_ctx resync;
	struct list_head list;
};

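/* The resync refcount also guards the lifetime of priv_rx itself:
 * the context is freed only when the last reference is dropped.
 */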
static bool mlx5e_ktls_priv_rx_put(struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	if (!refcount_dec_and_test(&priv_rx->resync.refcnt))
		return false;

	kfree(priv_rx);
	return true;
}

static void mlx5e_ktls_priv_rx_get(struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	refcount_inc(&priv_rx->resync.refcnt);
}

struct mlx5e_ktls_resync_resp {
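	/* protects additions to and removals from the list below */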
	spinlock_t lock;
	struct list_head list;
};

void mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list)
{
	kvfree(resp_list);
}

struct mlx5e_ktls_resync_resp *
mlx5e_ktls_rx_resync_create_resp_list(void)
{
	struct mlx5e_ktls_resync_resp *resp_list;

	resp_list = kvzalloc(sizeof(*resp_list), GFP_KERNEL);
	if (!resp_list)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&resp_list->list);
	spin_lock_init(&resp_list->lock);

	return resp_list;
}

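/* Create a dedicated TLS-enabled TIR that steers this connection's
 * traffic directly to the RQT of its RX queue.
 */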
static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 rqtn)
{
	struct mlx5e_tir_builder *builder;
	int err;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	mlx5e_tir_builder_build_rqt(builder, mdev->mlx5e_res.hw_objs.td.tdn, rqtn, false);
	mlx5e_tir_builder_build_direct(builder);
	mlx5e_tir_builder_build_tls(builder);
	err = mlx5e_tir_init(tir, builder, mdev, false);

	mlx5e_tir_builder_free(builder);

	return err;
}

static void accel_rule_handle_work(struct work_struct *work)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct accel_rule *accel_rule;
	struct mlx5_flow_handle *rule;

	accel_rule = container_of(work, struct accel_rule, work);
	priv_rx = container_of(accel_rule, struct mlx5e_ktls_offload_context_rx, rule);
	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
		goto out;

	rule = mlx5e_accel_fs_add_sk(accel_rule->priv, priv_rx->sk,
				     mlx5e_tir_get_tirn(&priv_rx->tir),
				     MLX5_FS_DEFAULT_FLOW_TAG);
	if (!IS_ERR_OR_NULL(rule))
		accel_rule->rule = rule;
out:
	complete(&priv_rx->add_ctx);
}

static void accel_rule_init(struct accel_rule *rule, struct mlx5e_priv *priv)
{
	INIT_WORK(&rule->work, accel_rule_handle_work);
	rule->priv = priv;
}

static void icosq_fill_wi(struct mlx5e_icosq *sq, u16 pi,
			  struct mlx5e_icosq_wqe_info *wi)
{
	sq->db.wqe_info[pi] = *wi;
}

static struct mlx5_wqe_ctrl_seg *
post_static_params(struct mlx5e_icosq *sq,
		   struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	struct mlx5e_set_tls_static_params_wqe *wqe;
	struct mlx5e_icosq_wqe_info wi;
	u16 pi, num_wqebbs;

	num_wqebbs = MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS;
	if (unlikely(!mlx5e_icosq_can_post_wqe(sq, num_wqebbs)))
		return ERR_PTR(-ENOSPC);

	pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
	wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi);
	mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_rx->crypto_info,
				       mlx5e_tir_get_tirn(&priv_rx->tir),
				       priv_rx->key_id, priv_rx->resync.seq, false,
				       TLS_OFFLOAD_CTX_DIR_RX);
	wi = (struct mlx5e_icosq_wqe_info) {
		.wqe_type = MLX5E_ICOSQ_WQE_UMR_TLS,
		.num_wqebbs = num_wqebbs,
		.tls_set_params.priv_rx = priv_rx,
	};
	icosq_fill_wi(sq, pi, &wi);
	sq->pc += num_wqebbs;

	return &wqe->ctrl;
}

static struct mlx5_wqe_ctrl_seg *
post_progress_params(struct mlx5e_icosq *sq,
		     struct mlx5e_ktls_offload_context_rx *priv_rx,
		     u32 next_record_tcp_sn)
{
	struct mlx5e_set_tls_progress_params_wqe *wqe;
	struct mlx5e_icosq_wqe_info wi;
	u16 pi, num_wqebbs;

	num_wqebbs = MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS;
	if (unlikely(!mlx5e_icosq_can_post_wqe(sq, num_wqebbs)))
		return ERR_PTR(-ENOSPC);

	pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
	wqe = MLX5E_TLS_FETCH_SET_PROGRESS_PARAMS_WQE(sq, pi);
	mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn,
					 mlx5e_tir_get_tirn(&priv_rx->tir),
					 false, next_record_tcp_sn,
					 TLS_OFFLOAD_CTX_DIR_RX);
	wi = (struct mlx5e_icosq_wqe_info) {
		.wqe_type = MLX5E_ICOSQ_WQE_SET_PSV_TLS,
		.num_wqebbs = num_wqebbs,
		.tls_set_params.priv_rx = priv_rx,
	};

	icosq_fill_wi(sq, pi, &wi);
	sq->pc += num_wqebbs;

	return &wqe->ctrl;
}

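/* Post both parameter WQEs (static + progress) under the async ICOSQ
 * lock, ringing the doorbell only once for the pair.
 */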
static int post_rx_param_wqes(struct mlx5e_channel *c,
			      struct mlx5e_ktls_offload_context_rx *priv_rx,
			      u32 next_record_tcp_sn)
{
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5e_icosq *sq;
	int err;

	err = 0;
	sq = &c->async_icosq;
	spin_lock_bh(&c->async_icosq_lock);

	cseg = post_static_params(sq, priv_rx);
	if (IS_ERR(cseg))
		goto err_out;
	cseg = post_progress_params(sq, priv_rx, next_record_tcp_sn);
	if (IS_ERR(cseg))
		goto err_out;

	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
unlock:
	spin_unlock_bh(&c->async_icosq_lock);

	return err;

err_out:
	priv_rx->rq_stats->tls_resync_req_skip++;
	err = PTR_ERR(cseg);
	complete(&priv_rx->add_ctx);
	goto unlock;
}

static void
mlx5e_set_ktls_rx_priv_ctx(struct tls_context *tls_ctx,
			   struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	struct mlx5e_ktls_offload_context_rx **ctx =
		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX);

	BUILD_BUG_ON(sizeof(struct mlx5e_ktls_offload_context_rx *) >
		     TLS_OFFLOAD_CONTEXT_SIZE_RX);

	*ctx = priv_rx;
}

static struct mlx5e_ktls_offload_context_rx *
mlx5e_get_ktls_rx_priv_ctx(struct tls_context *tls_ctx)
{
	struct mlx5e_ktls_offload_context_rx **ctx =
		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX);

	return *ctx;
}

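/* Re-sync flow.
 * Runs in work context: posts a GET_PSV WQE so the device reports its
 * current progress params (tracker state and resync TCP sequence).
 */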
static int
resync_post_get_progress_params(struct mlx5e_icosq *sq,
				struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	struct mlx5e_get_tls_progress_params_wqe *wqe;
	struct mlx5e_ktls_rx_resync_buf *buf;
	struct mlx5e_icosq_wqe_info wi;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_seg_get_psv *psv;
	struct device *pdev;
	int err;
	u16 pi;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (unlikely(!buf)) {
		err = -ENOMEM;
		goto err_out;
	}

	pdev = mlx5_core_dma_dev(sq->channel->priv->mdev);
	buf->dma_addr = dma_map_single(pdev, &buf->progress,
				       PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(pdev, buf->dma_addr))) {
		err = -ENOMEM;
		goto err_free;
	}

	buf->priv_rx = priv_rx;

	spin_lock_bh(&sq->channel->async_icosq_lock);

	if (unlikely(!mlx5e_icosq_can_post_wqe(sq, MLX5E_KTLS_GET_PROGRESS_WQEBBS))) {
		spin_unlock_bh(&sq->channel->async_icosq_lock);
		err = -ENOSPC;
		goto err_dma_unmap;
	}

	pi = mlx5e_icosq_get_next_pi(sq, MLX5E_KTLS_GET_PROGRESS_WQEBBS);
	wqe = MLX5E_TLS_FETCH_GET_PROGRESS_PARAMS_WQE(sq, pi);

#define GET_PSV_DS_CNT (DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS))

	cseg = &wqe->ctrl;
	cseg->opmod_idx_opcode =
		cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_GET_PSV |
			    (MLX5_OPC_MOD_TLS_TIR_PROGRESS_PARAMS << 24));
	cseg->qpn_ds =
		cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | GET_PSV_DS_CNT);

	psv = &wqe->psv;
	psv->num_psv = 1 << 4;
	psv->l_key = sq->channel->mkey_be;
	psv->psv_index[0] = cpu_to_be32(mlx5e_tir_get_tirn(&priv_rx->tir));
	psv->va = cpu_to_be64(buf->dma_addr);

	wi = (struct mlx5e_icosq_wqe_info) {
		.wqe_type = MLX5E_ICOSQ_WQE_GET_PSV_TLS,
		.num_wqebbs = MLX5E_KTLS_GET_PROGRESS_WQEBBS,
		.tls_get_params.buf = buf,
	};
	icosq_fill_wi(sq, pi, &wi);
	sq->pc++;
	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
	spin_unlock_bh(&sq->channel->async_icosq_lock);

	return 0;

err_dma_unmap:
	dma_unmap_single(pdev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
err_free:
	kfree(buf);
err_out:
	priv_rx->rq_stats->tls_resync_req_skip++;
	return err;
}

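/* Called with an elevated refcount (taken in resync_queue_get_psv).
 * The reference is dropped here only if no GET_PSV WQE was posted;
 * otherwise it is dropped from the GET_PSV completion handler.
 */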
static void resync_handle_work(struct work_struct *work)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	struct mlx5e_channel *c;
	struct mlx5e_icosq *sq;

	resync = container_of(work, struct mlx5e_ktls_rx_resync_ctx, work);
	priv_rx = container_of(resync, struct mlx5e_ktls_offload_context_rx, resync);

	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) {
		mlx5e_ktls_priv_rx_put(priv_rx);
		return;
	}

	c = resync->priv->channels.c[priv_rx->rxq];
	sq = &c->async_icosq;

	if (resync_post_get_progress_params(sq, priv_rx))
		mlx5e_ktls_priv_rx_put(priv_rx);
}

static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync,
			struct mlx5e_priv *priv)
{
	INIT_WORK(&resync->work, resync_handle_work);
	resync->priv = priv;
	refcount_set(&resync->refcnt, 1);
}

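/* The software record sequence number matched the device's resync
 * request: queue the context on the per-SQ response list and kick NAPI
 * so the static params WQE is posted from the channel's context.
 */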
static void resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx,
				    struct mlx5e_channel *c)
{
	struct tls12_crypto_info_aes_gcm_128 *info = &priv_rx->crypto_info;
	struct mlx5e_ktls_resync_resp *ktls_resync;
	struct mlx5e_icosq *sq;
	bool trigger_poll;

	memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be, sizeof(info->rec_seq));

	sq = &c->async_icosq;
	ktls_resync = sq->ktls_resync;

	spin_lock_bh(&ktls_resync->lock);
	list_add_tail(&priv_rx->list, &ktls_resync->list);
	trigger_poll = !test_and_set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
	spin_unlock_bh(&ktls_resync->lock);

	if (!trigger_poll)
		return;

	if (!napi_if_scheduled_mark_missed(&c->napi)) {
		spin_lock_bh(&c->async_icosq_lock);
		mlx5e_trigger_irq(sq);
		spin_unlock_bh(&c->async_icosq_lock);
	}
}

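/* GET_PSV completion: if the device tracker is in the expected state,
 * complete the async resync request with the HW TCP sequence number.
 * Drops the reference taken when the GET_PSV was queued.
 */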
void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
					  struct mlx5e_icosq *sq)
{
	struct mlx5e_ktls_rx_resync_buf *buf = wi->tls_get_params.buf;
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	u8 tracker_state, auth_state, *ctx;
	struct device *dev;
	u32 hw_seq;

	priv_rx = buf->priv_rx;
	resync = &priv_rx->resync;
	dev = mlx5_core_dma_dev(resync->priv->mdev);
	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
		goto out;

	dma_sync_single_for_cpu(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE,
				DMA_FROM_DEVICE);

	ctx = buf->progress.ctx;
	tracker_state = MLX5_GET(tls_progress_params, ctx, record_tracker_state);
	auth_state = MLX5_GET(tls_progress_params, ctx, auth_state);
	if (tracker_state != MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING ||
	    auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD) {
		priv_rx->rq_stats->tls_resync_req_skip++;
		goto out;
	}

	hw_seq = MLX5_GET(tls_progress_params, ctx, hw_resync_tcp_sn);
	tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq));
	priv_rx->rq_stats->tls_resync_req_end++;
out:
	mlx5e_ktls_priv_rx_put(priv_rx);
	dma_unmap_single(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
	kfree(buf);
}

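/* Runs in atomic (NAPI) context: take a reference on priv_rx and defer
 * the GET_PSV post to work context.
 */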
static bool resync_queue_get_psv(struct sock *sk)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;

	priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_get_ctx(sk));
	if (unlikely(!priv_rx))
		return false;

	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
		return false;

	resync = &priv_rx->resync;
	mlx5e_ktls_priv_rx_get(priv_rx);
	if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work)))
		mlx5e_ktls_priv_rx_put(priv_rx);

	return true;
}

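/* Runs in NAPI context: parse the headers of the packet that carried
 * the device's resync request and look up its socket.
 */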
static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
{
	struct ethhdr *eth = (struct ethhdr *)(skb->data);
	struct net_device *netdev = rq->netdev;
	struct sock *sk = NULL;
	unsigned int datalen;
	struct iphdr *iph;
	struct tcphdr *th;
	__be32 seq;
	int depth = 0;

	__vlan_get_protocol(skb, eth->h_proto, &depth);
	iph = (struct iphdr *)(skb->data + depth);

	if (iph->version == 4) {
		depth += sizeof(struct iphdr);
		th = (void *)iph + sizeof(struct iphdr);

		sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo,
					     iph->saddr, th->source, iph->daddr,
					     th->dest, netdev->ifindex);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		struct ipv6hdr *ipv6h = (struct ipv6hdr *)iph;

		depth += sizeof(struct ipv6hdr);
		th = (void *)ipv6h + sizeof(struct ipv6hdr);

		sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo,
						&ipv6h->saddr, th->source,
						&ipv6h->daddr, ntohs(th->dest),
						netdev->ifindex, 0);
#endif
	}

	depth += sizeof(struct tcphdr);

	if (unlikely(!sk))
		return;

	if (unlikely(sk->sk_state == TCP_TIME_WAIT))
		goto unref;

	if (unlikely(!resync_queue_get_psv(sk)))
		goto unref;

	seq = th->seq;
	datalen = skb->len - depth;
	tls_offload_rx_resync_async_request_start(sk, seq, datalen);
	rq->stats->tls_resync_req_start++;

unref:
	sock_gen_put(sk);
}

void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk,
			  u32 seq, u8 *rcd_sn)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	struct mlx5e_priv *priv;
	struct mlx5e_channel *c;

	priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_get_ctx(sk));
	if (unlikely(!priv_rx))
		return;

	resync = &priv_rx->resync;
	resync->sw_rcd_sn_be = *(__be64 *)rcd_sn;
	resync->seq = seq;

	priv = netdev_priv(netdev);
	c = priv->channels.c[priv_rx->rxq];

	resync_handle_seq_match(priv_rx, c);
}

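/* Per-CQE TLS offload handling: count and mark decrypted SKBs, or kick
 * off the resync flow when the device requests one.
 */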
void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
			      struct mlx5_cqe64 *cqe, u32 *cqe_bcnt)
{
	struct mlx5e_rq_stats *stats = rq->stats;

	switch (get_cqe_tls_offload(cqe)) {
	case CQE_TLS_OFFLOAD_DECRYPTED:
		skb->decrypted = 1;
		stats->tls_decrypted_packets++;
		stats->tls_decrypted_bytes += *cqe_bcnt;
		break;
	case CQE_TLS_OFFLOAD_RESYNC:
		stats->tls_resync_req_pkt++;
		resync_update_sn(rq, skb);
		break;
	default:
		stats->tls_err++;
		break;
	}
}

void mlx5e_ktls_handle_ctx_completion(struct mlx5e_icosq_wqe_info *wi)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx = wi->tls_set_params.priv_rx;
	struct accel_rule *rule = &priv_rx->rule;

	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) {
		complete(&priv_rx->add_ctx);
		return;
	}
	queue_work(rule->priv->tls->rx_wq, &rule->work);
}

static int mlx5e_ktls_sk_get_rxq(struct sock *sk)
{
	int rxq = sk_rx_queue_get(sk);

	if (unlikely(rxq == -1))
		rxq = 0;

	return rxq;
}

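/* Add-flow: create the HW crypto key and a dedicated TIR, then post the
 * static/progress params WQEs. The flow steering rule is installed
 * later, from the params completion handler
 * (mlx5e_ktls_handle_ctx_completion).
 */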
int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
		      struct tls_crypto_info *crypto_info,
		      u32 start_offload_tcp_sn)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	struct tls_context *tls_ctx;
	struct mlx5_core_dev *mdev;
	struct mlx5e_priv *priv;
	int rxq, err;
	u32 rqtn;

	tls_ctx = tls_get_ctx(sk);
	priv = netdev_priv(netdev);
	mdev = priv->mdev;
	priv_rx = kzalloc(sizeof(*priv_rx), GFP_KERNEL);
	if (unlikely(!priv_rx))
		return -ENOMEM;

	err = mlx5_ktls_create_key(mdev, crypto_info, &priv_rx->key_id);
	if (err)
		goto err_create_key;

	priv_rx->crypto_info =
		*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;

	rxq = mlx5e_ktls_sk_get_rxq(sk);
	priv_rx->rxq = rxq;
	priv_rx->sk = sk;

	priv_rx->rq_stats = &priv->channel_stats[rxq].rq;
	priv_rx->sw_stats = &priv->tls->sw_stats;
	mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx);

	rqtn = mlx5e_rx_res_get_rqtn_direct(priv->rx_res, rxq);

	err = mlx5e_ktls_create_tir(mdev, &priv_rx->tir, rqtn);
	if (err)
		goto err_create_tir;

	init_completion(&priv_rx->add_ctx);

	accel_rule_init(&priv_rx->rule, priv);
	resync = &priv_rx->resync;
	resync_init(resync, priv);
	tls_offload_ctx_rx(tls_ctx)->resync_async = &resync->core;
	tls_offload_rx_resync_set_type(sk, TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC);

	err = post_rx_param_wqes(priv->channels.c[rxq], priv_rx, start_offload_tcp_sn);
	if (err)
		goto err_post_wqes;

	atomic64_inc(&priv_rx->sw_stats->rx_tls_ctx);

	return 0;

err_post_wqes:
	mlx5e_tir_destroy(&priv_rx->tir);
err_create_tir:
	mlx5_ktls_destroy_key(mdev, priv_rx->key_id);
err_create_key:
	kfree(priv_rx);
	return err;
}

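/* Teardown. May sleep: waits for the add-flow work and posted params
 * WQEs to complete before releasing the HW objects.
 */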
void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	struct mlx5_core_dev *mdev;
	struct mlx5e_priv *priv;

	priv = netdev_priv(netdev);
	mdev = priv->mdev;

	priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_ctx);
	set_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags);
	mlx5e_set_ktls_rx_priv_ctx(tls_ctx, NULL);
	synchronize_net(); /* Sync with NAPI */
	if (!cancel_work_sync(&priv_rx->rule.work))
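		/* The rule-install work could not be canceled, i.e. it is
		 * already running; wait for it to signal completion.
		 */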
		wait_for_completion(&priv_rx->add_ctx);
	resync = &priv_rx->resync;
	if (cancel_work_sync(&resync->work))
		mlx5e_ktls_priv_rx_put(priv_rx);

	atomic64_inc(&priv_rx->sw_stats->rx_tls_del);
	if (priv_rx->rule.rule)
		mlx5e_accel_fs_del_sk(priv_rx->rule.rule);

	mlx5e_tir_destroy(&priv_rx->tir);
	mlx5_ktls_destroy_key(mdev, priv_rx->key_id);

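	/* priv_rx is not necessarily freed here: if a GET_PSV is still
	 * outstanding, the last reference is dropped from its completion.
	 */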
	mlx5e_ktls_priv_rx_put(priv_rx);
}

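/* Called from NAPI: drain up to @budget queued resync responses, posting
 * a static params WQE for each and ringing the doorbell once at the end.
 */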
bool mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx, *tmp;
	struct mlx5e_ktls_resync_resp *ktls_resync;
	struct mlx5_wqe_ctrl_seg *db_cseg;
	struct mlx5e_icosq *sq;
	LIST_HEAD(local_list);
	int i, j;

	sq = &c->async_icosq;

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
		return false;

	ktls_resync = sq->ktls_resync;
	db_cseg = NULL;
	i = 0;

	spin_lock(&ktls_resync->lock);
	list_for_each_entry_safe(priv_rx, tmp, &ktls_resync->list, list) {
		list_move(&priv_rx->list, &local_list);
		if (++i == budget)
			break;
	}
	if (list_empty(&ktls_resync->list))
		clear_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
	spin_unlock(&ktls_resync->lock);

	spin_lock(&c->async_icosq_lock);
	for (j = 0; j < i; j++) {
		struct mlx5_wqe_ctrl_seg *cseg;

		priv_rx = list_first_entry(&local_list,
					   struct mlx5e_ktls_offload_context_rx,
					   list);
		cseg = post_static_params(sq, priv_rx);
		if (IS_ERR(cseg))
			break;
		list_del(&priv_rx->list);
		db_cseg = cseg;
	}
	if (db_cseg)
		mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, db_cseg);
	spin_unlock(&c->async_icosq_lock);

	priv_rx->rq_stats->tls_resync_res_ok += j;

	if (!list_empty(&local_list)) {
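		/* Leftovers mean post_static_params failed (ICOSQ full):
		 * return them to the global list and keep the pending bit
		 * set so they are retried on a later poll.
		 */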
		spin_lock(&ktls_resync->lock);
		list_splice(&local_list, &ktls_resync->list);
		set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
		spin_unlock(&ktls_resync->lock);
		priv_rx->rq_stats->tls_resync_res_retry++;
	}

	return i == budget;
}