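/* TLS device (HW) offload: offloads TLS record crypto to capable netdevs for
 * the TX and RX paths.
 */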
#include <crypto/aead.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/dst.h>
#include <net/inet_connection_sock.h>
#include <net/tcp.h>
#include <net/tls.h>

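/* device_offload_lock is taken for read while installing or resyncing
 * per-socket offload state, and for write by tls_device_down() so that
 * teardown cannot race with new offload setup.
 */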
static DECLARE_RWSEM(device_offload_lock);

static void tls_device_gc_task(struct work_struct *work);

static DECLARE_WORK(tls_device_gc_work, tls_device_gc_task);
static LIST_HEAD(tls_device_gc_list);
static LIST_HEAD(tls_device_list);
static DEFINE_SPINLOCK(tls_device_lock);

static void tls_device_free_ctx(struct tls_context *ctx)
{
	if (ctx->tx_conf == TLS_HW) {
		kfree(tls_offload_ctx_tx(ctx));
		kfree(ctx->tx.rec_seq);
		kfree(ctx->tx.iv);
	}

	if (ctx->rx_conf == TLS_HW)
		kfree(tls_offload_ctx_rx(ctx));

	tls_ctx_free(ctx);
}

static void tls_device_gc_task(struct work_struct *work)
{
	struct tls_context *ctx, *tmp;
	unsigned long flags;
	LIST_HEAD(gc_list);

	spin_lock_irqsave(&tls_device_lock, flags);
	list_splice_init(&tls_device_gc_list, &gc_list);
	spin_unlock_irqrestore(&tls_device_lock, flags);

	list_for_each_entry_safe(ctx, tmp, &gc_list, list) {
		struct net_device *netdev = ctx->netdev;

		if (netdev && ctx->tx_conf == TLS_HW) {
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_TX);
			dev_put(netdev);
			ctx->netdev = NULL;
		}

		list_del(&ctx->list);
		tls_device_free_ctx(ctx);
	}
}

static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&tls_device_lock, flags);
	list_move_tail(&ctx->list, &tls_device_gc_list);

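	/* schedule the work while still holding tls_device_lock so that
	 * tls_device_down()'s flush_work() is guaranteed to wait for it.
	 */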
	schedule_work(&tls_device_gc_work);

	spin_unlock_irqrestore(&tls_device_lock, flags);
}

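/* assumes the socket is already connected, i.e. has a cached dst from which
 * the offloading netdev can be derived.
 */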
static struct net_device *get_netdev_for_sock(struct sock *sk)
{
	struct dst_entry *dst = sk_dst_get(sk);
	struct net_device *netdev = NULL;

	if (likely(dst)) {
		netdev = dst->dev;
		dev_hold(netdev);
	}

	dst_release(dst);

	return netdev;
}

static void destroy_record(struct tls_record_info *record)
{
	int nr_frags = record->num_frags;
	skb_frag_t *frag;

	while (nr_frags-- > 0) {
		frag = &record->frags[nr_frags];
		__skb_frag_unref(frag);
	}
	kfree(record);
}

static void delete_all_records(struct tls_offload_context_tx *offload_ctx)
{
	struct tls_record_info *info, *temp;

	list_for_each_entry_safe(info, temp, &offload_ctx->records_list, list) {
		list_del(&info->list);
		destroy_record(info);
	}

	offload_ctx->retransmit_hint = NULL;
}

static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_record_info *info, *temp;
	struct tls_offload_context_tx *ctx;
	u64 deleted_records = 0;
	unsigned long flags;

	if (!tls_ctx)
		return;

	ctx = tls_offload_ctx_tx(tls_ctx);

	spin_lock_irqsave(&ctx->lock, flags);
	info = ctx->retransmit_hint;
	if (info && !before(acked_seq, info->end_seq)) {
		ctx->retransmit_hint = NULL;
		list_del(&info->list);
		destroy_record(info);
		deleted_records++;
	}

	list_for_each_entry_safe(info, temp, &ctx->records_list, list) {
		if (before(acked_seq, info->end_seq))
			break;
		list_del(&info->list);

		destroy_record(info);
		deleted_records++;
	}

	ctx->unacked_record_sn += deleted_records;
	spin_unlock_irqrestore(&ctx->lock, flags);
}

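/* sk_destruct replacement for offloaded sockets: run the original destructor,
 * then release the TX offload resources and drop the context reference.
 */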
static void tls_device_sk_destruct(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);

	tls_ctx->sk_destruct(sk);

	if (tls_ctx->tx_conf == TLS_HW) {
		if (ctx->open_record)
			destroy_record(ctx->open_record);
		delete_all_records(ctx);
		crypto_free_aead(ctx->aead_send);
		clean_acked_data_disable(inet_csk(sk));
	}

	if (refcount_dec_and_test(&tls_ctx->refcount))
		tls_device_queue_ctx_destruction(tls_ctx);
}

void tls_device_free_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_free_partial_record(sk, tls_ctx);
}

static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
				 u32 seq)
{
	struct net_device *netdev;
	struct sk_buff *skb;
	int err = 0;
	u8 *rcd_sn;

	skb = tcp_write_queue_tail(sk);
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;

	rcd_sn = tls_ctx->tx.rec_seq;

	down_read(&device_offload_lock);
	netdev = tls_ctx->netdev;
	if (netdev)
		err = netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq,
							 rcd_sn,
							 TLS_OFFLOAD_CTX_DIR_TX);
	up_read(&device_offload_lock);
	if (err)
		return;

	clear_bit_unlock(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
}

static void tls_append_frag(struct tls_record_info *record,
			    struct page_frag *pfrag,
			    int size)
{
	skb_frag_t *frag;

	frag = &record->frags[record->num_frags - 1];
	if (frag->page.p == pfrag->page &&
	    frag->page_offset + frag->size == pfrag->offset) {
		frag->size += size;
	} else {
		++frag;
		frag->page.p = pfrag->page;
		frag->page_offset = pfrag->offset;
		frag->size = size;
		++record->num_frags;
		get_page(pfrag->page);
	}

	pfrag->offset += size;
	record->len += size;
}

static int tls_push_record(struct sock *sk,
			   struct tls_context *ctx,
			   struct tls_offload_context_tx *offload_ctx,
			   struct tls_record_info *record,
			   struct page_frag *pfrag,
			   int flags,
			   unsigned char record_type)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	struct tcp_sock *tp = tcp_sk(sk);
	struct page_frag dummy_tag_frag;
	skb_frag_t *frag;
	int i;

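	/* write the TLS record header (the prepend: header + explicit IV)
	 * into the first fragment.
	 */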
	frag = &record->frags[0];
	tls_fill_prepend(ctx,
			 skb_frag_address(frag),
			 record->len - prot->prepend_size,
			 record_type,
			 prot->version);

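	/* the device computes and fills the authentication tag itself, so just
	 * reserve tag_size bytes at the end of the record; the dummy frag
	 * reuses the first frag's page and its contents are irrelevant.
	 */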
	dummy_tag_frag.page = skb_frag_page(frag);
	dummy_tag_frag.offset = 0;

	tls_append_frag(record, &dummy_tag_frag, prot->tag_size);
	record->end_seq = tp->write_seq + record->len;
	spin_lock_irq(&offload_ctx->lock);
	list_add_tail(&record->list, &offload_ctx->records_list);
	spin_unlock_irq(&offload_ctx->lock);
	offload_ctx->open_record = NULL;

	if (test_bit(TLS_TX_SYNC_SCHED, &ctx->flags))
		tls_device_resync_tx(sk, ctx, tp->write_seq);

	tls_advance_record_sn(sk, prot, &ctx->tx);

	for (i = 0; i < record->num_frags; i++) {
		frag = &record->frags[i];
		sg_unmark_end(&offload_ctx->sg_tx_data[i]);
		sg_set_page(&offload_ctx->sg_tx_data[i], skb_frag_page(frag),
			    frag->size, frag->page_offset);
		sk_mem_charge(sk, frag->size);
		get_page(skb_frag_page(frag));
	}
	sg_mark_end(&offload_ctx->sg_tx_data[record->num_frags - 1]);

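	/* all fragments are described in sg_tx_data; hand the record to the
	 * transmit path.
	 */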
	return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
}

static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx,
				 struct page_frag *pfrag,
				 size_t prepend_size)
{
	struct tls_record_info *record;
	skb_frag_t *frag;

	record = kmalloc(sizeof(*record), GFP_KERNEL);
	if (!record)
		return -ENOMEM;

	frag = &record->frags[0];
	__skb_frag_set_page(frag, pfrag->page);
	frag->page_offset = pfrag->offset;
	skb_frag_size_set(frag, prepend_size);

	get_page(pfrag->page);
	pfrag->offset += prepend_size;

	record->num_frags = 1;
	record->len = prepend_size;
	offload_ctx->open_record = record;
	return 0;
}

static int tls_do_allocation(struct sock *sk,
			     struct tls_offload_context_tx *offload_ctx,
			     struct page_frag *pfrag,
			     size_t prepend_size)
{
	int ret;

	if (!offload_ctx->open_record) {
		if (unlikely(!skb_page_frag_refill(prepend_size, pfrag,
						   sk->sk_allocation))) {
			sk->sk_prot->enter_memory_pressure(sk);
			sk_stream_moderate_sndbuf(sk);
			return -ENOMEM;
		}

		ret = tls_create_new_record(offload_ctx, pfrag, prepend_size);
		if (ret)
			return ret;

		if (pfrag->size > pfrag->offset)
			return 0;
	}

	if (!sk_page_frag_refill(sk, pfrag))
		return -ENOMEM;

	return 0;
}

static int tls_push_data(struct sock *sk,
			 struct iov_iter *msg_iter,
			 size_t size, int flags,
			 unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE);
	struct tls_record_info *record = ctx->open_record;
	int tls_push_record_flags;
	struct page_frag *pfrag;
	size_t orig_size = size;
	u32 max_open_record_len;
	int copy, rc = 0;
	bool done = false;
	long timeo;

	if (flags &
	    ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST))
		return -ENOTSUPP;

	if (sk->sk_err)
		return -sk->sk_err;

	flags |= MSG_SENDPAGE_DECRYPTED;
	tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;

	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	if (tls_is_partially_sent_record(tls_ctx)) {
		rc = tls_push_partial_record(sk, tls_ctx, flags);
		if (rc < 0)
			return rc;
	}

	pfrag = sk_page_frag(sk);

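	/* an open record holds the prepend (header + IV) plus at most
	 * TLS_MAX_PAYLOAD_SIZE bytes of payload; the auth tag is appended
	 * later in tls_push_record().
	 */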
	max_open_record_len = TLS_MAX_PAYLOAD_SIZE +
			      prot->prepend_size;
	do {
		rc = tls_do_allocation(sk, ctx, pfrag,
				       prot->prepend_size);
		if (rc) {
			rc = sk_stream_wait_memory(sk, &timeo);
			if (!rc)
				continue;

			record = ctx->open_record;
			if (!record)
				break;
handle_error:
			if (record_type != TLS_RECORD_TYPE_DATA) {
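				/* never transmit a partially filled record of
				 * a type other than application_data; drop it
				 * and report nothing as sent.
				 */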
				size = orig_size;
				destroy_record(record);
				ctx->open_record = NULL;
			} else if (record->len > prot->prepend_size) {
				goto last_record;
			}

			break;
		}

		record = ctx->open_record;
		copy = min_t(size_t, size, (pfrag->size - pfrag->offset));
		copy = min_t(size_t, copy, (max_open_record_len - record->len));

		if (copy_from_iter_nocache(page_address(pfrag->page) +
					   pfrag->offset,
					   copy, msg_iter) != copy) {
			rc = -EFAULT;
			goto handle_error;
		}
		tls_append_frag(record, pfrag, copy);

		size -= copy;
		if (!size) {
last_record:
			tls_push_record_flags = flags;
			if (more) {
				tls_ctx->pending_open_record_frags =
						!!record->num_frags;
				break;
			}

			done = true;
		}

		if (done || record->len >= max_open_record_len ||
		    (record->num_frags >= MAX_SKB_FRAGS - 1)) {
			rc = tls_push_record(sk,
					     tls_ctx,
					     ctx,
					     record,
					     pfrag,
					     tls_push_record_flags,
					     record_type);
			if (rc < 0)
				break;
		}
	} while (!done);

	if (orig_size - size > 0)
		rc = orig_size - size;

	return rc;
}

int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	int rc;

	lock_sock(sk);

	if (unlikely(msg->msg_controllen)) {
		rc = tls_proccess_cmsg(sk, msg, &record_type);
		if (rc)
			goto out;
	}

	rc = tls_push_data(sk, &msg->msg_iter, size,
			   msg->msg_flags, record_type);

out:
	release_sock(sk);
	return rc;
}

int tls_device_sendpage(struct sock *sk, struct page *page,
			int offset, size_t size, int flags)
{
	struct iov_iter msg_iter;
	char *kaddr = kmap(page);
	struct kvec iov;
	int rc;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	lock_sock(sk);

	if (flags & MSG_OOB) {
		rc = -ENOTSUPP;
		goto out;
	}

	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	iov_iter_kvec(&msg_iter, WRITE, &iov, 1, size);
	rc = tls_push_data(sk, &msg_iter, size,
			   flags, TLS_RECORD_TYPE_DATA);
	kunmap(page);

out:
	release_sock(sk);
	return rc;
}

struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn)
{
	u64 record_sn = context->hint_record_sn;
	struct tls_record_info *info;

	info = context->retransmit_hint;
	if (!info ||
	    before(seq, info->end_seq - info->len)) {
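		/* the retransmit hint starts after the requested sequence
		 * number, so restart the search from the head of the list.
		 */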
		info = list_first_entry(&context->records_list,
					struct tls_record_info, list);
		record_sn = context->unacked_record_sn;
	}

	list_for_each_entry_from(info, &context->records_list, list) {
		if (before(seq, info->end_seq)) {
			if (!context->retransmit_hint ||
			    after(info->end_seq,
				  context->retransmit_hint->end_seq)) {
				context->hint_record_sn = record_sn;
				context->retransmit_hint = info;
			}
			*p_record_sn = record_sn;
			return info;
		}
		record_sn++;
	}

	return NULL;
}
EXPORT_SYMBOL(tls_get_record);

static int tls_device_push_pending_record(struct sock *sk, int flags)
{
	struct iov_iter msg_iter;

	iov_iter_kvec(&msg_iter, WRITE, NULL, 0, 0);
	return tls_push_data(sk, &msg_iter, 0, flags, TLS_RECORD_TYPE_DATA);
}

void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
{
	if (!sk->sk_write_pending && tls_is_partially_sent_record(ctx)) {
		gfp_t sk_allocation = sk->sk_allocation;

		sk->sk_allocation = GFP_ATOMIC;
		tls_push_partial_record(sk, ctx,
					MSG_DONTWAIT | MSG_NOSIGNAL |
					MSG_SENDPAGE_DECRYPTED);
		sk->sk_allocation = sk_allocation;
	}
}

static void tls_device_resync_rx(struct tls_context *tls_ctx,
				 struct sock *sk, u32 seq, u8 *rcd_sn)
{
	struct net_device *netdev;

	if (WARN_ON(test_and_set_bit(TLS_RX_SYNC_RUNNING, &tls_ctx->flags)))
		return;
	netdev = READ_ONCE(tls_ctx->netdev);
	if (netdev)
		netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
						   TLS_OFFLOAD_CTX_DIR_RX);
	clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
}

void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx;
	u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
	struct tls_prot_info *prot;
	u32 is_req_pending;
	s64 resync_req;
	u32 req_seq;

	if (tls_ctx->rx_conf != TLS_HW)
		return;

	prot = &tls_ctx->prot_info;
	rx_ctx = tls_offload_ctx_rx(tls_ctx);
	memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);

	switch (rx_ctx->resync_type) {
	case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ:
		resync_req = atomic64_read(&rx_ctx->resync_req);
		req_seq = resync_req >> 32;
		seq += TLS_HEADER_SIZE - 1;
		is_req_pending = resync_req;

		if (likely(!is_req_pending) || req_seq != seq ||
		    !atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
			return;
		break;
	case TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT:
		if (likely(!rx_ctx->resync_nh_do_now))
			return;

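		/* the head of the next record is already in the receive queue;
		 * note that tcp_inq() includes the record currently being
		 * parsed when called from the parser.
		 */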
		if (tcp_inq(sk) > rcd_len)
			return;

		rx_ctx->resync_nh_do_now = 0;
		seq += rcd_len;
		tls_bigint_increment(rcd_sn, prot->rec_seq_size);
		break;
	}

	tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
}

static void tls_device_core_ctrl_rx_resync(struct tls_context *tls_ctx,
					   struct tls_offload_context_rx *ctx,
					   struct sock *sk, struct sk_buff *skb)
{
	struct strp_msg *rxm;

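	/* core-driven ("next hint") resync only: count records the device
	 * failed to decrypt and, once the failure target is exceeded, ask the
	 * device to resync at an upcoming record boundary.
	 */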
	if (ctx->resync_type != TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT)
		return;

	if (ctx->resync_nh_do_now)
		return;

	if (ctx->resync_nh_reset) {
		ctx->resync_nh_reset = 0;
		ctx->resync_nh.decrypted_failed = 1;
		ctx->resync_nh.decrypted_tgt = TLS_DEVICE_RESYNC_NH_START_IVAL;
		return;
	}

	if (++ctx->resync_nh.decrypted_failed <= ctx->resync_nh.decrypted_tgt)
		return;

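	/* back off: double the failure target until it reaches
	 * TLS_DEVICE_RESYNC_NH_MAX_IVAL, then grow it linearly.
	 */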
	if (ctx->resync_nh.decrypted_tgt < TLS_DEVICE_RESYNC_NH_MAX_IVAL)
		ctx->resync_nh.decrypted_tgt *= 2;
	else
		ctx->resync_nh.decrypted_tgt += TLS_DEVICE_RESYNC_NH_MAX_IVAL;

	rxm = strp_msg(skb);

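	/* if the head of the next record is already queued, defer and let the
	 * parser request the resync when that record starts; otherwise resync
	 * the device right away at copied_seq.
	 */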
	if (tcp_inq(sk) > rxm->full_len) {
		ctx->resync_nh_do_now = 1;
	} else {
		struct tls_prot_info *prot = &tls_ctx->prot_info;
		u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];

		memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
		tls_bigint_increment(rcd_sn, prot->rec_seq_size);

		tls_device_resync_rx(tls_ctx, sk, tcp_sk(sk)->copied_seq,
				     rcd_sn);
	}
}

static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
{
	struct strp_msg *rxm = strp_msg(skb);
	int err = 0, offset = rxm->offset, copy, nsg, data_len, pos;
	struct sk_buff *skb_iter, *unused;
	struct scatterlist sg[1];
	char *orig_buf, *buf;

	orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE +
			   TLS_CIPHER_AES_GCM_128_IV_SIZE, sk->sk_allocation);
	if (!orig_buf)
		return -ENOMEM;
	buf = orig_buf;

	nsg = skb_cow_data(skb, 0, &unused);
	if (unlikely(nsg < 0)) {
		err = nsg;
		goto free_buf;
	}

	sg_init_table(sg, 1);
	sg_set_buf(&sg[0], buf,
		   rxm->full_len + TLS_HEADER_SIZE +
		   TLS_CIPHER_AES_GCM_128_IV_SIZE);
	err = skb_copy_bits(skb, offset, buf,
			    TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE);
	if (err)
		goto free_buf;

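	/* decrypt the record in software into buf; only the data output is
	 * used, the expected authentication failure (-EBADMSG) over the
	 * partially decrypted input is ignored.
	 */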
	err = decrypt_skb(sk, skb, sg);
	if (err != -EBADMSG)
		goto free_buf;
	else
		err = 0;

	data_len = rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE;

	if (skb_pagelen(skb) > offset) {
		copy = min_t(int, skb_pagelen(skb) - offset, data_len);

		if (skb->decrypted) {
			err = skb_store_bits(skb, offset, buf, copy);
			if (err)
				goto free_buf;
		}

		offset += copy;
		buf += copy;
	}

	pos = skb_pagelen(skb);
	skb_walk_frags(skb, skb_iter) {
		int frag_pos;

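		/* copy data from buf back only into fragments the device
		 * already decrypted; skip frags entirely before the current
		 * offset and stop once past the end of the payload.
		 */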
		if (pos + skb_iter->len <= offset)
			goto done_with_frag;
		if (pos >= data_len + rxm->offset)
			break;

		frag_pos = offset - pos;
		copy = min_t(int, skb_iter->len - frag_pos,
			     data_len + rxm->offset - offset);

		if (skb_iter->decrypted) {
			err = skb_store_bits(skb_iter, frag_pos, buf, copy);
			if (err)
				goto free_buf;
		}

		offset += copy;
		buf += copy;
done_with_frag:
		pos += skb_iter->len;
	}

free_buf:
	kfree(orig_buf);
	return err;
}

int tls_device_decrypted(struct sock *sk, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx);
	int is_decrypted = skb->decrypted;
	int is_encrypted = !is_decrypted;
	struct sk_buff *skb_iter;

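	/* determine whether the device decrypted all of the record's
	 * fragments, none of them, or only some.
	 */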
	skb_walk_frags(skb, skb_iter) {
		is_decrypted &= skb_iter->decrypted;
		is_encrypted &= !skb_iter->decrypted;
	}

	ctx->sw.decrypted |= is_decrypted;

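	/* return immediately if the record is entirely plaintext or entirely
	 * ciphertext; otherwise re-encrypt the partially decrypted record so
	 * the software path sees consistent ciphertext.
	 */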
	if (is_decrypted) {
		ctx->resync_nh_reset = 1;
		return 0;
	}
	if (is_encrypted) {
		tls_device_core_ctrl_rx_resync(tls_ctx, ctx, sk, skb);
		return 0;
	}

	ctx->resync_nh_reset = 1;
	return tls_device_reencrypt(sk, skb);
}

static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
			      struct net_device *netdev)
{
	if (sk->sk_destruct != tls_device_sk_destruct) {
		refcount_set(&ctx->refcount, 1);
		dev_hold(netdev);
		ctx->netdev = netdev;
		spin_lock_irq(&tls_device_lock);
		list_add_tail(&ctx->list, &tls_device_list);
		spin_unlock_irq(&tls_device_lock);

		ctx->sk_destruct = sk->sk_destruct;
		sk->sk_destruct = tls_device_sk_destruct;
	}
}

int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
	u16 nonce_size, tag_size, iv_size, rec_seq_size;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_record_info *start_marker_record;
	struct tls_offload_context_tx *offload_ctx;
	struct tls_crypto_info *crypto_info;
	struct net_device *netdev;
	char *iv, *rec_seq;
	struct sk_buff *skb;
	int rc = -EINVAL;
	__be64 rcd_sn;

	if (!ctx)
		goto out;

	if (ctx->priv_ctx_tx) {
		rc = -EEXIST;
		goto out;
	}

	start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL);
	if (!start_marker_record) {
		rc = -ENOMEM;
		goto out;
	}

	offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL);
	if (!offload_ctx) {
		rc = -ENOMEM;
		goto free_marker_record;
	}

	crypto_info = &ctx->crypto_send.info;
	if (crypto_info->version != TLS_1_2_VERSION) {
		rc = -EOPNOTSUPP;
		goto free_offload_ctx;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		break;
	default:
		rc = -EINVAL;
		goto free_offload_ctx;
	}

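	/* rec_seq is copied into on-stack buffers of TLS_MAX_REC_SEQ_SIZE
	 * elsewhere, so reject anything larger.
	 */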
	if (rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
		rc = -EINVAL;
		goto free_offload_ctx;
	}

	prot->version = crypto_info->version;
	prot->cipher_type = crypto_info->cipher_type;
	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
	prot->tag_size = tag_size;
	prot->overhead_size = prot->prepend_size + prot->tag_size;
	prot->iv_size = iv_size;
	ctx->tx.iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			     GFP_KERNEL);
	if (!ctx->tx.iv) {
		rc = -ENOMEM;
		goto free_offload_ctx;
	}

	memcpy(ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);

	prot->rec_seq_size = rec_seq_size;
	ctx->tx.rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
	if (!ctx->tx.rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}

	rc = tls_sw_fallback_init(sk, offload_ctx, crypto_info);
	if (rc)
		goto free_rec_seq;

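	/* start at the current record sequence number minus one to account
	 * for the zero-length start marker record added below.
	 */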
	memcpy(&rcd_sn, ctx->tx.rec_seq, sizeof(rcd_sn));
	offload_ctx->unacked_record_sn = be64_to_cpu(rcd_sn) - 1;

	start_marker_record->end_seq = tcp_sk(sk)->write_seq;
	start_marker_record->len = 0;
	start_marker_record->num_frags = 0;

	INIT_LIST_HEAD(&offload_ctx->records_list);
	list_add_tail(&start_marker_record->list, &offload_ctx->records_list);
	spin_lock_init(&offload_ctx->lock);
	sg_init_table(offload_ctx->sg_tx_data,
		      ARRAY_SIZE(offload_ctx->sg_tx_data));

	clean_acked_data_enable(inet_csk(sk), &tls_icsk_clean_acked);
	ctx->push_pending_record = tls_device_push_pending_record;

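	/* TLS offload is much simpler if SKBs never mix plaintext and
	 * offloaded data, so mark the current tail of the write queue as
	 * end-of-record.
	 */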
	skb = tcp_write_queue_tail(sk);
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;

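	/* multiple sockets may set up offload concurrently, so a read lock is
	 * enough here; it must be taken before get_netdev_for_sock() so that
	 * setup cannot race with NETDEV_DOWN teardown.
	 */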
	down_read(&device_offload_lock);
	netdev = get_netdev_for_sock(sk);
	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
		rc = -EINVAL;
		goto release_lock;
	}

	if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
		rc = -ENOTSUPP;
		goto release_netdev;
	}

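	/* don't start offload on a device that is down; new flows must not be
	 * offloaded after a NETDEV_DOWN event.
	 */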
	if (!(netdev->flags & IFF_UP)) {
		rc = -EINVAL;
		goto release_netdev;
	}

	ctx->priv_ctx_tx = offload_ctx;
	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
					     &ctx->crypto_send.info,
					     tcp_sk(sk)->write_seq);
	if (rc)
		goto release_netdev;

	tls_device_attach(ctx, sk, netdev);

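	/* once sk_validate_xmit_skb is published,
	 * tls_is_sk_tx_device_offloaded() returns true and the device xmit
	 * path may start using the offload context for this socket.
	 */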
	smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb);
	dev_put(netdev);
	up_read(&device_offload_lock);
	goto out;

release_netdev:
	dev_put(netdev);
release_lock:
	up_read(&device_offload_lock);
	clean_acked_data_disable(inet_csk(sk));
	crypto_free_aead(offload_ctx->aead_send);
free_rec_seq:
	kfree(ctx->tx.rec_seq);
free_iv:
	kfree(ctx->tx.iv);
free_offload_ctx:
	kfree(offload_ctx);
	ctx->priv_ctx_tx = NULL;
free_marker_record:
	kfree(start_marker_record);
out:
	return rc;
}

int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
{
	struct tls_offload_context_rx *context;
	struct net_device *netdev;
	int rc = 0;

	if (ctx->crypto_recv.info.version != TLS_1_2_VERSION)
		return -EOPNOTSUPP;

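	/* as on the TX side: a read lock suffices for concurrent setups, and
	 * it must be taken before looking up the netdev to avoid racing with
	 * NETDEV_DOWN teardown.
	 */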
	down_read(&device_offload_lock);
	netdev = get_netdev_for_sock(sk);
	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
		rc = -EINVAL;
		goto release_lock;
	}

	if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
		rc = -ENOTSUPP;
		goto release_netdev;
	}

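	/* don't offload RX on a device that is down, for the same reason as
	 * on the TX side.
	 */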
	if (!(netdev->flags & IFF_UP)) {
		rc = -EINVAL;
		goto release_netdev;
	}

	context = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_RX, GFP_KERNEL);
	if (!context) {
		rc = -ENOMEM;
		goto release_netdev;
	}
	context->resync_nh_reset = 1;

	ctx->priv_ctx_rx = context;
	rc = tls_set_sw_offload(sk, ctx, 0);
	if (rc)
		goto release_ctx;

	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
					     &ctx->crypto_recv.info,
					     tcp_sk(sk)->copied_seq);
	if (rc)
		goto free_sw_resources;

	tls_device_attach(ctx, sk, netdev);
	goto release_netdev;

free_sw_resources:
	up_read(&device_offload_lock);
	tls_sw_free_resources_rx(sk);
	down_read(&device_offload_lock);
release_ctx:
	ctx->priv_ctx_rx = NULL;
release_netdev:
	dev_put(netdev);
release_lock:
	up_read(&device_offload_lock);
	return rc;
}

void tls_device_offload_cleanup_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct net_device *netdev;

	down_read(&device_offload_lock);
	netdev = tls_ctx->netdev;
	if (!netdev)
		goto out;

	netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx,
					TLS_OFFLOAD_CTX_DIR_RX);

	if (tls_ctx->tx_conf != TLS_HW) {
		dev_put(netdev);
		tls_ctx->netdev = NULL;
	}
out:
	up_read(&device_offload_lock);
	tls_sw_release_resources_rx(sk);
}

static int tls_device_down(struct net_device *netdev)
{
	struct tls_context *ctx, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

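	/* take the write lock to stop new offload setups while existing
	 * contexts on this device are torn down.
	 */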
	down_write(&device_offload_lock);

	spin_lock_irqsave(&tls_device_lock, flags);
	list_for_each_entry_safe(ctx, tmp, &tls_device_list, list) {
		if (ctx->netdev != netdev ||
		    !refcount_inc_not_zero(&ctx->refcount))
			continue;

		list_move(&ctx->list, &list);
	}
	spin_unlock_irqrestore(&tls_device_lock, flags);

	list_for_each_entry_safe(ctx, tmp, &list, list) {
		if (ctx->tx_conf == TLS_HW)
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_TX);
		if (ctx->rx_conf == TLS_HW)
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_RX);
		WRITE_ONCE(ctx->netdev, NULL);
		smp_mb__before_atomic();
		while (test_bit(TLS_RX_SYNC_RUNNING, &ctx->flags))
			usleep_range(10, 200);
		dev_put(netdev);
		list_del_init(&ctx->list);

		if (refcount_dec_and_test(&ctx->refcount))
			tls_device_free_ctx(ctx);
	}

	up_write(&device_offload_lock);

	flush_work(&tls_device_gc_work);

	return NOTIFY_DONE;
}

static int tls_dev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (!dev->tlsdev_ops &&
	    !(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX)))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_FEAT_CHANGE:
		if ((dev->features & NETIF_F_HW_TLS_RX) &&
		    !dev->tlsdev_ops->tls_dev_resync)
			return NOTIFY_BAD;

		if (dev->tlsdev_ops &&
		    dev->tlsdev_ops->tls_dev_add &&
		    dev->tlsdev_ops->tls_dev_del)
			return NOTIFY_DONE;
		else
			return NOTIFY_BAD;
	case NETDEV_DOWN:
		return tls_device_down(dev);
	}
	return NOTIFY_DONE;
}

static struct notifier_block tls_dev_notifier = {
	.notifier_call = tls_dev_event,
};

void __init tls_device_init(void)
{
	register_netdevice_notifier(&tls_dev_notifier);
}

void __exit tls_device_cleanup(void)
{
	unregister_netdevice_notifier(&tls_dev_notifier);
	flush_work(&tls_device_gc_work);
	clean_acked_data_flush();
}