#ifndef _TLS_OFFLOAD_H
#define _TLS_OFFLOAD_H

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/crypto.h>
#include <linux/socket.h>
#include <linux/tcp.h>
#include <linux/skmsg.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>

#include <net/net_namespace.h>
#include <net/tcp.h>
#include <net/strparser.h>
#include <crypto/aead.h>
#include <uapi/linux/tls.h>


/* Maximum data size carried in a TLS record */
#define TLS_MAX_PAYLOAD_SIZE		((size_t)1 << 14)

#define TLS_HEADER_SIZE			5
#define TLS_NONCE_OFFSET		TLS_HEADER_SIZE

#define TLS_CRYPTO_INFO_READY(info)	((info)->cipher_type)

#define TLS_RECORD_TYPE_DATA		0x17

#define TLS_AAD_SPACE_SIZE		13

#define MAX_IV_SIZE			16
#define TLS_MAX_REC_SEQ_SIZE		8

/* For CCM mode, the full 16 bytes of IV are made of four fields:
 *
 * IV[16] = b0[1] || implicit nonce[4] || explicit nonce[8] || length[3]
 *
 * The 'length' field is encoded in 'b0' as '(length width - 1)',
 * hence b0 contains (3 - 1) = 2.
 */
#define TLS_AES_CCM_IV_B0_BYTE		2

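/* Illustrative layout (not part of the API): a TLS 1.2 AES-GCM-128
 * application-data record as assembled by this layer, with offsets
 * following from the constants above:
 *
 *	byte  0		content type (TLS_RECORD_TYPE_DATA == 0x17)
 *	bytes 1-2	legacy protocol version (0x03 0x03)
 *	bytes 3-4	ciphertext length, big endian
 *	bytes 5-12	explicit nonce (TLS_NONCE_OFFSET == TLS_HEADER_SIZE)
 *	bytes 13..	ciphertext followed by the 16-byte AEAD tag
 *
 * The 13-byte AAD (TLS_AAD_SPACE_SIZE) prepends the 8-byte record
 * sequence number to the 5-byte header carrying the plaintext length.
 */
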
#define __TLS_INC_STATS(net, field)				\
	__SNMP_INC_STATS((net)->mib.tls_statistics, field)
#define TLS_INC_STATS(net, field)				\
	SNMP_INC_STATS((net)->mib.tls_statistics, field)
#define __TLS_DEC_STATS(net, field)				\
	__SNMP_DEC_STATS((net)->mib.tls_statistics, field)
#define TLS_DEC_STATS(net, field)				\
	SNMP_DEC_STATS((net)->mib.tls_statistics, field)

enum {
	TLS_BASE,
	TLS_SW,
	TLS_HW,
	TLS_HW_RECORD,
	TLS_NUM_CONFIG,
};
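
/* Usage sketch: bumping one of the LINUX_MIB_TLS* counters from
 * uapi/linux/snmp.h, here on a decryption failure in the RX path:
 *
 *	TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
 */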

/* One TLS record on the TX path: the plaintext and encrypted
 * scatter-gather state for a single record, plus the AEAD request
 * that encrypts between them.
 */
struct tls_rec {
	struct list_head list;
	int tx_ready;
	int tx_flags;

	struct sk_msg msg_plaintext;
	struct sk_msg msg_encrypted;

	/* AAD | msg_plaintext.sg.data | sg_tag */
	struct scatterlist sg_aead_in[2];
	/* AAD | msg_encrypted.sg.data (data contains overhead for hdr & iv & tag) */
	struct scatterlist sg_aead_out[2];

	char content_type;
	struct scatterlist sg_content_type;

	char aad_space[TLS_AAD_SPACE_SIZE];
	u8 iv_data[MAX_IV_SIZE];
	struct aead_request aead_req;
	u8 aead_req_ctx[];
};

struct tls_msg {
	struct strp_msg rxm;
	u8 control;
};

struct tx_work {
	struct delayed_work work;
	struct sock *sk;
};

struct tls_sw_context_tx {
	struct crypto_aead *aead_send;
	struct crypto_wait async_wait;
	struct tx_work tx_work;
	struct tls_rec *open_rec;
	struct list_head tx_list;
	atomic_t encrypt_pending;
	/* protect crypto_wait with encrypt_pending */
	spinlock_t encrypt_compl_lock;
	int async_notify;
	u8 async_capable:1;

#define BIT_TX_SCHEDULED	0
#define BIT_TX_CLOSING		1
	unsigned long tx_bitmask;
};

struct tls_sw_context_rx {
	struct crypto_aead *aead_recv;
	struct crypto_wait async_wait;
	struct strparser strp;
	struct sk_buff_head rx_list;	/* list of decrypted 'data' records */
	void (*saved_data_ready)(struct sock *sk);

	struct sk_buff *recv_pkt;
	u8 control;
	u8 async_capable:1;
	u8 decrypted:1;
	atomic_t decrypt_pending;
	/* protect crypto_wait with decrypt_pending */
	spinlock_t decrypt_compl_lock;
	bool async_notify;
};

struct tls_record_info {
	struct list_head list;
	u32 end_seq;
	int len;
	int num_frags;
	skb_frag_t frags[MAX_SKB_FRAGS];
};

struct tls_offload_context_tx {
	struct crypto_aead *aead_send;
	spinlock_t lock;	/* protects records list */
	struct list_head records_list;
	struct tls_record_info *open_record;
	struct tls_record_info *retransmit_hint;
	u64 hint_record_sn;
	u64 unacked_record_sn;

	struct scatterlist sg_tx_data[MAX_SKB_FRAGS];
	void (*sk_destruct)(struct sock *sk);
	u8 driver_state[] __aligned(8);
	/* The TLS layer reserves room for driver specific state.
	 * Currently the belief is that there is not enough driver specific
	 * state to justify another layer of indirection.
	 */
#define TLS_DRIVER_STATE_SIZE_TX	16
};

#define TLS_OFFLOAD_CONTEXT_SIZE_TX					\
	(sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX)

enum tls_context_flags {
	/* tls_device_down was called after the netdev went down, device state
	 * was released, and kTLS works in software, even though rx_conf is
	 * still TLS_HW (needed for transition).
	 */
	TLS_RX_DEV_DEGRADED = 0,
	/* Unlike RX where resync is driven entirely by the core, in TX only
	 * the driver knows when things went out of sync, so we need the
	 * flag to be atomic.
	 */
	TLS_TX_SYNC_SCHED = 1,
	/* tls_dev_del was called for the RX side, device state was released,
	 * but tls_ctx->netdev might still be kept, because TX-side driver
	 * resources might not be released yet. Used to prevent the second
	 * tls_dev_del call in tls_device_down if it happens simultaneously.
	 */
	TLS_RX_DEV_CLOSED = 2,
};

struct cipher_context {
	char *iv;
	char *rec_seq;
};

union tls_crypto_context {
	struct tls_crypto_info info;
	union {
		struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
		struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
		struct tls12_crypto_info_chacha20_poly1305 chacha20_poly1305;
	};
};

struct tls_prot_info {
	u16 version;
	u16 cipher_type;
	u16 prepend_size;
	u16 tag_size;
	u16 overhead_size;
	u16 iv_size;
	u16 salt_size;
	u16 rec_seq_size;
	u16 aad_size;
	u16 tail_size;
};
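
/* Illustrative values (derived from uapi/linux/tls.h), assuming TLS 1.2
 * with AES-GCM-128: prepend_size = 13 (5-byte header + 8-byte explicit
 * nonce), tag_size = 16, overhead_size = 29, iv_size = 8, salt_size = 4,
 * rec_seq_size = 8, aad_size = 13, tail_size = 0.
 */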

struct tls_context {
	/* read-only cache line */
	struct tls_prot_info prot_info;

	u8 tx_conf:3;
	u8 rx_conf:3;

	int (*push_pending_record)(struct sock *sk, int flags);
	void (*sk_write_space)(struct sock *sk);

	void *priv_ctx_tx;
	void *priv_ctx_rx;

	struct net_device *netdev;

	/* rw cache line */
	struct cipher_context tx;
	struct cipher_context rx;

	struct scatterlist *partially_sent_record;
	u16 partially_sent_offset;

	bool in_tcp_sendpages;
	bool pending_open_record_frags;

	struct mutex tx_lock; /* protects partially_sent_* fields and
			       * per-type TX fields
			       */
	unsigned long flags;

	/* cache cold stuff */
	struct proto *sk_proto;
	struct sock *sk;

	void (*sk_destruct)(struct sock *sk);

	union tls_crypto_context crypto_send;
	union tls_crypto_context crypto_recv;

	struct list_head list;
	refcount_t refcount;
	struct rcu_head rcu;
};

enum tls_offload_ctx_dir {
	TLS_OFFLOAD_CTX_DIR_RX,
	TLS_OFFLOAD_CTX_DIR_TX,
};

struct tlsdev_ops {
	int (*tls_dev_add)(struct net_device *netdev, struct sock *sk,
			   enum tls_offload_ctx_dir direction,
			   struct tls_crypto_info *crypto_info,
			   u32 start_offload_tcp_sn);
	void (*tls_dev_del)(struct net_device *netdev,
			    struct tls_context *ctx,
			    enum tls_offload_ctx_dir direction);
	int (*tls_dev_resync)(struct net_device *netdev,
			      struct sock *sk, u32 seq, u8 *rcd_sn,
			      enum tls_offload_ctx_dir direction);
};
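
/* Usage sketch (hypothetical driver; names prefixed foo_ are assumptions):
 * an offload-capable netdev wires up tlsdev_ops and advertises the
 * corresponding feature bits at probe time:
 *
 *	static const struct tlsdev_ops foo_tlsdev_ops = {
 *		.tls_dev_add	= foo_ktls_add,
 *		.tls_dev_del	= foo_ktls_del,
 *		.tls_dev_resync	= foo_ktls_resync,
 *	};
 *
 *	netdev->tlsdev_ops = &foo_tlsdev_ops;
 *	netdev->features |= NETIF_F_HW_TLS_TX | NETIF_F_HW_TLS_RX;
 */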

enum tls_offload_sync_type {
	TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ = 0,
	TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT = 1,
	TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC = 2,
};

#define TLS_DEVICE_RESYNC_NH_START_IVAL		2
#define TLS_DEVICE_RESYNC_NH_MAX_IVAL		128

#define TLS_DEVICE_RESYNC_ASYNC_LOGMAX		13
struct tls_offload_resync_async {
	atomic64_t req;
	u16 loglen;
	u16 rcd_delta;
	u32 log[TLS_DEVICE_RESYNC_ASYNC_LOGMAX];
};

struct tls_offload_context_rx {
	/* sw must be the first member of tls_offload_context_rx */
	struct tls_sw_context_rx sw;
	enum tls_offload_sync_type resync_type;
	/* this member is set regardless of resync_type, to avoid branches */
	u8 resync_nh_reset:1;
	/* CORE_NEXT_HINT only */
	u8 resync_nh_do_now:1;
	union {
		/* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ */
		struct {
			atomic64_t resync_req;
		};
		/* TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT */
		struct {
			u32 decrypted_failed;
			u32 decrypted_tgt;
		} resync_nh;
		/* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC */
		struct {
			struct tls_offload_resync_async *resync_async;
		};
	};
	u8 driver_state[] __aligned(8);
	/* The TLS layer reserves room for driver specific state.
	 * Currently the belief is that there is not enough driver specific
	 * state to justify another layer of indirection.
	 */
#define TLS_DRIVER_STATE_SIZE_RX	8
};

#define TLS_OFFLOAD_CONTEXT_SIZE_RX					\
	(sizeof(struct tls_offload_context_rx) + TLS_DRIVER_STATE_SIZE_RX)

struct tls_context *tls_ctx_create(struct sock *sk);
void tls_ctx_free(struct sock *sk, struct tls_context *ctx);
void update_sk_prot(struct sock *sk, struct tls_context *ctx);

int wait_on_pending_writer(struct sock *sk, long *timeo);
int tls_sk_query(struct sock *sk, int optname, char __user *optval,
		 int __user *optlen);
int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
		  unsigned int optlen);

int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
void tls_sw_strparser_done(struct tls_context *tls_ctx);
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
			   int offset, size_t size, int flags);
int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags);
void tls_sw_cancel_work_tx(struct tls_context *tls_ctx);
void tls_sw_release_resources_tx(struct sock *sk);
void tls_sw_free_ctx_tx(struct tls_context *tls_ctx);
void tls_sw_free_resources_rx(struct sock *sk);
void tls_sw_release_resources_rx(struct sock *sk);
void tls_sw_free_ctx_rx(struct tls_context *tls_ctx);
int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		   int nonblock, int flags, int *addr_len);
bool tls_sw_stream_read(const struct sock *sk);
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags);

int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_device_sendpage(struct sock *sk, struct page *page,
			int offset, size_t size, int flags);
int tls_tx_records(struct sock *sk, int flags);

struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn);

static inline bool tls_record_is_start_marker(struct tls_record_info *rec)
{
	return rec->len == 0;
}

static inline u32 tls_record_start_seq(struct tls_record_info *rec)
{
	return rec->end_seq - rec->len;
}
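
/* Example: a record with end_seq == 1000 and len == 300 covers TCP
 * sequence numbers [700, 1000), so tls_record_start_seq() returns 700.
 * A zero-length record is the start marker inserted when the connection
 * is first handed to the device, before any payload was recorded.
 */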

int tls_push_sg(struct sock *sk, struct tls_context *ctx,
		struct scatterlist *sg, u16 first_offset,
		int flags);
int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
			    int flags);
void tls_free_partial_record(struct sock *sk, struct tls_context *ctx);

static inline struct tls_msg *tls_msg(struct sk_buff *skb)
{
	return (struct tls_msg *)strp_msg(skb);
}

static inline bool tls_is_partially_sent_record(struct tls_context *ctx)
{
	return !!ctx->partially_sent_record;
}

static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
{
	return tls_ctx->pending_open_record_frags;
}

static inline bool is_tx_ready(struct tls_sw_context_tx *ctx)
{
	struct tls_rec *rec;

	/* list_first_entry() on an empty list yields a bogus pointer, so
	 * use the _or_null variant rather than a never-true NULL check.
	 */
	rec = list_first_entry_or_null(&ctx->tx_list, struct tls_rec, list);
	if (!rec)
		return false;

	return READ_ONCE(rec->tx_ready);
}

static inline u16 tls_user_config(struct tls_context *ctx, bool tx)
{
	u16 config = tx ? ctx->tx_conf : ctx->rx_conf;

	switch (config) {
	case TLS_BASE:
		return TLS_CONF_BASE;
	case TLS_SW:
		return TLS_CONF_SW;
	case TLS_HW:
		return TLS_CONF_HW;
	case TLS_HW_RECORD:
		return TLS_CONF_HW_RECORD;
	}
	return 0;
}

struct sk_buff *
tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
		      struct sk_buff *skb);
struct sk_buff *
tls_validate_xmit_skb_sw(struct sock *sk, struct net_device *dev,
			 struct sk_buff *skb);

static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
{
#ifdef CONFIG_SOCK_VALIDATE_XMIT
	return sk_fullsock(sk) &&
	       (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
	       &tls_validate_xmit_skb);
#else
	return false;
#endif
}
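
/* Usage sketch: a driver's xmit path can recognize device-offloaded TLS
 * traffic before touching the payload (foo_ktls_tx is a hypothetical
 * handler):
 *
 *	if (skb->sk && tls_is_sk_tx_device_offloaded(skb->sk))
 *		return foo_ktls_tx(netdev, skb);
 */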

static inline void tls_err_abort(struct sock *sk, int err)
{
	sk->sk_err = err;
	sk->sk_error_report(sk);
}

static inline bool tls_bigint_increment(unsigned char *seq, int len)
{
	int i;

	for (i = len - 1; i >= 0; i--) {
		++seq[i];
		if (seq[i] != 0)
			break;
	}

	return (i == -1);
}
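
/* Example: for len == 2, { 0x00, 0xff } increments to { 0x01, 0x00 } and
 * returns false; { 0xff, 0xff } wraps to { 0x00, 0x00 } and returns true,
 * which callers treat as fatal since the record sequence number must
 * never wrap.
 */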

static inline void tls_bigint_subtract(unsigned char *seq, int n)
{
	u64 rcd_sn;
	__be64 *p;

	BUILD_BUG_ON(TLS_MAX_REC_SEQ_SIZE != 8);

	p = (__be64 *)seq;
	rcd_sn = be64_to_cpu(*p);
	*p = cpu_to_be64(rcd_sn - n);
}

static inline struct tls_context *tls_get_ctx(const struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* Use RCU on icsk_ulp_data only for sock diag code,
	 * TLS data path doesn't need rcu_dereference().
	 */
	return (__force void *)icsk->icsk_ulp_data;
}

static inline void tls_advance_record_sn(struct sock *sk,
					 struct tls_prot_info *prot,
					 struct cipher_context *ctx)
{
	if (tls_bigint_increment(ctx->rec_seq, prot->rec_seq_size))
		tls_err_abort(sk, EBADMSG);

	if (prot->version != TLS_1_3_VERSION &&
	    prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
		tls_bigint_increment(ctx->iv + prot->salt_size,
				     prot->iv_size);
}

static inline void tls_fill_prepend(struct tls_context *ctx,
				    char *buf,
				    size_t plaintext_len,
				    unsigned char record_type)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	size_t pkt_len, iv_size = prot->iv_size;

	pkt_len = plaintext_len + prot->tag_size;
	if (prot->version != TLS_1_3_VERSION &&
	    prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305) {
		pkt_len += iv_size;

		memcpy(buf + TLS_NONCE_OFFSET,
		       ctx->tx.iv + prot->salt_size, iv_size);
	}

	/* we cover the explicit nonce here as well, so buf should be of
	 * size TLS_HEADER_SIZE + the explicit nonce size
	 */
	buf[0] = prot->version == TLS_1_3_VERSION ?
		   TLS_RECORD_TYPE_DATA : record_type;
	/* Note that VERSION must be TLS_1_2 for both TLS 1.2 and TLS 1.3;
	 * the major byte precedes the minor byte on the wire.
	 */
	buf[1] = TLS_1_2_VERSION_MAJOR;
	buf[2] = TLS_1_2_VERSION_MINOR;
	buf[3] = pkt_len >> 8;
	buf[4] = pkt_len & 0xFF;
}
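
/* Example: for TLS 1.2 AES-GCM-128 and a 100-byte plaintext,
 * pkt_len = 100 + 16 (tag) + 8 (explicit nonce) = 124 (0x7c), so the
 * header becomes 17 03 03 00 7c, followed by the 8 explicit-nonce bytes.
 */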

static inline void tls_make_aad(char *buf,
				size_t size,
				char *record_sequence,
				unsigned char record_type,
				struct tls_prot_info *prot)
{
	if (prot->version != TLS_1_3_VERSION) {
		memcpy(buf, record_sequence, prot->rec_seq_size);
		buf += 8;
	} else {
		size += prot->tag_size;
	}

	buf[0] = prot->version == TLS_1_3_VERSION ?
		  TLS_RECORD_TYPE_DATA : record_type;
	buf[1] = TLS_1_2_VERSION_MAJOR;
	buf[2] = TLS_1_2_VERSION_MINOR;
	buf[3] = size >> 8;
	buf[4] = size & 0xFF;
}
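
/* Resulting TLS 1.2 AAD layout (13 bytes, TLS_AAD_SPACE_SIZE):
 * seq[8] | type[1] | version[2] | length[2]. For TLS 1.3 the AAD is just
 * the 5-byte record header, with the tag length folded into 'size'.
 */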

/* TLS 1.3 and ChaCha20-Poly1305 build the per-record nonce by XORing the
 * record sequence number into the static IV (RFC 8446, section 5.3).
 */
static inline void xor_iv_with_seq(struct tls_prot_info *prot, char *iv, char *seq)
{
	int i;

	if (prot->version == TLS_1_3_VERSION ||
	    prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) {
		for (i = 0; i < 8; i++)
			iv[i + 4] ^= seq[i];
	}
}

static inline struct tls_sw_context_rx *tls_sw_ctx_rx(
		const struct tls_context *tls_ctx)
{
	return (struct tls_sw_context_rx *)tls_ctx->priv_ctx_rx;
}

static inline struct tls_sw_context_tx *tls_sw_ctx_tx(
		const struct tls_context *tls_ctx)
{
	return (struct tls_sw_context_tx *)tls_ctx->priv_ctx_tx;
}

static inline struct tls_offload_context_tx *
tls_offload_ctx_tx(const struct tls_context *tls_ctx)
{
	return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx;
}

static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (!ctx)
		return false;
	return !!tls_sw_ctx_tx(ctx);
}

static inline bool tls_sw_has_ctx_rx(const struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (!ctx)
		return false;
	return !!tls_sw_ctx_rx(ctx);
}

void tls_sw_write_space(struct sock *sk, struct tls_context *ctx);
void tls_device_write_space(struct sock *sk, struct tls_context *ctx);

static inline struct tls_offload_context_rx *
tls_offload_ctx_rx(const struct tls_context *tls_ctx)
{
	return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
}

#if IS_ENABLED(CONFIG_TLS_DEVICE)
static inline void *__tls_driver_ctx(struct tls_context *tls_ctx,
				     enum tls_offload_ctx_dir direction)
{
	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
		return tls_offload_ctx_tx(tls_ctx)->driver_state;
	else
		return tls_offload_ctx_rx(tls_ctx)->driver_state;
}

static inline void *
tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction)
{
	return __tls_driver_ctx(tls_get_ctx(sk), direction);
}
#endif
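
/* Usage sketch: a driver keeps per-connection state in the driver_state
 * area sized by TLS_DRIVER_STATE_SIZE_TX/RX. The struct and field below
 * are hypothetical:
 *
 *	struct foo_ktls_state {
 *		u32 hw_flow_id;
 *	};
 *
 *	struct foo_ktls_state *st =
 *		tls_driver_ctx(sk, TLS_OFFLOAD_CTX_DIR_TX);
 *	st->hw_flow_id = flow_id;
 *
 * A BUILD_BUG_ON(sizeof(struct foo_ktls_state) > TLS_DRIVER_STATE_SIZE_TX)
 * guard is customary.
 */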

#define RESYNC_REQ BIT(0)
#define RESYNC_REQ_ASYNC BIT(1)
/* The TLS context is valid until sk_destruct is called */
static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | RESYNC_REQ);
}

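/* Usage sketch: on seeing a record header it could not decrypt, a driver
 * asks the core to confirm that header's TCP sequence; the core answers
 * through the tls_dev_resync callback once software parsing reaches it:
 *
 *	tls_offload_rx_resync_request(sk, htonl(hdr_tcp_seq));
 */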

/* Log all TLS record header TCP sequences in [seq, seq+len] */
static inline void
tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) |
		     ((u64)len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC);
	rx_ctx->resync_async->loglen = 0;
	rx_ctx->resync_async->rcd_delta = 0;
}

static inline void
tls_offload_rx_resync_async_request_end(struct sock *sk, __be32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_async->req,
		     ((u64)ntohl(seq) << 32) | RESYNC_REQ);
}

static inline void
tls_offload_rx_resync_set_type(struct sock *sk, enum tls_offload_sync_type type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_offload_ctx_rx(tls_ctx)->resync_type = type;
}

/* Driver's seq tracking has to be disabled until resync succeeded */
static inline bool tls_offload_tx_resync_pending(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	bool ret;

	ret = test_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
	smp_mb__after_atomic();
	return ret;
}

int __net_init tls_proc_init(struct net *net);
void __net_exit tls_proc_fini(struct net *net);

int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
		      unsigned char *record_type);
int decrypt_skb(struct sock *sk, struct sk_buff *skb,
		struct scatterlist *sgout);
struct sk_buff *tls_encrypt_skb(struct sk_buff *skb);

int tls_sw_fallback_init(struct sock *sk,
			 struct tls_offload_context_tx *offload_ctx,
			 struct tls_crypto_info *crypto_info);

#ifdef CONFIG_TLS_DEVICE
void tls_device_init(void);
void tls_device_cleanup(void);
void tls_device_sk_destruct(struct sock *sk);
int tls_set_device_offload(struct sock *sk, struct tls_context *ctx);
void tls_device_free_resources_tx(struct sock *sk);
int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
void tls_device_offload_cleanup_rx(struct sock *sk);
void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq);
void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq);
int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
			 struct sk_buff *skb, struct strp_msg *rxm);

static inline bool tls_is_sk_rx_device_offloaded(struct sock *sk)
{
	if (!sk_fullsock(sk) ||
	    smp_load_acquire(&sk->sk_destruct) != tls_device_sk_destruct)
		return false;
	return tls_get_ctx(sk)->rx_conf == TLS_HW;
}
#else
static inline void tls_device_init(void) {}
static inline void tls_device_cleanup(void) {}

static inline int
tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
	return -EOPNOTSUPP;
}

static inline void tls_device_free_resources_tx(struct sock *sk) {}

static inline int
tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
{
	return -EOPNOTSUPP;
}

static inline void tls_device_offload_cleanup_rx(struct sock *sk) {}
static inline void
tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) {}

static inline int
tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
		     struct sk_buff *skb, struct strp_msg *rxm)
{
	return 0;
}
#endif
#endif /* _TLS_OFFLOAD_H */