/*
 * net/tipc/crypto.c: TIPC crypto, i.e. AEAD-based encryption/decryption of
 * TIPC messages and the associated key management.
 */
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/rng.h>
#include "crypto.h"
#include "msg.h"
#include "bcast.h"

#define TIPC_TX_GRACE_PERIOD	msecs_to_jiffies(5000)	/* 5s */
#define TIPC_TX_LASTING_TIME	msecs_to_jiffies(10000)	/* 10s */
#define TIPC_RX_ACTIVE_LIM	msecs_to_jiffies(3000)	/* 3s */
#define TIPC_RX_PASSIVE_LIM	msecs_to_jiffies(15000)	/* 15s */

#define TIPC_MAX_TFMS_DEF	10
#define TIPC_MAX_TFMS_LIM	1000

#define TIPC_REKEYING_INTV_DEF	(60 * 24)	/* default: 1 day, in minutes */

/* TIPC key ids: one master key slot plus three session key slots */
enum {
	KEY_MASTER = 0,
	KEY_MIN = KEY_MASTER,
	KEY_1 = 1,
	KEY_2,
	KEY_3,
	KEY_MAX = KEY_3,
};

/* TIPC crypto statistics' counters */
enum {
	STAT_OK,
	STAT_NOK,
	STAT_ASYNC,
	STAT_ASYNC_OK,
	STAT_ASYNC_NOK,
	STAT_BADKEYS,			/* TX only */
	STAT_BADMSGS = STAT_BADKEYS,	/* RX only */
	STAT_NOKEYS,
	STAT_SWITCHES,

	MAX_STATS,
};

/* TIPC crypto statistics' header names */
static const char *hstats[MAX_STATS] = {"ok", "nok", "async", "async_ok",
					"async_nok", "badmsgs", "nokeys",
					"switches"};

/* Max number of TFMs allocated per key, tunable via sysctl */
int sysctl_tipc_max_tfms __read_mostly = TIPC_MAX_TFMS_DEF;
/* Key exchange switch, default: on */
int sysctl_tipc_key_exchange_enabled __read_mostly = 1;

/**
 * struct tipc_key - TIPC key states indicator
 * @pending: key id of the pending key, or 0 if there is none
 * @active: key id of the active key, or 0 if there is none
 * @passive: key id of the passive key, or 0 if there is none
 * @reserved: unused
 * @keys: all the states above packed as a single octet
 */
struct tipc_key {
#define KEY_BITS (2)
#define KEY_MASK ((1 << KEY_BITS) - 1)
	union {
		struct {
#if defined(__LITTLE_ENDIAN_BITFIELD)
			u8 pending:2,
			   active:2,
			   passive:2,
			   reserved:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
			u8 reserved:2,
			   passive:2,
			   active:2,
			   pending:2;
#else
#error "Please fix <asm/byteorder.h>"
#endif
		} __packed;
		u8 keys;
	};
};
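
/* A sketch of the state octet's layout (illustrative, not from the original
 * source): each key id occupies KEY_BITS = 2 bits, with "pending" in the
 * least significant bits. E.g. pending = KEY_1, active = KEY_2,
 * passive = KEY_3 packs as:
 *
 *	keys = (3 << 4) | (2 << 2) | 1 = 0x39
 *
 * which is exactly what tipc_crypto_key_set_state() below computes.
 */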

/**
 * struct tipc_tfm - one entry in a key's list of AEAD TFMs
 * @tfm: the allocated AEAD TFM
 * @list: linked to the other TFM entries of the same key
 */
struct tipc_tfm {
	struct crypto_aead *tfm;
	struct list_head list;
};

/**
 * struct tipc_aead - TIPC AEAD key structure
 * @tfm_entry: per-CPU pointer to the current TFM entry (round-robin)
 * @crypto: TIPC crypto area this key belongs to
 * @cloned: source key if this one has been cloned, NULL otherwise
 * @users: the number of the key users (TX/RX)
 * @salt: the key's SALT value, mixed into the nonce
 * @authsize: authentication tag size
 * @mode: crypto mode, cluster or per-node key
 * @hint: hex string of the last bytes of the key, for logging
 * @rcu: struct rcu_head
 * @key: the user supplied AEAD key data
 * @gen: the key's generation
 * @seqno: the key's sequence number, incremented per packet
 * @refcnt: the key's reference counter
 */
struct tipc_aead {
#define TIPC_AEAD_HINT_LEN (5)
	struct tipc_tfm * __percpu *tfm_entry;
	struct tipc_crypto *crypto;
	struct tipc_aead *cloned;
	atomic_t users;
	u32 salt;
	u8 authsize;
	u8 mode;
	char hint[2 * TIPC_AEAD_HINT_LEN + 1];
	struct rcu_head rcu;
	struct tipc_aead_key *key;
	u16 gen;

	atomic64_t seqno ____cacheline_aligned;
	refcount_t refcnt ____cacheline_aligned;

} ____cacheline_aligned;

/**
 * struct tipc_crypto_stats - TIPC per-CPU crypto statistics
 * @stat: array of crypto statistics counters
 */
struct tipc_crypto_stats {
	unsigned int stat[MAX_STATS];
};

/**
 * struct tipc_crypto - TIPC TX/RX crypto structure
 * @net: struct net
 * @node: TIPC node (RX only)
 * @aead: array of pointers to AEAD keys for encryption/decryption
 * @peer_rx_active: replicated peer RX active key index
 * @key_gen: TX/RX key generation
 * @key: the key states
 * @skey_mode: the received session key's mode
 * @skey: received session key, pending attach
 * @wq: common workqueue (TX crypto only)
 * @work: delayed work, key distributing (RX) or rekeying (TX)
 * @key_distr: key distributing state
 * @rekeying_intv: rekeying interval (in minutes)
 * @stats: the crypto statistics
 * @name: the crypto name, e.g. "RX(<peer id>)" or "TX(<own id>)"
 * @sndnxt: the per-peer sndnxt (TX)
 * @timer1: general timer 1 (jiffies)
 * @timer2: general timer 2 (jiffies)
 * @working: the crypto is working or not
 * @key_master: flag indicating if the master key exists
 * @legacy_user: flag indicating a peer joined without master key support
 * @nokey: no key indication
 * @flags: all the above flags packed as one octet
 * @lock: the crypto lock
 */
struct tipc_crypto {
	struct net *net;
	struct tipc_node *node;
	struct tipc_aead __rcu *aead[KEY_MAX + 1];
	atomic_t peer_rx_active;
	u16 key_gen;
	struct tipc_key key;
	u8 skey_mode;
	struct tipc_aead_key *skey;
	struct workqueue_struct *wq;
	struct delayed_work work;
#define KEY_DISTR_SCHED		1
#define KEY_DISTR_COMPL		2
	atomic_t key_distr;
	u32 rekeying_intv;

	struct tipc_crypto_stats __percpu *stats;
	char name[48];

	atomic64_t sndnxt ____cacheline_aligned;
	unsigned long timer1;
	unsigned long timer2;
	union {
		struct {
			u8 working:1;
			u8 key_master:1;
			u8 legacy_user:1;
			u8 nokey:1;
		};
		u8 flags;
	};
	spinlock_t lock;

} ____cacheline_aligned;

/* struct tipc_crypto_tx_ctx - TX context for async encrypt callbacks */
struct tipc_crypto_tx_ctx {
	struct tipc_aead *aead;
	struct tipc_bearer *bearer;
	struct tipc_media_addr dst;
};

/* struct tipc_crypto_rx_ctx - RX context for async decrypt callbacks */
struct tipc_crypto_rx_ctx {
	struct tipc_aead *aead;
	struct tipc_bearer *bearer;
};

static struct tipc_aead *tipc_aead_get(struct tipc_aead __rcu *aead);
static inline void tipc_aead_put(struct tipc_aead *aead);
static void tipc_aead_free(struct rcu_head *rp);
static int tipc_aead_users(struct tipc_aead __rcu *aead);
static void tipc_aead_users_inc(struct tipc_aead __rcu *aead, int lim);
static void tipc_aead_users_dec(struct tipc_aead __rcu *aead, int lim);
static void tipc_aead_users_set(struct tipc_aead __rcu *aead, int val);
static struct crypto_aead *tipc_aead_tfm_next(struct tipc_aead *aead);
static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey,
			  u8 mode);
static int tipc_aead_clone(struct tipc_aead **dst, struct tipc_aead *src);
static void *tipc_aead_mem_alloc(struct crypto_aead *tfm,
				 unsigned int crypto_ctx_size,
				 u8 **iv, struct aead_request **req,
				 struct scatterlist **sg, int nsg);
static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb,
			     struct tipc_bearer *b,
			     struct tipc_media_addr *dst,
			     struct tipc_node *__dnode);
static void tipc_aead_encrypt_done(struct crypto_async_request *base, int err);
static int tipc_aead_decrypt(struct net *net, struct tipc_aead *aead,
			     struct sk_buff *skb, struct tipc_bearer *b);
static void tipc_aead_decrypt_done(struct crypto_async_request *base, int err);
static inline int tipc_ehdr_size(struct tipc_ehdr *ehdr);
static int tipc_ehdr_build(struct net *net, struct tipc_aead *aead,
			   u8 tx_key, struct sk_buff *skb,
			   struct tipc_crypto *__rx);
static inline void tipc_crypto_key_set_state(struct tipc_crypto *c,
					     u8 new_passive,
					     u8 new_active,
					     u8 new_pending);
static int tipc_crypto_key_attach(struct tipc_crypto *c,
				  struct tipc_aead *aead, u8 pos,
				  bool master_key);
static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending);
static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx,
						 struct tipc_crypto *rx,
						 struct sk_buff *skb,
						 u8 tx_key);
static void tipc_crypto_key_synch(struct tipc_crypto *rx, struct sk_buff *skb);
static int tipc_crypto_key_revoke(struct net *net, u8 tx_key);
static inline void tipc_crypto_clone_msg(struct net *net, struct sk_buff *_skb,
					 struct tipc_bearer *b,
					 struct tipc_media_addr *dst,
					 struct tipc_node *__dnode, u8 type);
static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
				     struct tipc_bearer *b,
				     struct sk_buff **skb, int err);
static void tipc_crypto_do_cmd(struct net *net, int cmd);
static char *tipc_crypto_key_dump(struct tipc_crypto *c, char *buf);
static char *tipc_key_change_dump(struct tipc_key old, struct tipc_key new,
				  char *buf);
static int tipc_crypto_key_xmit(struct net *net, struct tipc_aead_key *skey,
				u16 gen, u8 mode, u32 dnode);
static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr);
static void tipc_crypto_work_tx(struct work_struct *work);
static void tipc_crypto_work_rx(struct work_struct *work);
static int tipc_aead_key_generate(struct tipc_aead_key *skey);

#define is_tx(crypto) (!(crypto)->node)
#define is_rx(crypto) (!is_tx(crypto))

#define key_next(cur) ((cur) % KEY_MAX + 1)
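
/* Illustrative note (not from the original source): key_next() cycles only
 * through the session key ids and never yields KEY_MASTER (= 0):
 *
 *	key_next(KEY_1) == KEY_2
 *	key_next(KEY_2) == KEY_3
 *	key_next(KEY_3) == KEY_1
 */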

#define tipc_aead_rcu_ptr(rcu_ptr, lock)				\
	rcu_dereference_protected((rcu_ptr), lockdep_is_held(lock))

#define tipc_aead_rcu_replace(rcu_ptr, ptr, lock)			\
do {									\
	struct tipc_aead *__tmp = rcu_dereference_protected((rcu_ptr),	\
						lockdep_is_held(lock));	\
	rcu_assign_pointer((rcu_ptr), (ptr));				\
	tipc_aead_put(__tmp);						\
} while (0)

#define tipc_crypto_key_detach(rcu_ptr, lock)				\
	tipc_aead_rcu_replace((rcu_ptr), NULL, lock)

/**
 * tipc_aead_key_validate - Validate a user supplied AEAD key
 * @ukey: the user key to be validated
 * @info: netlink info pointer, used for extended error reporting
 *
 * Return: 0 if the key is valid, otherwise a negative error code.
 */
int tipc_aead_key_validate(struct tipc_aead_key *ukey, struct genl_info *info)
{
	int keylen;

	/* Check if the algorithm exists */
	if (unlikely(!crypto_has_alg(ukey->alg_name, 0, 0))) {
		GENL_SET_ERR_MSG(info, "unable to load the algorithm (is the module present?)");
		return -ENODEV;
	}

	/* Currently, only the "gcm(aes)" cipher algorithm is supported */
	if (strcmp(ukey->alg_name, "gcm(aes)")) {
		GENL_SET_ERR_MSG(info, "the algorithm is not supported yet");
		return -ENOTSUPP;
	}

	/* Check if the key size is correct */
	keylen = ukey->keylen - TIPC_AES_GCM_SALT_SIZE;
	if (unlikely(keylen != TIPC_AES_GCM_KEY_SIZE_128 &&
		     keylen != TIPC_AES_GCM_KEY_SIZE_192 &&
		     keylen != TIPC_AES_GCM_KEY_SIZE_256)) {
		GENL_SET_ERR_MSG(info, "incorrect key length (20, 28 or 36 octets?)");
		return -EKEYREJECTED;
	}

	return 0;
}
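
/* A minimal sketch of a key that passes the validation above, assuming the
 * AES-128 sizes from crypto.h (illustrative only, not from the original
 * source):
 *
 *	struct tipc_aead_key *ukey;
 *
 *	ukey = kzalloc(sizeof(*ukey) + 20, GFP_KERNEL);
 *	strscpy(ukey->alg_name, "gcm(aes)", TIPC_AEAD_ALG_NAME);
 *	ukey->keylen = 20;	// 16-byte AES-128 key + 4-byte salt
 *	get_random_bytes(ukey->key, ukey->keylen);
 */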

/**
 * tipc_aead_key_generate - Generate new session key
 * @skey: the session key to be generated
 *
 * Return: 0 in case of success, otherwise a negative error code.
 */
static int tipc_aead_key_generate(struct tipc_aead_key *skey)
{
	int rc = 0;

	/* Fill the key's content with a random value via RNG cipher */
	rc = crypto_get_default_rng();
	if (likely(!rc)) {
		rc = crypto_rng_get_bytes(crypto_default_rng, skey->key,
					  skey->keylen);
		crypto_put_default_rng();
	}

	return rc;
}

static struct tipc_aead *tipc_aead_get(struct tipc_aead __rcu *aead)
{
	struct tipc_aead *tmp;

	rcu_read_lock();
	tmp = rcu_dereference(aead);
	if (unlikely(!tmp || !refcount_inc_not_zero(&tmp->refcnt)))
		tmp = NULL;
	rcu_read_unlock();

	return tmp;
}

static inline void tipc_aead_put(struct tipc_aead *aead)
{
	if (aead && refcount_dec_and_test(&aead->refcnt))
		call_rcu(&aead->rcu, tipc_aead_free);
}

/**
 * tipc_aead_free - Release AEAD key and all the TFMs in its list
 * @rp: rcu head pointer
 */
static void tipc_aead_free(struct rcu_head *rp)
{
	struct tipc_aead *aead = container_of(rp, struct tipc_aead, rcu);
	struct tipc_tfm *tfm_entry, *head, *tmp;

	if (aead->cloned) {
		tipc_aead_put(aead->cloned);
	} else {
		head = *get_cpu_ptr(aead->tfm_entry);
		put_cpu_ptr(aead->tfm_entry);
		list_for_each_entry_safe(tfm_entry, tmp, &head->list, list) {
			crypto_free_aead(tfm_entry->tfm);
			list_del(&tfm_entry->list);
			kfree(tfm_entry);
		}
		/* Free the head entry as well */
		crypto_free_aead(head->tfm);
		list_del(&head->list);
		kfree(head);
	}
	free_percpu(aead->tfm_entry);
	kfree_sensitive(aead->key);
	kfree(aead);
}

static int tipc_aead_users(struct tipc_aead __rcu *aead)
{
	struct tipc_aead *tmp;
	int users = 0;

	rcu_read_lock();
	tmp = rcu_dereference(aead);
	if (tmp)
		users = atomic_read(&tmp->users);
	rcu_read_unlock();

	return users;
}

static void tipc_aead_users_inc(struct tipc_aead __rcu *aead, int lim)
{
	struct tipc_aead *tmp;

	rcu_read_lock();
	tmp = rcu_dereference(aead);
	if (tmp)
		atomic_add_unless(&tmp->users, 1, lim);
	rcu_read_unlock();
}

static void tipc_aead_users_dec(struct tipc_aead __rcu *aead, int lim)
{
	struct tipc_aead *tmp;

	rcu_read_lock();
	tmp = rcu_dereference(aead);
	if (tmp)
		atomic_add_unless(&tmp->users, -1, lim);
	rcu_read_unlock();
}

static void tipc_aead_users_set(struct tipc_aead __rcu *aead, int val)
{
	struct tipc_aead *tmp;
	int cur;

	rcu_read_lock();
	tmp = rcu_dereference(aead);
	if (tmp) {
		do {
			cur = atomic_read(&tmp->users);
			if (cur == val)
				break;
		} while (atomic_cmpxchg(&tmp->users, cur, val) != cur);
	}
	rcu_read_unlock();
}

/**
 * tipc_aead_tfm_next - Move TFM entry to the next one in list and return it
 * @aead: the AEAD key pointer
 */
static struct crypto_aead *tipc_aead_tfm_next(struct tipc_aead *aead)
{
	struct tipc_tfm **tfm_entry;
	struct crypto_aead *tfm;

	tfm_entry = get_cpu_ptr(aead->tfm_entry);
	*tfm_entry = list_next_entry(*tfm_entry, list);
	tfm = (*tfm_entry)->tfm;
	put_cpu_ptr(tfm_entry);

	return tfm;
}

/**
 * tipc_aead_init - Initiate TIPC AEAD
 * @aead: returned new TIPC AEAD key handle pointer
 * @ukey: pointer to user key data
 * @mode: the key mode
 *
 * Allocate a (list of) new cipher transformation (TFM) with the specific user
 * key data if valid. The number of the allocated TFMs can be set via the
 * sysctl "net.tipc.max_tfms" first. All the other AEAD data are initialized
 * as well.
 *
 * Return: 0 if the initiation is successful, otherwise: < 0
 */
static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey,
			  u8 mode)
{
	struct tipc_tfm *tfm_entry, *head;
	struct crypto_aead *tfm;
	struct tipc_aead *tmp;
	int keylen, err, cpu;
	int tfm_cnt = 0;

	if (unlikely(*aead))
		return -EEXIST;

	/* Allocate a new AEAD */
	tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
	if (unlikely(!tmp))
		return -ENOMEM;

	/* The user key consists of two parts: [AES-KEY][SALT] */
	keylen = ukey->keylen - TIPC_AES_GCM_SALT_SIZE;

	/* Allocate per-cpu TFM entry pointer */
	tmp->tfm_entry = alloc_percpu(struct tipc_tfm *);
	if (!tmp->tfm_entry) {
		kfree_sensitive(tmp);
		return -ENOMEM;
	}

	/* Make a list of TFMs with the user key data */
	do {
		tfm = crypto_alloc_aead(ukey->alg_name, 0, 0);
		if (IS_ERR(tfm)) {
			err = PTR_ERR(tfm);
			break;
		}

		if (unlikely(!tfm_cnt &&
			     crypto_aead_ivsize(tfm) != TIPC_AES_GCM_IV_SIZE)) {
			crypto_free_aead(tfm);
			err = -ENOTSUPP;
			break;
		}

		err = crypto_aead_setauthsize(tfm, TIPC_AES_GCM_TAG_SIZE);
		err |= crypto_aead_setkey(tfm, ukey->key, keylen);
		if (unlikely(err)) {
			crypto_free_aead(tfm);
			break;
		}

		tfm_entry = kmalloc(sizeof(*tfm_entry), GFP_KERNEL);
		if (unlikely(!tfm_entry)) {
			crypto_free_aead(tfm);
			err = -ENOMEM;
			break;
		}
		INIT_LIST_HEAD(&tfm_entry->list);
		tfm_entry->tfm = tfm;

		/* First entry? */
		if (!tfm_cnt) {
			head = tfm_entry;
			for_each_possible_cpu(cpu) {
				*per_cpu_ptr(tmp->tfm_entry, cpu) = head;
			}
		} else {
			list_add_tail(&tfm_entry->list, &head->list);
		}

	} while (++tfm_cnt < sysctl_tipc_max_tfms);

	/* Was no TFM allocated at all? */
	if (!tfm_cnt) {
		free_percpu(tmp->tfm_entry);
		kfree_sensitive(tmp);
		return err;
	}

	/* Form a hex string of some last bytes as the key's hint */
	bin2hex(tmp->hint, ukey->key + keylen - TIPC_AEAD_HINT_LEN,
		TIPC_AEAD_HINT_LEN);

	/* Initialize the other data */
	tmp->mode = mode;
	tmp->cloned = NULL;
	tmp->authsize = TIPC_AES_GCM_TAG_SIZE;
	tmp->key = kmemdup(ukey, tipc_aead_key_size(ukey), GFP_KERNEL);
	if (!tmp->key) {
		tipc_aead_free(&tmp->rcu);
		return -ENOMEM;
	}
	memcpy(&tmp->salt, ukey->key + keylen, TIPC_AES_GCM_SALT_SIZE);
	atomic_set(&tmp->users, 0);
	atomic64_set(&tmp->seqno, 0);
	refcount_set(&tmp->refcnt, 1);

	*aead = tmp;
	return 0;
}

/**
 * tipc_aead_clone - Clone a TIPC AEAD key
 * @dst: dest key for the cloning
 * @src: source key to clone from
 *
 * Make a "copy" of the source AEAD key data to the dest; the TFM list is
 * shared between the keys, i.e. no new TFM is allocated. The source must be
 * a cluster key. The new key's refcnt is set to 1, while the source's is
 * incremented.
 *
 * Return: 0 in case of success, otherwise a negative error code.
 */
static int tipc_aead_clone(struct tipc_aead **dst, struct tipc_aead *src)
{
	struct tipc_aead *aead;
	int cpu;

	if (!src)
		return -ENOKEY;

	if (src->mode != CLUSTER_KEY)
		return -EINVAL;

	if (unlikely(*dst))
		return -EEXIST;

	aead = kzalloc(sizeof(*aead), GFP_ATOMIC);
	if (unlikely(!aead))
		return -ENOMEM;

	aead->tfm_entry = alloc_percpu_gfp(struct tipc_tfm *, GFP_ATOMIC);
	if (unlikely(!aead->tfm_entry)) {
		kfree_sensitive(aead);
		return -ENOMEM;
	}

	for_each_possible_cpu(cpu) {
		*per_cpu_ptr(aead->tfm_entry, cpu) =
				*per_cpu_ptr(src->tfm_entry, cpu);
	}

	memcpy(aead->hint, src->hint, sizeof(src->hint));
	aead->mode = src->mode;
	aead->salt = src->salt;
	aead->authsize = src->authsize;
	atomic_set(&aead->users, 0);
	atomic64_set(&aead->seqno, 0);
	refcount_set(&aead->refcnt, 1);

	WARN_ON(!refcount_inc_not_zero(&src->refcnt));
	aead->cloned = src;

	*dst = aead;
	return 0;
}

/**
 * tipc_aead_mem_alloc - Allocate memory for AEAD request operations
 * @tfm: cipher handle to be registered with the request
 * @crypto_ctx_size: size of the crypto context for the callback
 * @iv: returned pointer to IV data
 * @req: returned pointer to AEAD request data
 * @sg: returned pointer to SG lists
 * @nsg: number of SG lists to be allocated
 *
 * Allocate one buffer to store the crypto context, AEAD request, IV and SG
 * lists. The memory layout is: crypto_ctx || iv || aead_req || sg[].
 *
 * Return: the pointer to the memory area in case of success, otherwise NULL
 */
static void *tipc_aead_mem_alloc(struct crypto_aead *tfm,
				 unsigned int crypto_ctx_size,
				 u8 **iv, struct aead_request **req,
				 struct scatterlist **sg, int nsg)
{
	unsigned int iv_size, req_size;
	unsigned int len;
	u8 *mem;

	iv_size = crypto_aead_ivsize(tfm);
	req_size = sizeof(**req) + crypto_aead_reqsize(tfm);

	len = crypto_ctx_size;
	len += iv_size;
	len += crypto_aead_alignmask(tfm) & ~(crypto_tfm_ctx_alignment() - 1);
	len = ALIGN(len, crypto_tfm_ctx_alignment());
	len += req_size;
	len = ALIGN(len, __alignof__(struct scatterlist));
	len += nsg * sizeof(**sg);

	mem = kmalloc(len, GFP_ATOMIC);
	if (!mem)
		return NULL;

	*iv = (u8 *)PTR_ALIGN(mem + crypto_ctx_size,
			      crypto_aead_alignmask(tfm) + 1);
	*req = (struct aead_request *)PTR_ALIGN(*iv + iv_size,
						crypto_tfm_ctx_alignment());
	*sg = (struct scatterlist *)PTR_ALIGN((u8 *)*req + req_size,
					      __alignof__(struct scatterlist));

	return (void *)mem;
}
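
/* Usage sketch (illustrative, not from the original source): a caller grabs
 * one buffer with everything an async request needs, then frees it as a
 * whole in the completion callback:
 *
 *	struct tipc_crypto_tx_ctx *ctx;
 *	struct aead_request *req;
 *	struct scatterlist *sg;
 *	u8 *iv;
 *	void *mem;
 *
 *	mem = tipc_aead_mem_alloc(tfm, sizeof(*ctx), &iv, &req, &sg, nsg);
 *	if (!mem)
 *		return -ENOMEM;
 *	ctx = mem;		// context lives at the start of the buffer
 *	...
 *	kfree(mem);		// one kfree() releases iv, req and sg too
 */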

/**
 * tipc_aead_encrypt - Encrypt a message
 * @aead: TIPC AEAD key for the message encryption
 * @skb: the message/packet to be encrypted
 * @b: TIPC bearer where the message will be delivered after the encryption
 * @dst: the destination media address
 * @__dnode: TIPC dest node if "known"
 *
 * Return:
 * * 0                   : the encryption has completed
 * * -EINPROGRESS/-EBUSY : the encryption will be done in async mode
 * * < 0                 : the encryption has failed
 */
static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb,
			     struct tipc_bearer *b,
			     struct tipc_media_addr *dst,
			     struct tipc_node *__dnode)
{
	struct crypto_aead *tfm = tipc_aead_tfm_next(aead);
	struct tipc_crypto_tx_ctx *tx_ctx;
	struct aead_request *req;
	struct sk_buff *trailer;
	struct scatterlist *sg;
	struct tipc_ehdr *ehdr;
	int ehsz, len, tailen, nsg, rc;
	void *ctx;
	u32 salt;
	u8 *iv;

	/* Make sure message len is at least 4-byte aligned */
	len = ALIGN(skb->len, 4);
	tailen = len - skb->len + aead->authsize;

	/* Expand skb tail for the authentication tag. Normally the skb should
	 * have enough tailroom already; otherwise, the skb must be cow-ed,
	 * which comes with a cost (see below).
	 */
	SKB_LINEAR_ASSERT(skb);
	if (tailen > skb_tailroom(skb)) {
		pr_debug("TX(): skb tailroom is not enough: %d, requires: %d\n",
			 skb_tailroom(skb), tailen);
	}

	if (unlikely(!skb_cloned(skb) && tailen <= skb_tailroom(skb))) {
		nsg = 1;
		trailer = skb;
	} else {
		/* TODO: We could avoid skb_cow_data() if skb has no frag_list,
		 * e.g. by using skb_fill_page_desc() to add another page to
		 * the skb with the wanted tailen. However, that is not
		 * applicable if we are unable to expand a cloned skb.
		 */
		nsg = skb_cow_data(skb, tailen, &trailer);
		if (unlikely(nsg < 0)) {
			pr_err("TX: skb_cow_data() returned %d\n", nsg);
			return nsg;
		}
	}

	pskb_put(skb, trailer, tailen);

	/* Allocate memory for the AEAD operation */
	ctx = tipc_aead_mem_alloc(tfm, sizeof(*tx_ctx), &iv, &req, &sg, nsg);
	if (unlikely(!ctx))
		return -ENOMEM;
	TIPC_SKB_CB(skb)->crypto_ctx = ctx;

	/* Map skb to the sg lists */
	sg_init_table(sg, nsg);
	rc = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(rc < 0)) {
		pr_err("TX: skb_to_sgvec() returned %d, nsg %d!\n", rc, nsg);
		goto exit;
	}

	/* Prepare IV: [SALT (4 octets)][SEQNO (8 octets)]
	 * In cluster key mode, SALT is varied by xor-ing with the source
	 * address (or w0 of id), otherwise with the dest address if dest is
	 * known.
	 */
	ehdr = (struct tipc_ehdr *)skb->data;
	salt = aead->salt;
	if (aead->mode == CLUSTER_KEY)
		salt ^= __be32_to_cpu(ehdr->addr);
	else if (__dnode)
		salt ^= tipc_node_get_addr(__dnode);
	memcpy(iv, &salt, 4);
	memcpy(iv + 4, (u8 *)&ehdr->seqno, 8);

	/* Prepare request */
	ehsz = tipc_ehdr_size(ehdr);
	aead_request_set_tfm(req, tfm);
	aead_request_set_ad(req, ehsz);
	aead_request_set_crypt(req, sg, sg, len - ehsz, iv);

	/* Set callback function & data */
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tipc_aead_encrypt_done, skb);
	tx_ctx = (struct tipc_crypto_tx_ctx *)ctx;
	tx_ctx->aead = aead;
	tx_ctx->bearer = b;
	memcpy(&tx_ctx->dst, dst, sizeof(*dst));

	/* Hold bearer */
	if (unlikely(!tipc_bearer_hold(b))) {
		rc = -ENODEV;
		goto exit;
	}

	/* Now, do encrypt */
	rc = crypto_aead_encrypt(req);
	if (rc == -EINPROGRESS || rc == -EBUSY)
		return rc;

	tipc_bearer_put(b);

exit:
	kfree(ctx);
	TIPC_SKB_CB(skb)->crypto_ctx = NULL;
	return rc;
}
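
/* Nonce construction sketch (illustrative, not from the original source):
 * with a 4-octet salt and a 64-bit seqno, the 12-byte GCM IV built above is:
 *
 *	iv[0..3]  = key salt ^ (node address, mode dependent)
 *	iv[4..11] = ehdr->seqno (big endian, set in tipc_ehdr_build())
 *
 * Varying the salt per node keeps nonces from colliding when several nodes
 * encrypt with the same cluster key and their seqno counters overlap.
 */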

static void tipc_aead_encrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct tipc_crypto_tx_ctx *tx_ctx = TIPC_SKB_CB(skb)->crypto_ctx;
	struct tipc_bearer *b = tx_ctx->bearer;
	struct tipc_aead *aead = tx_ctx->aead;
	struct tipc_crypto *tx = aead->crypto;
	struct net *net = tx->net;

	switch (err) {
	case 0:
		this_cpu_inc(tx->stats->stat[STAT_ASYNC_OK]);
		rcu_read_lock();
		if (likely(test_bit(0, &b->up)))
			b->media->send_msg(net, skb, b, &tx_ctx->dst);
		else
			kfree_skb(skb);
		rcu_read_unlock();
		break;
	case -EINPROGRESS:
		return;
	default:
		this_cpu_inc(tx->stats->stat[STAT_ASYNC_NOK]);
		kfree_skb(skb);
		break;
	}

	kfree(tx_ctx);
	tipc_bearer_put(b);
	tipc_aead_put(aead);
}

/**
 * tipc_aead_decrypt - Decrypt an encrypted message
 * @net: struct net
 * @aead: TIPC AEAD for the message decryption
 * @skb: the input/output skb
 * @b: TIPC bearer where the message has been received
 *
 * Return:
 * * 0                   : the decryption has completed
 * * -EINPROGRESS/-EBUSY : the decryption will be done in async mode
 * * < 0                 : the decryption has failed
 */
static int tipc_aead_decrypt(struct net *net, struct tipc_aead *aead,
			     struct sk_buff *skb, struct tipc_bearer *b)
{
	struct tipc_crypto_rx_ctx *rx_ctx;
	struct aead_request *req;
	struct crypto_aead *tfm;
	struct sk_buff *unused;
	struct scatterlist *sg;
	struct tipc_ehdr *ehdr;
	int ehsz, nsg, rc;
	void *ctx;
	u32 salt;
	u8 *iv;

	if (unlikely(!aead))
		return -ENOKEY;

	nsg = skb_cow_data(skb, 0, &unused);
	if (unlikely(nsg < 0)) {
		pr_err("RX: skb_cow_data() returned %d\n", nsg);
		return nsg;
	}

	/* Allocate memory for the AEAD operation */
	tfm = tipc_aead_tfm_next(aead);
	ctx = tipc_aead_mem_alloc(tfm, sizeof(*rx_ctx), &iv, &req, &sg, nsg);
	if (unlikely(!ctx))
		return -ENOMEM;
	TIPC_SKB_CB(skb)->crypto_ctx = ctx;

	/* Map skb to the sg lists */
	sg_init_table(sg, nsg);
	rc = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(rc < 0)) {
		pr_err("RX: skb_to_sgvec() returned %d, nsg %d\n", rc, nsg);
		goto exit;
	}

	/* Reconstruct the IV the same way the sender built it */
	ehdr = (struct tipc_ehdr *)skb->data;
	salt = aead->salt;
	if (aead->mode == CLUSTER_KEY)
		salt ^= __be32_to_cpu(ehdr->addr);
	else if (ehdr->destined)
		salt ^= tipc_own_addr(net);
	memcpy(iv, &salt, 4);
	memcpy(iv + 4, (u8 *)&ehdr->seqno, 8);

	/* Prepare request */
	ehsz = tipc_ehdr_size(ehdr);
	aead_request_set_tfm(req, tfm);
	aead_request_set_ad(req, ehsz);
	aead_request_set_crypt(req, sg, sg, skb->len - ehsz, iv);

	/* Set callback function & data */
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tipc_aead_decrypt_done, skb);
	rx_ctx = (struct tipc_crypto_rx_ctx *)ctx;
	rx_ctx->aead = aead;
	rx_ctx->bearer = b;

	/* Hold bearer */
	if (unlikely(!tipc_bearer_hold(b))) {
		rc = -ENODEV;
		goto exit;
	}

	/* Now, do decrypt */
	rc = crypto_aead_decrypt(req);
	if (rc == -EINPROGRESS || rc == -EBUSY)
		return rc;

	tipc_bearer_put(b);

exit:
	kfree(ctx);
	TIPC_SKB_CB(skb)->crypto_ctx = NULL;
	return rc;
}

static void tipc_aead_decrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct tipc_crypto_rx_ctx *rx_ctx = TIPC_SKB_CB(skb)->crypto_ctx;
	struct tipc_bearer *b = rx_ctx->bearer;
	struct tipc_aead *aead = rx_ctx->aead;
	struct tipc_crypto_stats __percpu *stats = aead->crypto->stats;
	struct net *net = aead->crypto->net;

	switch (err) {
	case 0:
		this_cpu_inc(stats->stat[STAT_ASYNC_OK]);
		break;
	case -EINPROGRESS:
		return;
	default:
		this_cpu_inc(stats->stat[STAT_ASYNC_NOK]);
		break;
	}

	kfree(rx_ctx);
	tipc_crypto_rcv_complete(net, aead, b, &skb, err);
	if (likely(skb)) {
		if (likely(test_bit(0, &b->up)))
			tipc_rcv(net, skb, b);
		else
			kfree_skb(skb);
	}

	tipc_bearer_put(b);
}

static inline int tipc_ehdr_size(struct tipc_ehdr *ehdr)
{
	return (ehdr->user != LINK_CONFIG) ? EHDR_SIZE : EHDR_CFG_SIZE;
}

/**
 * tipc_ehdr_validate - Validate an encryption message
 * @skb: the message buffer
 *
 * Return: "true" if this is a valid encryption message, otherwise "false"
 */
bool tipc_ehdr_validate(struct sk_buff *skb)
{
	struct tipc_ehdr *ehdr;
	int ehsz;

	if (unlikely(!pskb_may_pull(skb, EHDR_MIN_SIZE)))
		return false;

	ehdr = (struct tipc_ehdr *)skb->data;
	if (unlikely(ehdr->version != TIPC_EVERSION))
		return false;
	ehsz = tipc_ehdr_size(ehdr);
	if (unlikely(!pskb_may_pull(skb, ehsz)))
		return false;
	if (unlikely(skb->len <= ehsz + TIPC_AES_GCM_TAG_SIZE))
		return false;

	return true;
}

/**
 * tipc_ehdr_build - Build TIPC encryption message header
 * @net: struct net
 * @aead: TX AEAD key to be used for the message encryption
 * @tx_key: key id used for the message encryption
 * @skb: message skb
 * @__rx: RX crypto handle if dest is "known"
 *
 * Return: the header size if the building is successful, otherwise < 0
 */
static int tipc_ehdr_build(struct net *net, struct tipc_aead *aead,
			   u8 tx_key, struct sk_buff *skb,
			   struct tipc_crypto *__rx)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct tipc_ehdr *ehdr;
	u32 user = msg_user(hdr);
	u64 seqno;
	int ehsz;

	/* Make room for the encryption header */
	ehsz = (user != LINK_CONFIG) ? EHDR_SIZE : EHDR_CFG_SIZE;
	WARN_ON(skb_headroom(skb) < ehsz);
	ehdr = (struct tipc_ehdr *)skb_push(skb, ehsz);

	/* Obtain a seqno first:
	 * Use the key seqno (= cluster wise) if dest is unknown or we're in
	 * cluster key mode, otherwise a per-peer seqno is preferable.
	 */
	if (!__rx || aead->mode == CLUSTER_KEY)
		seqno = atomic64_inc_return(&aead->seqno);
	else
		seqno = atomic64_inc_return(&__rx->sndnxt);

	/* Revoke the key if seqno has wrapped around */
	if (unlikely(!seqno))
		return tipc_crypto_key_revoke(net, tx_key);

	/* Word 1-2 */
	ehdr->seqno = cpu_to_be64(seqno);

	/* Words 0, 3- */
	ehdr->version = TIPC_EVERSION;
	ehdr->user = 0;
	ehdr->keepalive = 0;
	ehdr->tx_key = tx_key;
	ehdr->destined = (__rx) ? 1 : 0;
	ehdr->rx_key_active = (__rx) ? __rx->key.active : 0;
	ehdr->rx_nokey = (__rx) ? __rx->nokey : 0;
	ehdr->master_key = aead->crypto->key_master;
	ehdr->reserved_1 = 0;
	ehdr->reserved_2 = 0;

	switch (user) {
	case LINK_CONFIG:
		ehdr->user = LINK_CONFIG;
		memcpy(ehdr->id, tipc_own_id(net), NODE_ID_LEN);
		break;
	default:
		if (user == LINK_PROTOCOL && msg_type(hdr) == STATE_MSG) {
			ehdr->user = LINK_PROTOCOL;
			ehdr->keepalive = msg_is_keepalive(hdr);
		}
		ehdr->addr = hdr->hdr[3];
		break;
	}

	return ehsz;
}

static inline void tipc_crypto_key_set_state(struct tipc_crypto *c,
					     u8 new_passive,
					     u8 new_active,
					     u8 new_pending)
{
	struct tipc_key old = c->key;
	char buf[32];

	c->key.keys = ((new_passive & KEY_MASK) << (KEY_BITS * 2)) |
		      ((new_active  & KEY_MASK) << (KEY_BITS)) |
		      ((new_pending & KEY_MASK));

	pr_debug("%s: key changing %s ::%pS\n", c->name,
		 tipc_key_change_dump(old, c->key, buf),
		 __builtin_return_address(0));
}

/**
 * tipc_crypto_key_init - Initiate a new user / AEAD key
 * @c: TIPC crypto to which the new key is attached
 * @ukey: the user key
 * @mode: the key mode (CLUSTER_KEY or PER_NODE_KEY)
 * @master_key: specify this is a cluster master key
 *
 * A new TIPC AEAD key will be allocated and initiated with the specified
 * user key, then attached to the TIPC crypto.
 *
 * Return: new key id in case of success, otherwise: < 0
 */
int tipc_crypto_key_init(struct tipc_crypto *c, struct tipc_aead_key *ukey,
			 u8 mode, bool master_key)
{
	struct tipc_aead *aead = NULL;
	int rc = 0;

	/* Initiate with the new user key */
	rc = tipc_aead_init(&aead, ukey, mode);

	/* Attach it to the crypto */
	if (likely(!rc)) {
		rc = tipc_crypto_key_attach(c, aead, 0, master_key);
		if (rc < 0)
			tipc_aead_free(&aead->rcu);
	}

	return rc;
}

/**
 * tipc_crypto_key_attach - Attach a new AEAD key to TIPC crypto
 * @c: TIPC crypto to which the new AEAD key is attached
 * @aead: the new AEAD key pointer
 * @pos: desired slot in the crypto key array, = 0 if any
 * @master_key: specify this is a cluster master key
 *
 * Return: new key id in case of success, otherwise: -EBUSY
 */
static int tipc_crypto_key_attach(struct tipc_crypto *c,
				  struct tipc_aead *aead, u8 pos,
				  bool master_key)
{
	struct tipc_key key;
	int rc = -EBUSY;
	u8 new_key;

	spin_lock_bh(&c->lock);
	key = c->key;
	if (master_key) {
		new_key = KEY_MASTER;
		goto attach;
	}
	if (key.active && key.passive)
		goto exit;
	if (key.pending) {
		if (tipc_aead_users(c->aead[key.pending]) > 0)
			goto exit;
		/* Replace it */
		new_key = key.pending;
	} else {
		if (pos) {
			if (key.active && pos != key_next(key.active)) {
				key.passive = pos;
				new_key = pos;
				goto attach;
			} else if (!key.active && !key.passive) {
				key.pending = pos;
				new_key = pos;
				goto attach;
			}
		}
		key.pending = key_next(key.active ?: key.passive);
		new_key = key.pending;
	}

attach:
	aead->crypto = c;
	aead->gen = (is_tx(c)) ? ++c->key_gen : c->key_gen;
	tipc_aead_rcu_replace(c->aead[new_key], aead, &c->lock);
	if (likely(c->key.keys != key.keys))
		tipc_crypto_key_set_state(c, key.passive, key.active,
					  key.pending);
	c->working = 1;
	c->nokey = 0;
	c->key_master |= master_key;
	rc = new_key;

exit:
	spin_unlock_bh(&c->lock);
	return rc;
}
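
/* Key life cycle sketch (illustrative, not from the original source), with
 * the roles written as <pending, active, passive>:
 *
 *	attach new key:     <0,0,0> -> <1,0,0>   (tipc_crypto_key_attach)
 *	peer uses the key:  <1,0,0> -> <0,1,0>   (tipc_crypto_timeout)
 *	attach another key: <0,1,0> -> <2,1,0>
 *	switch-over:        <2,1,0> -> <0,2,1>   (old active turns passive)
 */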

void tipc_crypto_key_flush(struct tipc_crypto *c)
{
	struct tipc_crypto *tx, *rx;
	int k;

	spin_lock_bh(&c->lock);
	if (is_rx(c)) {
		/* Try to cancel pending work */
		rx = c;
		tx = tipc_net(rx->net)->crypto_tx;
		if (cancel_delayed_work(&rx->work)) {
			kfree(rx->skey);
			rx->skey = NULL;
			atomic_xchg(&rx->key_distr, 0);
			tipc_node_put(rx->node);
		}
		/* Release the peer's TX key user if any */
		k = atomic_xchg(&rx->peer_rx_active, 0);
		if (k) {
			tipc_aead_users_dec(tx->aead[k], 0);
			/* Mark the point TX key users changed */
			tx->timer1 = jiffies;
		}
	}

	c->flags = 0;
	tipc_crypto_key_set_state(c, 0, 0, 0);
	for (k = KEY_MIN; k <= KEY_MAX; k++)
		tipc_crypto_key_detach(c->aead[k], &c->lock);
	atomic64_set(&c->sndnxt, 0);
	spin_unlock_bh(&c->lock);
}

/**
 * tipc_crypto_key_try_align - Align RX keys if possible
 * @rx: RX crypto handle
 * @new_pending: new pending slot if aligned (= TX key id from the peer)
 *
 * Peer has used an unknown key slot; this only happens when the peer has
 * left and rejoined, or we are a newcomer. So, we try to move the pending
 * key to the new slot, and possibly the passive key as well. This
 * "alignment" is done only when the pending key has no users.
 *
 * Return: "true" if the key in the given slot was aligned, otherwise "false"
 */
static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending)
{
	struct tipc_aead *tmp1, *tmp2 = NULL;
	struct tipc_key key;
	bool aligned = false;
	u8 new_passive = 0;
	int x;

	spin_lock(&rx->lock);
	key = rx->key;
	if (key.pending == new_pending) {
		aligned = true;
		goto exit;
	}
	if (key.active)
		goto exit;
	if (!key.pending)
		goto exit;
	if (tipc_aead_users(rx->aead[key.pending]) > 0)
		goto exit;

	/* Try to "isolate" this pending key first */
	tmp1 = tipc_aead_rcu_ptr(rx->aead[key.pending], &rx->lock);
	if (!refcount_dec_if_one(&tmp1->refcnt))
		goto exit;
	rcu_assign_pointer(rx->aead[key.pending], NULL);

	/* Move the passive key if any */
	if (key.passive) {
		tmp2 = rcu_replace_pointer(rx->aead[key.passive], tmp2,
					   lockdep_is_held(&rx->lock));
		x = (key.passive - key.pending + new_pending) % KEY_MAX;
		new_passive = (x <= 0) ? x + KEY_MAX : x;
	}

	/* Re-allocate the key(s) */
	tipc_crypto_key_set_state(rx, new_passive, 0, new_pending);
	rcu_assign_pointer(rx->aead[new_pending], tmp1);
	if (new_passive)
		rcu_assign_pointer(rx->aead[new_passive], tmp2);
	refcount_set(&tmp1->refcnt, 1);
	aligned = true;
	pr_info_ratelimited("%s: key[%d] -> key[%d]\n", rx->name, key.pending,
			    new_pending);

exit:
	spin_unlock(&rx->lock);
	return aligned;
}

/**
 * tipc_crypto_key_pick_tx - Pick one TX key for message decryption
 * @tx: TX crypto handle
 * @rx: RX crypto handle (can be NULL)
 * @skb: the message skb which will be decrypted later
 * @tx_key: peer TX key id
 *
 * This function looks up the existing TX keys and picks one which is
 * suitable for the message decryption: it must be a cluster key and not
 * used before on the same message (i.e. recursive).
 *
 * Return: the TX AEAD key handle in case of success, otherwise NULL
 */
static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx,
						 struct tipc_crypto *rx,
						 struct sk_buff *skb,
						 u8 tx_key)
{
	struct tipc_skb_cb *skb_cb = TIPC_SKB_CB(skb);
	struct tipc_aead *aead = NULL;
	struct tipc_key key = tx->key;
	u8 k, i = 0;

	/* Initialize the clone context data if not yet */
	if (!skb_cb->tx_clone_deferred) {
		skb_cb->tx_clone_deferred = 1;
		memset(&skb_cb->tx_clone_ctx, 0, sizeof(skb_cb->tx_clone_ctx));
	}

	skb_cb->tx_clone_ctx.rx = rx;
	if (++skb_cb->tx_clone_ctx.recurs > 2)
		return NULL;

	/* Pick one TX key */
	spin_lock(&tx->lock);
	if (tx_key == KEY_MASTER) {
		aead = tipc_aead_rcu_ptr(tx->aead[KEY_MASTER], &tx->lock);
		goto done;
	}
	do {
		k = (i == 0) ? key.pending :
			((i == 1) ? key.active : key.passive);
		if (!k)
			continue;
		aead = tipc_aead_rcu_ptr(tx->aead[k], &tx->lock);
		if (!aead)
			continue;
		if (aead->mode != CLUSTER_KEY ||
		    aead == skb_cb->tx_clone_ctx.last) {
			aead = NULL;
			continue;
		}
		/* Ok, found one cluster key */
		skb_cb->tx_clone_ctx.last = aead;
		WARN_ON(skb->next);
		skb->next = skb_clone(skb, GFP_ATOMIC);
		if (unlikely(!skb->next))
			pr_warn("Failed to clone skb for next round if any\n");
		break;
	} while (++i < 3);

done:
	if (likely(aead))
		WARN_ON(!refcount_inc_not_zero(&aead->refcnt));
	spin_unlock(&tx->lock);

	return aead;
}

/**
 * tipc_crypto_key_synch - Synch own key data according to peer key status
 * @rx: RX crypto handle
 * @skb: TIPCv2 message buffer (incl. the ehdr from peer)
 *
 * This function updates the peer node related data as the peer RX active key
 * has changed, so the numbers of TX keys' users on this node are increased
 * and decreased correspondingly.
 *
 * It also considers if the peer has no key: then the own master key (if any)
 * must take over, i.e. the grace period starts and the key distributing
 * process is triggered.
 *
 * The "per-peer" sndnxt is also reset when the peer key has switched.
 */
static void tipc_crypto_key_synch(struct tipc_crypto *rx, struct sk_buff *skb)
{
	struct tipc_ehdr *ehdr = (struct tipc_ehdr *)skb_network_header(skb);
	struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx;
	struct tipc_msg *hdr = buf_msg(skb);
	u32 self = tipc_own_addr(rx->net);
	u8 cur, new;
	unsigned long delay;

	/* Update RX 'key_master' flag according to the peer, also mark
	 * "legacy" if the peer has no master key.
	 */
	rx->key_master = ehdr->master_key;
	if (!rx->key_master)
		tx->legacy_user = 1;

	/* For later cases, apply only if message is destined to this node */
	if (!ehdr->destined || msg_short(hdr) || msg_destnode(hdr) != self)
		return;

	/* Case 1: Peer has no keys, let's make the master key take over */
	if (ehdr->rx_nokey) {
		/* Set or extend grace period */
		tx->timer2 = jiffies;
		/* Schedule key distributing for the peer if not yet */
		if (tx->key.keys &&
		    !atomic_cmpxchg(&rx->key_distr, 0, KEY_DISTR_SCHED)) {
			get_random_bytes(&delay, 2);
			delay %= 5;
			delay = msecs_to_jiffies(500 * ++delay);
			if (queue_delayed_work(tx->wq, &rx->work, delay))
				tipc_node_get(rx->node);
		}
	} else {
		/* Cancel a pending key distributing if any */
		atomic_xchg(&rx->key_distr, 0);
	}

	/* Case 2: Peer RX active key has changed, let's update own TX users */
	cur = atomic_read(&rx->peer_rx_active);
	new = ehdr->rx_key_active;
	if (tx->key.keys &&
	    cur != new &&
	    atomic_cmpxchg(&rx->peer_rx_active, cur, new) == cur) {
		if (new)
			tipc_aead_users_inc(tx->aead[new], INT_MAX);
		if (cur)
			tipc_aead_users_dec(tx->aead[cur], 0);

		atomic64_set(&rx->sndnxt, 0);
		/* Mark the point TX key users changed */
		tx->timer1 = jiffies;

		pr_debug("%s: key users changed %d-- %d++, peer %s\n",
			 tx->name, cur, new, rx->name);
	}
}

static int tipc_crypto_key_revoke(struct net *net, u8 tx_key)
{
	struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
	struct tipc_key key;

	spin_lock(&tx->lock);
	key = tx->key;
	WARN_ON(!key.active || tx_key != key.active);

	tipc_crypto_key_set_state(tx, key.passive, 0, key.pending);
	tipc_crypto_key_detach(tx->aead[key.active], &tx->lock);
	spin_unlock(&tx->lock);

	pr_warn("%s: key is revoked\n", tx->name);
	return -EKEYREVOKED;
}

int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
		      struct tipc_node *node)
{
	struct tipc_crypto *c;

	if (*crypto)
		return -EEXIST;

	/* Allocate crypto */
	c = kzalloc(sizeof(*c), GFP_ATOMIC);
	if (!c)
		return -ENOMEM;

	/* Allocate workqueue on TX */
	if (!node) {
		c->wq = alloc_ordered_workqueue("tipc_crypto", 0);
		if (!c->wq) {
			kfree(c);
			return -ENOMEM;
		}
	}

	/* Allocate statistic structure */
	c->stats = alloc_percpu_gfp(struct tipc_crypto_stats, GFP_ATOMIC);
	if (!c->stats) {
		if (c->wq)
			destroy_workqueue(c->wq);
		kfree_sensitive(c);
		return -ENOMEM;
	}

	c->flags = 0;
	c->net = net;
	c->node = node;
	get_random_bytes(&c->key_gen, 2);
	tipc_crypto_key_set_state(c, 0, 0, 0);
	atomic_set(&c->key_distr, 0);
	atomic_set(&c->peer_rx_active, 0);
	atomic64_set(&c->sndnxt, 0);
	c->timer1 = jiffies;
	c->timer2 = jiffies;
	c->rekeying_intv = TIPC_REKEYING_INTV_DEF;
	spin_lock_init(&c->lock);
	scnprintf(c->name, 48, "%s(%s)", (is_rx(c)) ? "RX" : "TX",
		  (is_rx(c)) ? tipc_node_get_id_str(c->node) :
			       tipc_own_id_string(c->net));

	if (is_rx(c))
		INIT_DELAYED_WORK(&c->work, tipc_crypto_work_rx);
	else
		INIT_DELAYED_WORK(&c->work, tipc_crypto_work_tx);

	*crypto = c;
	return 0;
}

void tipc_crypto_stop(struct tipc_crypto **crypto)
{
	struct tipc_crypto *c = *crypto;
	u8 k;

	if (!c)
		return;

	/* Flush any queued works & destroy wq */
	if (is_tx(c)) {
		c->rekeying_intv = 0;
		cancel_delayed_work_sync(&c->work);
		destroy_workqueue(c->wq);
	}

	/* Release AEAD keys */
	rcu_read_lock();
	for (k = KEY_MIN; k <= KEY_MAX; k++)
		tipc_aead_put(rcu_dereference(c->aead[k]));
	rcu_read_unlock();
	pr_debug("%s: has been stopped\n", c->name);

	/* Free this crypto statistics */
	free_percpu(c->stats);

	*crypto = NULL;
	kfree_sensitive(c);
}

void tipc_crypto_timeout(struct tipc_crypto *rx)
{
	struct tipc_net *tn = tipc_net(rx->net);
	struct tipc_crypto *tx = tn->crypto_tx;
	struct tipc_key key;
	int cmd;

	/* TX pending: taking all users & stable -> active */
	spin_lock(&tx->lock);
	key = tx->key;
	if (key.active && tipc_aead_users(tx->aead[key.active]) > 0)
		goto s1;
	if (!key.pending || tipc_aead_users(tx->aead[key.pending]) <= 0)
		goto s1;
	if (time_before(jiffies, tx->timer1 + TIPC_TX_LASTING_TIME))
		goto s1;

	tipc_crypto_key_set_state(tx, key.passive, key.pending, 0);
	if (key.active)
		tipc_crypto_key_detach(tx->aead[key.active], &tx->lock);
	this_cpu_inc(tx->stats->stat[STAT_SWITCHES]);
	pr_info("%s: key[%d] is activated\n", tx->name, key.pending);

s1:
	spin_unlock(&tx->lock);

	/* RX pending: having user -> active */
	spin_lock(&rx->lock);
	key = rx->key;
	if (!key.pending || tipc_aead_users(rx->aead[key.pending]) <= 0)
		goto s2;

	if (key.active)
		key.passive = key.active;
	key.active = key.pending;
	rx->timer2 = jiffies;
	tipc_crypto_key_set_state(rx, key.passive, key.active, 0);
	this_cpu_inc(rx->stats->stat[STAT_SWITCHES]);
	pr_info("%s: key[%d] is activated\n", rx->name, key.pending);
	goto s5;

s2:
	/* RX pending: not working -> remove */
	if (!key.pending || tipc_aead_users(rx->aead[key.pending]) > -10)
		goto s3;

	tipc_crypto_key_set_state(rx, key.passive, key.active, 0);
	tipc_crypto_key_detach(rx->aead[key.pending], &rx->lock);
	pr_debug("%s: key[%d] is removed\n", rx->name, key.pending);
	goto s5;

s3:
	/* RX active: timed out or no user -> pending */
	if (!key.active)
		goto s4;
	if (time_before(jiffies, rx->timer1 + TIPC_RX_ACTIVE_LIM) &&
	    tipc_aead_users(rx->aead[key.active]) > 0)
		goto s4;

	if (key.pending)
		key.passive = key.active;
	else
		key.pending = key.active;
	rx->timer2 = jiffies;
	tipc_crypto_key_set_state(rx, key.passive, 0, key.pending);
	tipc_aead_users_set(rx->aead[key.pending], 0);
	pr_debug("%s: key[%d] is deactivated\n", rx->name, key.active);
	goto s5;

s4:
	/* RX passive: outdated or not working -> free */
	if (!key.passive)
		goto s5;
	if (time_before(jiffies, rx->timer2 + TIPC_RX_PASSIVE_LIM) &&
	    tipc_aead_users(rx->aead[key.passive]) > -10)
		goto s5;

	tipc_crypto_key_set_state(rx, 0, key.active, key.pending);
	tipc_crypto_key_detach(rx->aead[key.passive], &rx->lock);
	pr_debug("%s: key[%d] is freed\n", rx->name, key.passive);

s5:
	spin_unlock(&rx->lock);

	/* Relax the "legacy user" flag after the grace period; it will be
	 * set again if a legacy peer shows up.
	 */
	if (time_after(jiffies, tx->timer2 + TIPC_TX_GRACE_PERIOD))
		tx->legacy_user = 0;

	/* Limit max_tfms & do debug commands if needed */
	if (likely(sysctl_tipc_max_tfms <= TIPC_MAX_TFMS_LIM))
		return;

	cmd = sysctl_tipc_max_tfms;
	sysctl_tipc_max_tfms = TIPC_MAX_TFMS_DEF;
	tipc_crypto_do_cmd(rx->net, cmd);
}
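
/* Timer state machine summary (illustrative, not from the original source):
 *
 *	TX: pending --(has users, stable for TIPC_TX_LASTING_TIME)--> active
 *	RX: pending --(has users)--> active --(idle/unused)--> pending again
 *	    pending --(broken, users <= -10)--> detached
 *	    passive --(idle for TIPC_RX_PASSIVE_LIM)--> freed
 */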

static inline void tipc_crypto_clone_msg(struct net *net, struct sk_buff *_skb,
					 struct tipc_bearer *b,
					 struct tipc_media_addr *dst,
					 struct tipc_node *__dnode, u8 type)
{
	struct sk_buff *skb;

	skb = skb_clone(_skb, GFP_ATOMIC);
	if (skb) {
		TIPC_SKB_CB(skb)->xmit_type = type;
		tipc_crypto_xmit(net, &skb, b, dst, __dnode);
		if (skb)
			b->media->send_msg(net, skb, b, dst);
	}
}

/**
 * tipc_crypto_xmit - Build & encrypt TIPC message for xmit
 * @net: struct net
 * @skb: input/output message skb pointer
 * @b: bearer used for xmit later
 * @dst: destination media address
 * @__dnode: destination node if "known"
 *
 * The encryption key to be used is selected in the following order:
 * - the pending key, when probing or when the peer's RX already runs on it;
 * - the master key, during the grace period or for vital messages;
 * - the active key, otherwise.
 *
 * Note: the skb is freed and set to NULL on error or in async mode.
 *
 * Return:
 * * 0                   : the encryption has succeeded (or no encryption)
 * * -EINPROGRESS/-EBUSY : the encryption is ongoing, a callback will be made
 * * -ENOKEY             : the encryption has failed due to no key
 * * -EKEYREVOKED        : the encryption has failed due to key revoked
 * * -ENOMEM             : the encryption has failed due to no memory
 * * < 0                 : the encryption has failed due to other reasons
 */
int tipc_crypto_xmit(struct net *net, struct sk_buff **skb,
		     struct tipc_bearer *b, struct tipc_media_addr *dst,
		     struct tipc_node *__dnode)
{
	struct tipc_crypto *__rx = tipc_node_crypto_rx(__dnode);
	struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
	struct tipc_crypto_stats __percpu *stats = tx->stats;
	struct tipc_msg *hdr = buf_msg(*skb);
	struct tipc_key key = tx->key;
	struct tipc_aead *aead = NULL;
	u32 user = msg_user(hdr);
	u32 type = msg_type(hdr);
	int rc = -ENOKEY;
	u8 tx_key = 0;

	/* No encryption? */
	if (!tx->working)
		return 0;

	/* Pending key if peer has active on it or probing time */
	if (unlikely(key.pending)) {
		tx_key = key.pending;
		if (!tx->key_master && !key.active)
			goto encrypt;
		if (__rx && atomic_read(&__rx->peer_rx_active) == tx_key)
			goto encrypt;
		if (TIPC_SKB_CB(*skb)->xmit_type == SKB_PROBING) {
			pr_debug("%s: probing for key[%d]\n", tx->name,
				 key.pending);
			goto encrypt;
		}
		if (user == LINK_CONFIG || user == LINK_PROTOCOL)
			tipc_crypto_clone_msg(net, *skb, b, dst, __dnode,
					      SKB_PROBING);
	}

	/* Master key if this is a *vital* message or in grace period */
	if (tx->key_master) {
		tx_key = KEY_MASTER;
		if (!key.active)
			goto encrypt;
		if (TIPC_SKB_CB(*skb)->xmit_type == SKB_GRACING) {
			pr_debug("%s: gracing for msg (%d %d)\n", tx->name,
				 user, type);
			goto encrypt;
		}
		if (user == LINK_CONFIG ||
		    (user == LINK_PROTOCOL && type == RESET_MSG) ||
		    (user == MSG_CRYPTO && type == KEY_DISTR_MSG) ||
		    time_before(jiffies, tx->timer2 + TIPC_TX_GRACE_PERIOD)) {
			if (__rx && __rx->key_master &&
			    !atomic_read(&__rx->peer_rx_active))
				goto encrypt;
			if (!__rx) {
				if (likely(!tx->legacy_user))
					goto encrypt;
				tipc_crypto_clone_msg(net, *skb, b, dst,
						      __dnode, SKB_GRACING);
			}
		}
	}

	/* Else, use the active key if any */
	if (likely(key.active)) {
		tx_key = key.active;
		goto encrypt;
	}

	goto exit;

encrypt:
	aead = tipc_aead_get(tx->aead[tx_key]);
	if (unlikely(!aead))
		goto exit;
	rc = tipc_ehdr_build(net, aead, tx_key, *skb, __rx);
	if (likely(rc > 0))
		rc = tipc_aead_encrypt(aead, *skb, b, dst, __dnode);

exit:
	switch (rc) {
	case 0:
		this_cpu_inc(stats->stat[STAT_OK]);
		break;
	case -EINPROGRESS:
	case -EBUSY:
		this_cpu_inc(stats->stat[STAT_ASYNC]);
		*skb = NULL;
		return rc;
	default:
		this_cpu_inc(stats->stat[STAT_NOK]);
		if (rc == -ENOKEY)
			this_cpu_inc(stats->stat[STAT_NOKEYS]);
		else if (rc == -EKEYREVOKED)
			this_cpu_inc(stats->stat[STAT_BADKEYS]);
		kfree_skb(*skb);
		*skb = NULL;
		break;
	}

	tipc_aead_put(aead);
	return rc;
}

/**
 * tipc_crypto_rcv - Decrypt an encrypted TIPC message from peer
 * @net: struct net
 * @rx: RX crypto handle
 * @skb: input/output message skb pointer
 * @b: bearer where the message has been received
 *
 * If the decryption is successful, the decrypted skb is returned directly or
 * via the callback; the encryption header and auth tag are trimmed out
 * before forwarding to tipc_rcv() via tipc_crypto_rcv_complete(). Otherwise,
 * the skb is freed!
 *
 * Note: the RX key(s) can be re-aligned, or in case no key is suitable, a TX
 * cluster key can be taken for the decryption (recursive).
 *
 * Return:
 * * 0                   : the decryption has successfully completed
 * * -EINPROGRESS/-EBUSY : the decryption is ongoing, a callback will be made
 * * -ENOKEY             : the decryption has failed due to no key
 * * -EBADMSG            : the decryption has failed due to bad message
 * * -ENOMEM             : the decryption has failed due to no memory
 * * < 0                 : the decryption has failed due to other reasons
 */
int tipc_crypto_rcv(struct net *net, struct tipc_crypto *rx,
		    struct sk_buff **skb, struct tipc_bearer *b)
{
	struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
	struct tipc_crypto_stats __percpu *stats;
	struct tipc_aead *aead = NULL;
	struct tipc_key key;
	int rc = -ENOKEY;
	u8 tx_key, n;

	tx_key = ((struct tipc_ehdr *)(*skb)->data)->tx_key;

	/* New peer?
	 * Let's try with a TX key (i.e. cluster mode) & verify the skb first!
	 */
	if (unlikely(!rx || tx_key == KEY_MASTER))
		goto pick_tx;

	/* Pick RX key according to TX key if any */
	key = rx->key;
	if (tx_key == key.active || tx_key == key.pending ||
	    tx_key == key.passive)
		goto decrypt;

	/* Unknown key, let's try to align RX key(s) */
	if (tipc_crypto_key_try_align(rx, tx_key))
		goto decrypt;

pick_tx:
	/* No key suitable? Try to pick one from TX... */
	aead = tipc_crypto_key_pick_tx(tx, rx, *skb, tx_key);
	if (aead)
		goto decrypt;
	goto exit;

decrypt:
	rcu_read_lock();
	if (!aead)
		aead = tipc_aead_get(rx->aead[tx_key]);
	rc = tipc_aead_decrypt(net, aead, *skb, b);
	rcu_read_unlock();

exit:
	stats = ((rx) ?: tx)->stats;
	switch (rc) {
	case 0:
		this_cpu_inc(stats->stat[STAT_OK]);
		break;
	case -EINPROGRESS:
	case -EBUSY:
		this_cpu_inc(stats->stat[STAT_ASYNC]);
		*skb = NULL;
		return rc;
	default:
		this_cpu_inc(stats->stat[STAT_NOK]);
		if (rc == -ENOKEY) {
			kfree_skb(*skb);
			*skb = NULL;
			if (rx) {
				/* Mark rx->nokey only if we dont have a
				 * pending received session key, nor a newer
				 * one i.e. in the next slot.
				 */
				n = key_next(tx_key);
				rx->nokey = !(rx->skey ||
					      rcu_access_pointer(rx->aead[n]));
				pr_debug_ratelimited("%s: nokey %d, key %d/%x\n",
						     rx->name, rx->nokey,
						     tx_key, rx->key.keys);
				tipc_node_put(rx->node);
			}
			this_cpu_inc(stats->stat[STAT_NOKEYS]);
			return rc;
		} else if (rc == -EBADMSG) {
			this_cpu_inc(stats->stat[STAT_BADMSGS]);
		}
		break;
	}

	tipc_crypto_rcv_complete(net, aead, b, skb, rc);
	return rc;
}

static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
				     struct tipc_bearer *b,
				     struct sk_buff **skb, int err)
{
	struct tipc_skb_cb *skb_cb = TIPC_SKB_CB(*skb);
	struct tipc_crypto *rx = aead->crypto;
	struct tipc_aead *tmp = NULL;
	struct tipc_ehdr *ehdr;
	struct tipc_node *n;

	/* Is this completed by TX? */
	if (unlikely(is_tx(aead->crypto))) {
		rx = skb_cb->tx_clone_ctx.rx;
		pr_debug("TX->RX(%s): err %d, aead %p, skb->next %p, flags %x\n",
			 (rx) ? tipc_node_get_id_str(rx->node) : "-", err, aead,
			 (*skb)->next, skb_cb->flags);
		pr_debug("skb_cb [recurs %d, last %p], tx->aead [%p %p %p]\n",
			 skb_cb->tx_clone_ctx.recurs, skb_cb->tx_clone_ctx.last,
			 aead->crypto->aead[1], aead->crypto->aead[2],
			 aead->crypto->aead[3]);
		if (unlikely(err)) {
			if (err == -EBADMSG && (*skb)->next)
				tipc_rcv(net, (*skb)->next, b);
			goto free_skb;
		}

		if (likely((*skb)->next)) {
			kfree_skb((*skb)->next);
			(*skb)->next = NULL;
		}
		ehdr = (struct tipc_ehdr *)(*skb)->data;
		if (!rx) {
			WARN_ON(ehdr->user != LINK_CONFIG);
			n = tipc_node_create(net, 0, ehdr->id, 0xffffu, 0,
					     true);
			rx = tipc_node_crypto_rx(n);
			if (unlikely(!rx))
				goto free_skb;
		}

		/* Ignore cloning if it was the master key */
		if (ehdr->tx_key == KEY_MASTER)
			goto rcv;
		if (tipc_aead_clone(&tmp, aead) < 0)
			goto rcv;
		WARN_ON(!refcount_inc_not_zero(&tmp->refcnt));
		if (tipc_crypto_key_attach(rx, tmp, ehdr->tx_key, false) < 0) {
			tipc_aead_free(&tmp->rcu);
			goto rcv;
		}
		tipc_aead_put(aead);
		aead = tmp;
	}

	if (unlikely(err)) {
		tipc_aead_users_dec((struct tipc_aead __force __rcu *)aead,
				    INT_MIN);
		goto free_skb;
	}

	/* Set the RX key's user */
	tipc_aead_users_set((struct tipc_aead __force __rcu *)aead, 1);

	/* Mark this point, RX works */
	rx->timer1 = jiffies;

rcv:
	/* Remove ehdr & auth. tag prior to tipc_rcv() */
	ehdr = (struct tipc_ehdr *)(*skb)->data;

	/* Mark this point, RX passive still works */
	if (rx->key.passive && ehdr->tx_key == rx->key.passive)
		rx->timer2 = jiffies;

	skb_reset_network_header(*skb);
	skb_pull(*skb, tipc_ehdr_size(ehdr));
	pskb_trim(*skb, (*skb)->len - aead->authsize);

	/* Validate TIPCv2 message */
	if (unlikely(!tipc_msg_validate(skb))) {
		pr_err_ratelimited("Packet dropped after decryption!\n");
		goto free_skb;
	}

	/* Ok, everything's fine, try to synch own keys according to peers' */
	tipc_crypto_key_synch(rx, *skb);

	/* Mark skb decrypted */
	skb_cb->decrypted = 1;

	/* Clear clone cxt if any */
	if (likely(!skb_cb->tx_clone_deferred))
		goto exit;
	skb_cb->tx_clone_deferred = 0;
	memset(&skb_cb->tx_clone_ctx, 0, sizeof(skb_cb->tx_clone_ctx));
	goto exit;

free_skb:
	kfree_skb(*skb);
	*skb = NULL;

exit:
	tipc_aead_put(aead);
	if (rx)
		tipc_node_put(rx->node);
}

static void tipc_crypto_do_cmd(struct net *net, int cmd)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_crypto *tx = tn->crypto_tx, *rx;
	struct list_head *p;
	unsigned int stat;
	int i, j, cpu;
	char buf[200];

	/* Currently only one command is supported */
	switch (cmd) {
	case 0xfff1:
		goto print_stats;
	default:
		return;
	}

print_stats:
	/* Print a header */
	pr_info("\n=============== TIPC Crypto Statistics ===============\n\n");

	/* Print key status */
	pr_info("Key status:\n");
	pr_info("TX(%7.7s)\n%s", tipc_own_id_string(net),
		tipc_crypto_key_dump(tx, buf));

	rcu_read_lock();
	for (p = tn->node_list.next; p != &tn->node_list; p = p->next) {
		rx = tipc_node_crypto_rx_by_list(p);
		pr_info("RX(%7.7s)\n%s", tipc_node_get_id_str(rx->node),
			tipc_crypto_key_dump(rx, buf));
	}
	rcu_read_unlock();

	/* Print crypto statistics */
	for (i = 0, j = 0; i < MAX_STATS; i++)
		j += scnprintf(buf + j, 200 - j, "|%11s ", hstats[i]);
	pr_info("Counter %s", buf);

	memset(buf, '-', 115);
	buf[115] = '\0';
	pr_info("%s\n", buf);

	j = scnprintf(buf, 200, "TX(%7.7s) ", tipc_own_id_string(net));
	for_each_possible_cpu(cpu) {
		for (i = 0; i < MAX_STATS; i++) {
			stat = per_cpu_ptr(tx->stats, cpu)->stat[i];
			j += scnprintf(buf + j, 200 - j, "|%11d ", stat);
		}
		pr_info("%s", buf);
		j = scnprintf(buf, 200, "%12s", " ");
	}

	rcu_read_lock();
	for (p = tn->node_list.next; p != &tn->node_list; p = p->next) {
		rx = tipc_node_crypto_rx_by_list(p);
		j = scnprintf(buf, 200, "RX(%7.7s) ",
			      tipc_node_get_id_str(rx->node));
		for_each_possible_cpu(cpu) {
			for (i = 0; i < MAX_STATS; i++) {
				stat = per_cpu_ptr(rx->stats, cpu)->stat[i];
				j += scnprintf(buf + j, 200 - j, "|%11d ",
					       stat);
			}
			pr_info("%s", buf);
			j = scnprintf(buf, 200, "%12s", " ");
		}
	}
	rcu_read_unlock();

	pr_info("\n======================== Done ========================\n");
}

static char *tipc_crypto_key_dump(struct tipc_crypto *c, char *buf)
{
	struct tipc_key key = c->key;
	struct tipc_aead *aead;
	int k, i = 0;
	char *s;

	for (k = KEY_MIN; k <= KEY_MAX; k++) {
		if (k == KEY_MASTER) {
			if (is_rx(c))
				continue;
			if (time_before(jiffies,
					c->timer2 + TIPC_TX_GRACE_PERIOD))
				s = "ACT";
			else
				s = "PAS";
		} else {
			if (k == key.passive)
				s = "PAS";
			else if (k == key.active)
				s = "ACT";
			else if (k == key.pending)
				s = "PEN";
			else
				s = "-";
		}
		i += scnprintf(buf + i, 200 - i, "\tKey%d: %s", k, s);

		rcu_read_lock();
		aead = rcu_dereference(c->aead[k]);
		if (aead)
			i += scnprintf(buf + i, 200 - i,
				       "{\"0x...%s\", \"%s\"}/%d:%d",
				       aead->hint,
				       (aead->mode == CLUSTER_KEY) ? "c" : "p",
				       atomic_read(&aead->users),
				       refcount_read(&aead->refcnt));
		rcu_read_unlock();
		i += scnprintf(buf + i, 200 - i, "\n");
	}

	if (is_rx(c))
		i += scnprintf(buf + i, 200 - i, "\tPeer RX active: %d\n",
			       atomic_read(&c->peer_rx_active));

	return buf;
}

static char *tipc_key_change_dump(struct tipc_key old, struct tipc_key new,
				  char *buf)
{
	struct tipc_key *key = &old;
	int k, i = 0;
	char *s;

	/* Output format: "[%s %s %s] -> [%s %s %s]", max len = 32 */
again:
	i += scnprintf(buf + i, 32 - i, "[");
	for (k = KEY_1; k <= KEY_3; k++) {
		if (k == key->passive)
			s = "pas";
		else if (k == key->active)
			s = "act";
		else if (k == key->pending)
			s = "pen";
		else
			s = "-";
		i += scnprintf(buf + i, 32 - i,
			       (k != KEY_3) ? "%s " : "%s", s);
	}
	if (key != &new) {
		i += scnprintf(buf + i, 32 - i, "] -> ");
		key = &new;
		goto again;
	}
	i += scnprintf(buf + i, 32 - i, "]");
	return buf;
}
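
/* Example output (illustrative, not from the original source): activating a
 * pending key in slot 2 while the old active key in slot 1 turns passive is
 * dumped as:
 *
 *	[act pen -] -> [pas act -]
 */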

/**
 * tipc_crypto_msg_rcv - Common rcv for crypto messages (i.e. MSG_CRYPTO)
 * @net: struct net
 * @skb: the message buffer
 */
void tipc_crypto_msg_rcv(struct net *net, struct sk_buff *skb)
{
	struct tipc_crypto *rx;
	struct tipc_msg *hdr;

	if (unlikely(skb_linearize(skb)))
		goto exit;

	hdr = buf_msg(skb);
	rx = tipc_node_crypto_rx_by_addr(net, msg_prevnode(hdr));
	if (unlikely(!rx))
		goto exit;

	switch (msg_type(hdr)) {
	case KEY_DISTR_MSG:
		if (tipc_crypto_key_rcv(rx, hdr))
			goto exit;
		break;
	default:
		break;
	}

	tipc_node_put(rx->node);

exit:
	kfree_skb(skb);
}

/**
 * tipc_crypto_key_distr - Distribute a TX key
 * @tx: the TX crypto
 * @key: the key's index to be distributed
 * @dest: the destination tipc node, = NULL if distributing to all nodes
 *
 * Return: 0 in case of success, otherwise < 0
 */
int tipc_crypto_key_distr(struct tipc_crypto *tx, u8 key,
			  struct tipc_node *dest)
{
	struct tipc_aead *aead;
	u32 dnode = tipc_node_get_addr(dest);
	int rc = -ENOKEY;

	if (!sysctl_tipc_key_exchange_enabled)
		return 0;

	if (key) {
		rcu_read_lock();
		aead = tipc_aead_get(tx->aead[key]);
		if (likely(aead)) {
			rc = tipc_crypto_key_xmit(tx->net, aead->key,
						  aead->gen, aead->mode,
						  dnode);
			tipc_aead_put(aead);
		}
		rcu_read_unlock();
	}

	return rc;
}

/**
 * tipc_crypto_key_xmit - Send a session key
 * @net: the struct net
 * @skey: the session key to be sent
 * @gen: the key's generation
 * @mode: the key's mode
 * @dnode: the destination node address, = 0 if broadcasting to all nodes
 *
 * The session key is data-encoded as:
 * [ TIPC header ][ alg_name ][ key length (4 octets, network order) ][ key ]
 *
 * Return: 0 in case of success, otherwise < 0
 */
static int tipc_crypto_key_xmit(struct net *net, struct tipc_aead_key *skey,
				u16 gen, u8 mode, u32 dnode)
{
	struct sk_buff_head pkts;
	struct tipc_msg *hdr;
	struct sk_buff *skb;
	u16 size, cong_link_cnt;
	u8 *data;
	int rc;

	size = tipc_aead_key_size(skey);
	skb = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hdr = buf_msg(skb);
	tipc_msg_init(tipc_own_addr(net), hdr, MSG_CRYPTO, KEY_DISTR_MSG,
		      INT_H_SIZE, dnode);
	msg_set_size(hdr, INT_H_SIZE + size);
	msg_set_key_gen(hdr, gen);
	msg_set_key_mode(hdr, mode);

	data = msg_data(hdr);
	*((__be32 *)(data + TIPC_AEAD_ALG_NAME)) = htonl(skey->keylen);
	memcpy(data, skey->alg_name, TIPC_AEAD_ALG_NAME);
	memcpy(data + TIPC_AEAD_ALG_NAME + sizeof(__be32), skey->key,
	       skey->keylen);

	__skb_queue_head_init(&pkts);
	__skb_queue_tail(&pkts, skb);
	if (dnode)
		rc = tipc_node_xmit(net, &pkts, dnode, 0);
	else
		rc = tipc_bcast_xmit(net, &pkts, &cong_link_cnt);

	return rc;
}
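
/* Wire layout of the key data portion built above (derived from the code;
 * the name field width is TIPC_AEAD_ALG_NAME octets):
 *
 *	offset 0:                       alg_name
 *	offset TIPC_AEAD_ALG_NAME:      keylen, __be32
 *	offset TIPC_AEAD_ALG_NAME + 4:  key material (incl. salt)
 *
 * tipc_crypto_key_rcv() below parses exactly this layout back.
 */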

/**
 * tipc_crypto_key_rcv - Receive a session key
 * @rx: the RX crypto
 * @hdr: the TIPC v2 message incl. the receiving session key in its data
 *
 * This function retrieves the session key from the message of the peer, then
 * schedules a RX work to attach the key to the corresponding RX crypto.
 *
 * Return: "true" if the key has been scheduled for attaching, otherwise
 * "false".
 */
static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr)
{
	struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx;
	struct tipc_aead_key *skey = NULL;
	u16 key_gen = msg_key_gen(hdr);
	u16 size = msg_data_sz(hdr);
	u8 *data = msg_data(hdr);
	unsigned int keylen;

	/* Verify whether the size can exist in the packet */
	if (unlikely(size < sizeof(struct tipc_aead_key) + TIPC_AEAD_KEYLEN_MIN)) {
		pr_debug("%s: message data size is too small\n", rx->name);
		goto exit;
	}

	keylen = ntohl(*((__be32 *)(data + TIPC_AEAD_ALG_NAME)));

	/* Verify the supplied size values */
	if (unlikely(size != keylen + sizeof(struct tipc_aead_key) ||
		     keylen > TIPC_AEAD_KEY_SIZE_MAX)) {
		pr_debug("%s: invalid MSG_CRYPTO key size\n", rx->name);
		goto exit;
	}

	spin_lock(&rx->lock);
	if (unlikely(rx->skey || (key_gen == rx->key_gen && rx->key.keys))) {
		pr_err("%s: key existed <%p>, gen %d vs %d\n", rx->name,
		       rx->skey, key_gen, rx->key_gen);
		goto exit_unlock;
	}

	/* Allocate memory for the key */
	skey = kmalloc(size, GFP_ATOMIC);
	if (unlikely(!skey)) {
		pr_err("%s: unable to allocate memory for skey\n", rx->name);
		goto exit_unlock;
	}

	/* Copy key from msg data */
	skey->keylen = keylen;
	memcpy(skey->alg_name, data, TIPC_AEAD_ALG_NAME);
	memcpy(skey->key, data + TIPC_AEAD_ALG_NAME + sizeof(__be32),
	       skey->keylen);

	rx->key_gen = key_gen;
	rx->skey_mode = msg_key_mode(hdr);
	rx->skey = skey;
	rx->nokey = 0;
	mb(); /* for the above data */

exit_unlock:
	spin_unlock(&rx->lock);

exit:
	/* Schedule the key attaching on this crypto */
	if (likely(skey && queue_delayed_work(tx->wq, &rx->work, 0)))
		return true;

	return false;
}

/**
 * tipc_crypto_work_rx - Scheduled RX works handler
 * @work: the struct RX work
 *
 * The function processes the previously scheduled works:
 * - Key distributing and TX key activating for the peer;
 * - Attaching a received session key.
 */
static void tipc_crypto_work_rx(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct tipc_crypto *rx = container_of(dwork, struct tipc_crypto, work);
	struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx;
	unsigned long delay = msecs_to_jiffies(5000);
	bool resched = false;
	u8 key;
	int rc;

	/* Case 1: Distribute TX key to the peer if scheduled */
	if (atomic_cmpxchg(&rx->key_distr,
			   KEY_DISTR_SCHED,
			   KEY_DISTR_COMPL) == KEY_DISTR_SCHED) {
		/* Always pick the newest one for distributing */
		key = tx->key.pending ?: tx->key.active;
		rc = tipc_crypto_key_distr(tx, key, rx->node);
		if (unlikely(rc))
			pr_warn("%s: unable to distr key[%d] to %s, err %d\n",
				tx->name, key, tipc_node_get_id_str(rx->node),
				rc);

		/* Sched for key_distr releasing */
		resched = true;
	} else {
		atomic_cmpxchg(&rx->key_distr, KEY_DISTR_COMPL, 0);
	}

	/* Case 2: Attach a pending received session key from peer if any */
	if (rx->skey) {
		rc = tipc_crypto_key_init(rx, rx->skey, rx->skey_mode, false);
		if (unlikely(rc < 0))
			pr_warn("%s: unable to attach received skey, err %d\n",
				rx->name, rc);
		switch (rc) {
		case -EBUSY:
		case -ENOMEM:
			/* Resched the key attaching */
			resched = true;
			break;
		default:
			synchronize_rcu();
			kfree(rx->skey);
			rx->skey = NULL;
			break;
		}
	}

	if (resched && queue_delayed_work(tx->wq, &rx->work, delay))
		return;

	tipc_node_put(rx->node);
}

/**
 * tipc_crypto_rekeying_sched - (Re)schedule rekeying with or w/o new interval
 * @tx: TX crypto
 * @changed: if the rekeying needs to be rescheduled with a new interval
 * @new_intv: new rekeying interval (only valid when "changed" = true)
 */
void tipc_crypto_rekeying_sched(struct tipc_crypto *tx, bool changed,
				u32 new_intv)
{
	unsigned long delay;
	bool now = false;

	if (changed) {
		if (new_intv == TIPC_REKEYING_NOW)
			now = true;
		else
			tx->rekeying_intv = new_intv;
		cancel_delayed_work_sync(&tx->work);
	}

	if (tx->rekeying_intv || now) {
		delay = (now) ? 0 : tx->rekeying_intv * 60 * 1000;
		queue_delayed_work(tx->wq, &tx->work, msecs_to_jiffies(delay));
	}
}

/**
 * tipc_crypto_work_tx - Scheduled TX works handler
 * @work: the struct TX work
 *
 * The function processes the previously scheduled work, i.e. rekeying: it
 * generates a new session key based on the current one, attaches it to the
 * TX crypto and finally distributes it to the peers. It also re-schedules
 * the rekeying if needed.
 */
static void tipc_crypto_work_tx(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct tipc_crypto *tx = container_of(dwork, struct tipc_crypto, work);
	struct tipc_aead_key *skey = NULL;
	struct tipc_key key = tx->key;
	struct tipc_aead *aead;
	int rc = -ENOMEM;

	if (unlikely(key.pending))
		goto resched;

	/* Take current key as a template */
	rcu_read_lock();
	aead = rcu_dereference(tx->aead[key.active ?: KEY_MASTER]);
	if (unlikely(!aead)) {
		rcu_read_unlock();
		/* At least one key should exist for securing */
		return;
	}

	/* Lets duplicate it first */
	skey = kmemdup(aead->key, tipc_aead_key_size(aead->key), GFP_ATOMIC);
	rcu_read_unlock();

	/* Now, generate new key, initiate & distribute it */
	if (likely(skey)) {
		rc = tipc_aead_key_generate(skey) ?:
		     tipc_crypto_key_init(tx, skey, PER_NODE_KEY, false);
		if (likely(rc > 0))
			rc = tipc_crypto_key_distr(tx, rc, NULL);
		kfree_sensitive(skey);
	}

	if (unlikely(rc))
		pr_warn_ratelimited("%s: rekeying returns %d\n", tx->name, rc);

resched:
	/* Re-schedule rekeying if any */
	tipc_crypto_rekeying_sched(tx, false, 0);
}