1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
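/* net/tipc/crypto.c: TIPC message encryption/decryption (AEAD) */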
37#include <crypto/aead.h>
38#include <crypto/aes.h>
39#include <crypto/rng.h>
40#include "crypto.h"
41#include "msg.h"
42#include "bcast.h"
43
44#define TIPC_TX_GRACE_PERIOD msecs_to_jiffies(5000)
45#define TIPC_TX_LASTING_TIME msecs_to_jiffies(10000)
46#define TIPC_RX_ACTIVE_LIM msecs_to_jiffies(3000)
47#define TIPC_RX_PASSIVE_LIM msecs_to_jiffies(15000)
48
49#define TIPC_MAX_TFMS_DEF 10
50#define TIPC_MAX_TFMS_LIM 1000
51
52#define TIPC_REKEYING_INTV_DEF (60 * 24)
53
54
55
56
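/* TIPC key ids: the master key (0) plus up to three session keys */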
57enum {
58 KEY_MASTER = 0,
59 KEY_MIN = KEY_MASTER,
60 KEY_1 = 1,
61 KEY_2,
62 KEY_3,
63 KEY_MAX = KEY_3,
64};
65
66
67
68
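/* TIPC crypto statistics' counter ids */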
69enum {
70 STAT_OK,
71 STAT_NOK,
72 STAT_ASYNC,
73 STAT_ASYNC_OK,
74 STAT_ASYNC_NOK,
75 STAT_BADKEYS,
76 STAT_BADMSGS = STAT_BADKEYS,
77 STAT_NOKEYS,
78 STAT_SWITCHES,
79
80 MAX_STATS,
81};
82
83
84static const char *hstats[MAX_STATS] = {"ok", "nok", "async", "async_ok",
85 "async_nok", "badmsgs", "nokeys",
86 "switches"};
87
88
89int sysctl_tipc_max_tfms __read_mostly = TIPC_MAX_TFMS_DEF;
90
91int sysctl_tipc_key_exchange_enabled __read_mostly = 1;
92
93
94
95
96
97
98
99
100
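/**
 * struct tipc_key - TIPC key states' indicator
 * @pending: key id of the key being negotiated or probed
 * @active: key id of the key currently in use
 * @passive: key id of the previously active key, kept for late traffic
 * @reserved: reserved bits
 * @keys: all of the above states packed into one octet
 */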
101struct tipc_key {
102#define KEY_BITS (2)
103#define KEY_MASK ((1 << KEY_BITS) - 1)
104 union {
105 struct {
106#if defined(__LITTLE_ENDIAN_BITFIELD)
107 u8 pending:2,
108 active:2,
109 passive:2,
110 reserved:2;
111#elif defined(__BIG_ENDIAN_BITFIELD)
112 u8 reserved:2,
113 passive:2,
114 active:2,
115 pending:2;
116#else
117#error "Please fix <asm/byteorder.h>"
118#endif
119 } __packed;
120 u8 keys;
121 };
122};
123
124
125
126
127
128
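/**
 * struct tipc_tfm - one AEAD TFM entry in a key's TFM list
 * @tfm: the allocated crypto_aead TFM
 * @list: linkage in the key's circular TFM list
 */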
129struct tipc_tfm {
130 struct crypto_aead *tfm;
131 struct list_head list;
132};
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
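/**
 * struct tipc_aead - TIPC AEAD key structure
 * @tfm_entry: per-CPU pointer into the circular list of AEAD TFMs
 * @crypto: TIPC crypto owning this key
 * @cloned: source key, in case this is a cloned one
 * @users: number of the key users (TX/RX)
 * @salt: the key's SALT value
 * @authsize: authentication tag size (max = 16)
 * @mode: crypto mode, cluster or per-node key
 * @hint: hex dump of the user key's last bytes, used as a printable hint
 * @rcu: struct rcu_head
 * @key: copy of the user key, kept for later key exchange
 * @gen: the key's generation
 * @seqno: the key's sequence number, used to build the nonce
 * @refcnt: the key's reference counter
 */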
150struct tipc_aead {
151#define TIPC_AEAD_HINT_LEN (5)
152 struct tipc_tfm * __percpu *tfm_entry;
153 struct tipc_crypto *crypto;
154 struct tipc_aead *cloned;
155 atomic_t users;
156 u32 salt;
157 u8 authsize;
158 u8 mode;
159 char hint[2 * TIPC_AEAD_HINT_LEN + 1];
160 struct rcu_head rcu;
161 struct tipc_aead_key *key;
162 u16 gen;
163
164 atomic64_t seqno ____cacheline_aligned;
165 refcount_t refcnt ____cacheline_aligned;
166
167} ____cacheline_aligned;
168
169
170
171
172
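/**
 * struct tipc_crypto_stats - TIPC per-CPU crypto statistics
 * @stat: array of the statistics counters
 */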
173struct tipc_crypto_stats {
174 unsigned int stat[MAX_STATS];
175};
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
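/**
 * struct tipc_crypto - TIPC TX/RX crypto structure
 * @net: struct net
 * @node: TIPC node (RX only)
 * @aead: array of the attached AEAD keys (master key plus keys 1..3)
 * @peer_rx_active: replica of the peer's RX active key id
 * @key_gen: TX key generation
 * @key: the key states
 * @skey_mode: mode of a received session key
 * @skey: received session key, attached later by the RX worker
 * @wq: ordered workqueue (allocated for the TX crypto only), also used to
 *      run the RX workers
 * @work: delayed work for TX rekeying or RX key distributing/attaching
 * @key_distr: key distributing state
 * @rekeying_intv: TX rekeying interval (in minutes)
 * @stats: the crypto statistics
 * @name: the crypto name (for logging)
 * @sndnxt: per-peer sndnxt (TX)
 * @timer1: general timer 1 (jiffies)
 * @timer2: general timer 2 (jiffies)
 * @working: the crypto is working or not
 * @key_master: flag indicating if a master key exists
 * @legacy_user: flag indicating a peer without master key (backward compat.)
 * @nokey: no key indication
 * @flags: union of the above flags
 * @lock: the key/state lock
 */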
203struct tipc_crypto {
204 struct net *net;
205 struct tipc_node *node;
206 struct tipc_aead __rcu *aead[KEY_MAX + 1];
207 atomic_t peer_rx_active;
208 u16 key_gen;
209 struct tipc_key key;
210 u8 skey_mode;
211 struct tipc_aead_key *skey;
212 struct workqueue_struct *wq;
213 struct delayed_work work;
214#define KEY_DISTR_SCHED 1
215#define KEY_DISTR_COMPL 2
216 atomic_t key_distr;
217 u32 rekeying_intv;
218
219 struct tipc_crypto_stats __percpu *stats;
220 char name[48];
221
222 atomic64_t sndnxt ____cacheline_aligned;
223 unsigned long timer1;
224 unsigned long timer2;
225 union {
226 struct {
227 u8 working:1;
228 u8 key_master:1;
229 u8 legacy_user:1;
230 u8 nokey: 1;
231 };
232 u8 flags;
233 };
234 spinlock_t lock;
235
236} ____cacheline_aligned;
237
238
239struct tipc_crypto_tx_ctx {
240 struct tipc_aead *aead;
241 struct tipc_bearer *bearer;
242 struct tipc_media_addr dst;
243};
244
245
246struct tipc_crypto_rx_ctx {
247 struct tipc_aead *aead;
248 struct tipc_bearer *bearer;
249};
250
251static struct tipc_aead *tipc_aead_get(struct tipc_aead __rcu *aead);
252static inline void tipc_aead_put(struct tipc_aead *aead);
253static void tipc_aead_free(struct rcu_head *rp);
254static int tipc_aead_users(struct tipc_aead __rcu *aead);
255static void tipc_aead_users_inc(struct tipc_aead __rcu *aead, int lim);
256static void tipc_aead_users_dec(struct tipc_aead __rcu *aead, int lim);
257static void tipc_aead_users_set(struct tipc_aead __rcu *aead, int val);
258static struct crypto_aead *tipc_aead_tfm_next(struct tipc_aead *aead);
259static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey,
260 u8 mode);
261static int tipc_aead_clone(struct tipc_aead **dst, struct tipc_aead *src);
262static void *tipc_aead_mem_alloc(struct crypto_aead *tfm,
263 unsigned int crypto_ctx_size,
264 u8 **iv, struct aead_request **req,
265 struct scatterlist **sg, int nsg);
266static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb,
267 struct tipc_bearer *b,
268 struct tipc_media_addr *dst,
269 struct tipc_node *__dnode);
270static void tipc_aead_encrypt_done(struct crypto_async_request *base, int err);
271static int tipc_aead_decrypt(struct net *net, struct tipc_aead *aead,
272 struct sk_buff *skb, struct tipc_bearer *b);
273static void tipc_aead_decrypt_done(struct crypto_async_request *base, int err);
274static inline int tipc_ehdr_size(struct tipc_ehdr *ehdr);
275static int tipc_ehdr_build(struct net *net, struct tipc_aead *aead,
276 u8 tx_key, struct sk_buff *skb,
277 struct tipc_crypto *__rx);
278static inline void tipc_crypto_key_set_state(struct tipc_crypto *c,
279 u8 new_passive,
280 u8 new_active,
281 u8 new_pending);
282static int tipc_crypto_key_attach(struct tipc_crypto *c,
283 struct tipc_aead *aead, u8 pos,
284 bool master_key);
285static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending);
286static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx,
287 struct tipc_crypto *rx,
288 struct sk_buff *skb,
289 u8 tx_key);
290static void tipc_crypto_key_synch(struct tipc_crypto *rx, struct sk_buff *skb);
291static int tipc_crypto_key_revoke(struct net *net, u8 tx_key);
292static inline void tipc_crypto_clone_msg(struct net *net, struct sk_buff *_skb,
293 struct tipc_bearer *b,
294 struct tipc_media_addr *dst,
295 struct tipc_node *__dnode, u8 type);
296static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
297 struct tipc_bearer *b,
298 struct sk_buff **skb, int err);
299static void tipc_crypto_do_cmd(struct net *net, int cmd);
300static char *tipc_crypto_key_dump(struct tipc_crypto *c, char *buf);
301static char *tipc_key_change_dump(struct tipc_key old, struct tipc_key new,
302 char *buf);
303static int tipc_crypto_key_xmit(struct net *net, struct tipc_aead_key *skey,
304 u16 gen, u8 mode, u32 dnode);
305static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr);
306static void tipc_crypto_work_tx(struct work_struct *work);
307static void tipc_crypto_work_rx(struct work_struct *work);
308static int tipc_aead_key_generate(struct tipc_aead_key *skey);
309
310#define is_tx(crypto) (!(crypto)->node)
311#define is_rx(crypto) (!is_tx(crypto))
312
313#define key_next(cur) ((cur) % KEY_MAX + 1)
314
315#define tipc_aead_rcu_ptr(rcu_ptr, lock) \
316 rcu_dereference_protected((rcu_ptr), lockdep_is_held(lock))
317
318#define tipc_aead_rcu_replace(rcu_ptr, ptr, lock) \
319do { \
320 struct tipc_aead *__tmp = rcu_dereference_protected((rcu_ptr), \
321 lockdep_is_held(lock)); \
322 rcu_assign_pointer((rcu_ptr), (ptr)); \
323 tipc_aead_put(__tmp); \
324} while (0)
325
326#define tipc_crypto_key_detach(rcu_ptr, lock) \
327 tipc_aead_rcu_replace((rcu_ptr), NULL, lock)
328
329
330
331
332
333
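/**
 * tipc_aead_key_validate - Validate a user AEAD key
 * @ukey: the user key to be validated
 * @info: the netlink info used for error reporting
 *
 * The algorithm must be loadable and currently only "gcm(aes)" is accepted,
 * with a key length of 16, 24 or 32 octets plus the 4-octet salt.
 *
 * Return: 0 if the key is usable, otherwise a negative error code.
 */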
334int tipc_aead_key_validate(struct tipc_aead_key *ukey, struct genl_info *info)
335{
336 int keylen;
337
338
339 if (unlikely(!crypto_has_alg(ukey->alg_name, 0, 0))) {
		GENL_SET_ERR_MSG(info, "unable to load the algorithm (is the module present?)");
341 return -ENODEV;
342 }
343
344
345 if (strcmp(ukey->alg_name, "gcm(aes)")) {
		GENL_SET_ERR_MSG(info, "the algorithm is not supported yet");
347 return -ENOTSUPP;
348 }
349
350
351 keylen = ukey->keylen - TIPC_AES_GCM_SALT_SIZE;
352 if (unlikely(keylen != TIPC_AES_GCM_KEY_SIZE_128 &&
353 keylen != TIPC_AES_GCM_KEY_SIZE_192 &&
354 keylen != TIPC_AES_GCM_KEY_SIZE_256)) {
355 GENL_SET_ERR_MSG(info, "incorrect key length (20, 28 or 36 octets?)");
356 return -EKEYREJECTED;
357 }
358
359 return 0;
360}
361
362
363
364
365
366
367
368static int tipc_aead_key_generate(struct tipc_aead_key *skey)
369{
370 int rc = 0;
371
372
373 rc = crypto_get_default_rng();
374 if (likely(!rc)) {
375 rc = crypto_rng_get_bytes(crypto_default_rng, skey->key,
376 skey->keylen);
377 crypto_put_default_rng();
378 }
379
380 return rc;
381}
382
383static struct tipc_aead *tipc_aead_get(struct tipc_aead __rcu *aead)
384{
385 struct tipc_aead *tmp;
386
387 rcu_read_lock();
388 tmp = rcu_dereference(aead);
389 if (unlikely(!tmp || !refcount_inc_not_zero(&tmp->refcnt)))
390 tmp = NULL;
391 rcu_read_unlock();
392
393 return tmp;
394}
395
396static inline void tipc_aead_put(struct tipc_aead *aead)
397{
398 if (aead && refcount_dec_and_test(&aead->refcnt))
399 call_rcu(&aead->rcu, tipc_aead_free);
400}
401
402
403
404
405
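/**
 * tipc_aead_free - Release an AEAD key and all the TFMs in its list
 * @rp: the rcu_head of the key being released
 *
 * RCU callback invoked once the key's reference count has dropped to zero.
 * For a cloned key only the reference to the source key is dropped.
 */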
406static void tipc_aead_free(struct rcu_head *rp)
407{
408 struct tipc_aead *aead = container_of(rp, struct tipc_aead, rcu);
409 struct tipc_tfm *tfm_entry, *head, *tmp;
410
411 if (aead->cloned) {
412 tipc_aead_put(aead->cloned);
413 } else {
414 head = *get_cpu_ptr(aead->tfm_entry);
415 put_cpu_ptr(aead->tfm_entry);
416 list_for_each_entry_safe(tfm_entry, tmp, &head->list, list) {
417 crypto_free_aead(tfm_entry->tfm);
418 list_del(&tfm_entry->list);
419 kfree(tfm_entry);
420 }
421
422 crypto_free_aead(head->tfm);
423 list_del(&head->list);
424 kfree(head);
425 }
426 free_percpu(aead->tfm_entry);
427 kfree_sensitive(aead->key);
428 kfree(aead);
429}
430
431static int tipc_aead_users(struct tipc_aead __rcu *aead)
432{
433 struct tipc_aead *tmp;
434 int users = 0;
435
436 rcu_read_lock();
437 tmp = rcu_dereference(aead);
438 if (tmp)
439 users = atomic_read(&tmp->users);
440 rcu_read_unlock();
441
442 return users;
443}
444
445static void tipc_aead_users_inc(struct tipc_aead __rcu *aead, int lim)
446{
447 struct tipc_aead *tmp;
448
449 rcu_read_lock();
450 tmp = rcu_dereference(aead);
451 if (tmp)
452 atomic_add_unless(&tmp->users, 1, lim);
453 rcu_read_unlock();
454}
455
456static void tipc_aead_users_dec(struct tipc_aead __rcu *aead, int lim)
457{
458 struct tipc_aead *tmp;
459
460 rcu_read_lock();
461 tmp = rcu_dereference(aead);
462 if (tmp)
		atomic_add_unless(&tmp->users, -1, lim);
464 rcu_read_unlock();
465}
466
467static void tipc_aead_users_set(struct tipc_aead __rcu *aead, int val)
468{
469 struct tipc_aead *tmp;
470 int cur;
471
472 rcu_read_lock();
473 tmp = rcu_dereference(aead);
474 if (tmp) {
475 do {
476 cur = atomic_read(&tmp->users);
477 if (cur == val)
478 break;
479 } while (atomic_cmpxchg(&tmp->users, cur, val) != cur);
480 }
481 rcu_read_unlock();
482}
483
484
485
486
487
488static struct crypto_aead *tipc_aead_tfm_next(struct tipc_aead *aead)
489{
490 struct tipc_tfm **tfm_entry;
491 struct crypto_aead *tfm;
492
493 tfm_entry = get_cpu_ptr(aead->tfm_entry);
494 *tfm_entry = list_next_entry(*tfm_entry, list);
495 tfm = (*tfm_entry)->tfm;
496 put_cpu_ptr(tfm_entry);
497
498 return tfm;
499}
500
501
502
503
504
505
506
507
508
509
510
511
512
513
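/**
 * tipc_aead_init - Initiate an AEAD key from a user key
 * @aead: returned new AEAD key handle
 * @ukey: the user key
 * @mode: the key mode (cluster or per-node key)
 *
 * Allocates the key structure along with a circular list of up to
 * "sysctl_tipc_max_tfms" AEAD TFMs, sets the key and authsize on each TFM
 * and keeps a copy of the user key for later key exchange.
 *
 * Return: 0 on success, otherwise a negative error code.
 */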
514static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey,
515 u8 mode)
516{
517 struct tipc_tfm *tfm_entry, *head;
518 struct crypto_aead *tfm;
519 struct tipc_aead *tmp;
520 int keylen, err, cpu;
521 int tfm_cnt = 0;
522
523 if (unlikely(*aead))
524 return -EEXIST;
525
526
527 tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
528 if (unlikely(!tmp))
529 return -ENOMEM;
530
531
532 keylen = ukey->keylen - TIPC_AES_GCM_SALT_SIZE;
533
534
535 tmp->tfm_entry = alloc_percpu(struct tipc_tfm *);
536 if (!tmp->tfm_entry) {
537 kfree_sensitive(tmp);
538 return -ENOMEM;
539 }
540
541
542 do {
543 tfm = crypto_alloc_aead(ukey->alg_name, 0, 0);
544 if (IS_ERR(tfm)) {
545 err = PTR_ERR(tfm);
546 break;
547 }
548
549 if (unlikely(!tfm_cnt &&
550 crypto_aead_ivsize(tfm) != TIPC_AES_GCM_IV_SIZE)) {
551 crypto_free_aead(tfm);
552 err = -ENOTSUPP;
553 break;
554 }
555
556 err = crypto_aead_setauthsize(tfm, TIPC_AES_GCM_TAG_SIZE);
557 err |= crypto_aead_setkey(tfm, ukey->key, keylen);
558 if (unlikely(err)) {
559 crypto_free_aead(tfm);
560 break;
561 }
562
563 tfm_entry = kmalloc(sizeof(*tfm_entry), GFP_KERNEL);
564 if (unlikely(!tfm_entry)) {
565 crypto_free_aead(tfm);
566 err = -ENOMEM;
567 break;
568 }
569 INIT_LIST_HEAD(&tfm_entry->list);
570 tfm_entry->tfm = tfm;
571
572
573 if (!tfm_cnt) {
574 head = tfm_entry;
575 for_each_possible_cpu(cpu) {
576 *per_cpu_ptr(tmp->tfm_entry, cpu) = head;
577 }
578 } else {
579 list_add_tail(&tfm_entry->list, &head->list);
580 }
581
582 } while (++tfm_cnt < sysctl_tipc_max_tfms);
583
584
585 if (!tfm_cnt) {
586 free_percpu(tmp->tfm_entry);
587 kfree_sensitive(tmp);
588 return err;
589 }
590
591
592 bin2hex(tmp->hint, ukey->key + keylen - TIPC_AEAD_HINT_LEN,
593 TIPC_AEAD_HINT_LEN);
594
595
596 tmp->mode = mode;
597 tmp->cloned = NULL;
598 tmp->authsize = TIPC_AES_GCM_TAG_SIZE;
	tmp->key = kmemdup(ukey, tipc_aead_key_size(ukey), GFP_KERNEL);
	if (unlikely(!tmp->key)) {
		tipc_aead_free(&tmp->rcu);
		return -ENOMEM;
	}
	memcpy(&tmp->salt, ukey->key + keylen, TIPC_AES_GCM_SALT_SIZE);
601 atomic_set(&tmp->users, 0);
602 atomic64_set(&tmp->seqno, 0);
603 refcount_set(&tmp->refcnt, 1);
604
605 *aead = tmp;
606 return 0;
607}
608
609
610
611
612
613
614
615
616
617
618
619
620
621
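/**
 * tipc_aead_clone - Clone a cluster AEAD key
 * @dst: returned new cloned key handle
 * @src: the source key to be cloned
 *
 * The clone shares the TFM list with the source key and holds a reference
 * to it, so the source is only freed once all clones are gone.
 *
 * Return: 0 on success, otherwise a negative error code.
 */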
622static int tipc_aead_clone(struct tipc_aead **dst, struct tipc_aead *src)
623{
624 struct tipc_aead *aead;
625 int cpu;
626
627 if (!src)
628 return -ENOKEY;
629
630 if (src->mode != CLUSTER_KEY)
631 return -EINVAL;
632
633 if (unlikely(*dst))
634 return -EEXIST;
635
636 aead = kzalloc(sizeof(*aead), GFP_ATOMIC);
637 if (unlikely(!aead))
638 return -ENOMEM;
639
640 aead->tfm_entry = alloc_percpu_gfp(struct tipc_tfm *, GFP_ATOMIC);
641 if (unlikely(!aead->tfm_entry)) {
642 kfree_sensitive(aead);
643 return -ENOMEM;
644 }
645
646 for_each_possible_cpu(cpu) {
647 *per_cpu_ptr(aead->tfm_entry, cpu) =
648 *per_cpu_ptr(src->tfm_entry, cpu);
649 }
650
651 memcpy(aead->hint, src->hint, sizeof(src->hint));
652 aead->mode = src->mode;
653 aead->salt = src->salt;
654 aead->authsize = src->authsize;
655 atomic_set(&aead->users, 0);
656 atomic64_set(&aead->seqno, 0);
657 refcount_set(&aead->refcnt, 1);
658
659 WARN_ON(!refcount_inc_not_zero(&src->refcnt));
660 aead->cloned = src;
661
662 *dst = aead;
663 return 0;
664}
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
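/**
 * tipc_aead_mem_alloc - Allocate one memory chunk for a crypto request
 * @tfm: the AEAD TFM the request is built for
 * @crypto_ctx_size: size of the TX/RX context placed at the head
 * @iv: returned pointer to the IV area
 * @req: returned pointer to the AEAD request
 * @sg: returned pointer to the scatterlist array
 * @nsg: number of scatterlist entries
 *
 * The chunk holds the crypto context, IV, AEAD request and scatterlist
 * back to back, each suitably aligned, so a single kfree() releases it all.
 */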
681static void *tipc_aead_mem_alloc(struct crypto_aead *tfm,
682 unsigned int crypto_ctx_size,
683 u8 **iv, struct aead_request **req,
684 struct scatterlist **sg, int nsg)
685{
686 unsigned int iv_size, req_size;
687 unsigned int len;
688 u8 *mem;
689
690 iv_size = crypto_aead_ivsize(tfm);
691 req_size = sizeof(**req) + crypto_aead_reqsize(tfm);
692
693 len = crypto_ctx_size;
694 len += iv_size;
695 len += crypto_aead_alignmask(tfm) & ~(crypto_tfm_ctx_alignment() - 1);
696 len = ALIGN(len, crypto_tfm_ctx_alignment());
697 len += req_size;
698 len = ALIGN(len, __alignof__(struct scatterlist));
699 len += nsg * sizeof(**sg);
700
701 mem = kmalloc(len, GFP_ATOMIC);
702 if (!mem)
703 return NULL;
704
705 *iv = (u8 *)PTR_ALIGN(mem + crypto_ctx_size,
706 crypto_aead_alignmask(tfm) + 1);
707 *req = (struct aead_request *)PTR_ALIGN(*iv + iv_size,
708 crypto_tfm_ctx_alignment());
709 *sg = (struct scatterlist *)PTR_ALIGN((u8 *)*req + req_size,
710 __alignof__(struct scatterlist));
711
712 return (void *)mem;
713}
714
715
716
717
718
719
720
721
722
723
724
725
726
727
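/**
 * tipc_aead_encrypt - Encrypt a message
 * @aead: the TX AEAD key to be used
 * @skb: the message skb, already carrying the encryption header
 * @b: the TX bearer
 * @dst: the destination media address
 * @__dnode: TIPC destination node, if any
 *
 * Return: 0 when the encryption has completed inline, -EINPROGRESS or
 * -EBUSY when it runs asynchronously (the callback transmits the skb), or
 * a negative error code on failure.
 */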
728static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb,
729 struct tipc_bearer *b,
730 struct tipc_media_addr *dst,
731 struct tipc_node *__dnode)
732{
733 struct crypto_aead *tfm = tipc_aead_tfm_next(aead);
734 struct tipc_crypto_tx_ctx *tx_ctx;
735 struct aead_request *req;
736 struct sk_buff *trailer;
737 struct scatterlist *sg;
738 struct tipc_ehdr *ehdr;
739 int ehsz, len, tailen, nsg, rc;
740 void *ctx;
741 u32 salt;
742 u8 *iv;
743
744
745 len = ALIGN(skb->len, 4);
746 tailen = len - skb->len + aead->authsize;
747
748
749
750
751
752
753
754 SKB_LINEAR_ASSERT(skb);
755 if (tailen > skb_tailroom(skb)) {
		pr_debug("TX: skb tailroom (%d) is not enough, requires %d\n",
			 skb_tailroom(skb), tailen);
758 }
759
760 if (unlikely(!skb_cloned(skb) && tailen <= skb_tailroom(skb))) {
761 nsg = 1;
762 trailer = skb;
763 } else {
764
765
766
767
768
769
770 nsg = skb_cow_data(skb, tailen, &trailer);
771 if (unlikely(nsg < 0)) {
772 pr_err("TX: skb_cow_data() returned %d\n", nsg);
773 return nsg;
774 }
775 }
776
777 pskb_put(skb, trailer, tailen);
778
779
780 ctx = tipc_aead_mem_alloc(tfm, sizeof(*tx_ctx), &iv, &req, &sg, nsg);
781 if (unlikely(!ctx))
782 return -ENOMEM;
783 TIPC_SKB_CB(skb)->crypto_ctx = ctx;
784
785
786 sg_init_table(sg, nsg);
787 rc = skb_to_sgvec(skb, sg, 0, skb->len);
788 if (unlikely(rc < 0)) {
789 pr_err("TX: skb_to_sgvec() returned %d, nsg %d!\n", rc, nsg);
790 goto exit;
791 }
792
793
794
795
796
797
798 ehdr = (struct tipc_ehdr *)skb->data;
799 salt = aead->salt;
800 if (aead->mode == CLUSTER_KEY)
801 salt ^= __be32_to_cpu(ehdr->addr);
802 else if (__dnode)
803 salt ^= tipc_node_get_addr(__dnode);
804 memcpy(iv, &salt, 4);
805 memcpy(iv + 4, (u8 *)&ehdr->seqno, 8);
806
807
808 ehsz = tipc_ehdr_size(ehdr);
809 aead_request_set_tfm(req, tfm);
810 aead_request_set_ad(req, ehsz);
811 aead_request_set_crypt(req, sg, sg, len - ehsz, iv);
812
813
814 aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
815 tipc_aead_encrypt_done, skb);
816 tx_ctx = (struct tipc_crypto_tx_ctx *)ctx;
817 tx_ctx->aead = aead;
818 tx_ctx->bearer = b;
819 memcpy(&tx_ctx->dst, dst, sizeof(*dst));
820
821
822 if (unlikely(!tipc_bearer_hold(b))) {
823 rc = -ENODEV;
824 goto exit;
825 }
826
827
828 rc = crypto_aead_encrypt(req);
829 if (rc == -EINPROGRESS || rc == -EBUSY)
830 return rc;
831
832 tipc_bearer_put(b);
833
834exit:
835 kfree(ctx);
836 TIPC_SKB_CB(skb)->crypto_ctx = NULL;
837 return rc;
838}
839
840static void tipc_aead_encrypt_done(struct crypto_async_request *base, int err)
841{
842 struct sk_buff *skb = base->data;
843 struct tipc_crypto_tx_ctx *tx_ctx = TIPC_SKB_CB(skb)->crypto_ctx;
844 struct tipc_bearer *b = tx_ctx->bearer;
845 struct tipc_aead *aead = tx_ctx->aead;
846 struct tipc_crypto *tx = aead->crypto;
847 struct net *net = tx->net;
848
849 switch (err) {
850 case 0:
851 this_cpu_inc(tx->stats->stat[STAT_ASYNC_OK]);
852 rcu_read_lock();
853 if (likely(test_bit(0, &b->up)))
854 b->media->send_msg(net, skb, b, &tx_ctx->dst);
855 else
856 kfree_skb(skb);
857 rcu_read_unlock();
858 break;
859 case -EINPROGRESS:
860 return;
861 default:
862 this_cpu_inc(tx->stats->stat[STAT_ASYNC_NOK]);
863 kfree_skb(skb);
864 break;
865 }
866
867 kfree(tx_ctx);
868 tipc_bearer_put(b);
869 tipc_aead_put(aead);
870}
871
872
873
874
875
876
877
878
879
880
881
882
883
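/**
 * tipc_aead_decrypt - Decrypt an encrypted message
 * @net: struct net
 * @aead: the RX AEAD key to be used
 * @skb: the message skb
 * @b: the RX bearer
 *
 * Return: 0 when the decryption has completed inline, -EINPROGRESS or
 * -EBUSY when it runs asynchronously, or a negative error code on failure.
 */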
884static int tipc_aead_decrypt(struct net *net, struct tipc_aead *aead,
885 struct sk_buff *skb, struct tipc_bearer *b)
886{
887 struct tipc_crypto_rx_ctx *rx_ctx;
888 struct aead_request *req;
889 struct crypto_aead *tfm;
890 struct sk_buff *unused;
891 struct scatterlist *sg;
892 struct tipc_ehdr *ehdr;
893 int ehsz, nsg, rc;
894 void *ctx;
895 u32 salt;
896 u8 *iv;
897
898 if (unlikely(!aead))
899 return -ENOKEY;
900
901 nsg = skb_cow_data(skb, 0, &unused);
902 if (unlikely(nsg < 0)) {
903 pr_err("RX: skb_cow_data() returned %d\n", nsg);
904 return nsg;
905 }
906
907
908 tfm = tipc_aead_tfm_next(aead);
909 ctx = tipc_aead_mem_alloc(tfm, sizeof(*rx_ctx), &iv, &req, &sg, nsg);
910 if (unlikely(!ctx))
911 return -ENOMEM;
912 TIPC_SKB_CB(skb)->crypto_ctx = ctx;
913
914
915 sg_init_table(sg, nsg);
916 rc = skb_to_sgvec(skb, sg, 0, skb->len);
917 if (unlikely(rc < 0)) {
918 pr_err("RX: skb_to_sgvec() returned %d, nsg %d\n", rc, nsg);
919 goto exit;
920 }
921
922
923 ehdr = (struct tipc_ehdr *)skb->data;
924 salt = aead->salt;
925 if (aead->mode == CLUSTER_KEY)
926 salt ^= __be32_to_cpu(ehdr->addr);
927 else if (ehdr->destined)
928 salt ^= tipc_own_addr(net);
929 memcpy(iv, &salt, 4);
930 memcpy(iv + 4, (u8 *)&ehdr->seqno, 8);
931
932
933 ehsz = tipc_ehdr_size(ehdr);
934 aead_request_set_tfm(req, tfm);
935 aead_request_set_ad(req, ehsz);
936 aead_request_set_crypt(req, sg, sg, skb->len - ehsz, iv);
937
938
939 aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
940 tipc_aead_decrypt_done, skb);
941 rx_ctx = (struct tipc_crypto_rx_ctx *)ctx;
942 rx_ctx->aead = aead;
943 rx_ctx->bearer = b;
944
945
946 if (unlikely(!tipc_bearer_hold(b))) {
947 rc = -ENODEV;
948 goto exit;
949 }
950
951
952 rc = crypto_aead_decrypt(req);
953 if (rc == -EINPROGRESS || rc == -EBUSY)
954 return rc;
955
956 tipc_bearer_put(b);
957
958exit:
959 kfree(ctx);
960 TIPC_SKB_CB(skb)->crypto_ctx = NULL;
961 return rc;
962}
963
964static void tipc_aead_decrypt_done(struct crypto_async_request *base, int err)
965{
966 struct sk_buff *skb = base->data;
967 struct tipc_crypto_rx_ctx *rx_ctx = TIPC_SKB_CB(skb)->crypto_ctx;
968 struct tipc_bearer *b = rx_ctx->bearer;
969 struct tipc_aead *aead = rx_ctx->aead;
970 struct tipc_crypto_stats __percpu *stats = aead->crypto->stats;
971 struct net *net = aead->crypto->net;
972
973 switch (err) {
974 case 0:
975 this_cpu_inc(stats->stat[STAT_ASYNC_OK]);
976 break;
977 case -EINPROGRESS:
978 return;
979 default:
980 this_cpu_inc(stats->stat[STAT_ASYNC_NOK]);
981 break;
982 }
983
984 kfree(rx_ctx);
985 tipc_crypto_rcv_complete(net, aead, b, &skb, err);
986 if (likely(skb)) {
987 if (likely(test_bit(0, &b->up)))
988 tipc_rcv(net, skb, b);
989 else
990 kfree_skb(skb);
991 }
992
993 tipc_bearer_put(b);
994}
995
996static inline int tipc_ehdr_size(struct tipc_ehdr *ehdr)
997{
998 return (ehdr->user != LINK_CONFIG) ? EHDR_SIZE : EHDR_CFG_SIZE;
999}
1000
1001
1002
1003
1004
1005
1006
1007bool tipc_ehdr_validate(struct sk_buff *skb)
1008{
1009 struct tipc_ehdr *ehdr;
1010 int ehsz;
1011
1012 if (unlikely(!pskb_may_pull(skb, EHDR_MIN_SIZE)))
1013 return false;
1014
1015 ehdr = (struct tipc_ehdr *)skb->data;
1016 if (unlikely(ehdr->version != TIPC_EVERSION))
1017 return false;
1018 ehsz = tipc_ehdr_size(ehdr);
1019 if (unlikely(!pskb_may_pull(skb, ehsz)))
1020 return false;
1021 if (unlikely(skb->len <= ehsz + TIPC_AES_GCM_TAG_SIZE))
1022 return false;
1023
1024 return true;
1025}
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
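/**
 * tipc_ehdr_build - Build the TIPC encryption header
 * @net: struct net
 * @aead: the TX AEAD key to be used
 * @tx_key: key id of the TX key
 * @skb: the message skb
 * @__rx: RX crypto of the destination node, if any
 *
 * The header is pushed in front of the message and the key's (or the
 * peer's) sequence number is incremented to form the nonce.
 *
 * Return: the header size on success, otherwise a negative error code
 * (e.g. when the sequence number wraps and the key must be revoked).
 */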
1037static int tipc_ehdr_build(struct net *net, struct tipc_aead *aead,
1038 u8 tx_key, struct sk_buff *skb,
1039 struct tipc_crypto *__rx)
1040{
1041 struct tipc_msg *hdr = buf_msg(skb);
1042 struct tipc_ehdr *ehdr;
1043 u32 user = msg_user(hdr);
1044 u64 seqno;
1045 int ehsz;
1046
1047
1048 ehsz = (user != LINK_CONFIG) ? EHDR_SIZE : EHDR_CFG_SIZE;
1049 WARN_ON(skb_headroom(skb) < ehsz);
1050 ehdr = (struct tipc_ehdr *)skb_push(skb, ehsz);
1051
1052
1053
1054
1055
1056 if (!__rx || aead->mode == CLUSTER_KEY)
1057 seqno = atomic64_inc_return(&aead->seqno);
1058 else
1059 seqno = atomic64_inc_return(&__rx->sndnxt);
1060
1061
1062 if (unlikely(!seqno))
1063 return tipc_crypto_key_revoke(net, tx_key);
1064
1065
1066 ehdr->seqno = cpu_to_be64(seqno);
1067
1068
1069 ehdr->version = TIPC_EVERSION;
1070 ehdr->user = 0;
1071 ehdr->keepalive = 0;
1072 ehdr->tx_key = tx_key;
1073 ehdr->destined = (__rx) ? 1 : 0;
1074 ehdr->rx_key_active = (__rx) ? __rx->key.active : 0;
1075 ehdr->rx_nokey = (__rx) ? __rx->nokey : 0;
1076 ehdr->master_key = aead->crypto->key_master;
1077 ehdr->reserved_1 = 0;
1078 ehdr->reserved_2 = 0;
1079
1080 switch (user) {
1081 case LINK_CONFIG:
1082 ehdr->user = LINK_CONFIG;
1083 memcpy(ehdr->id, tipc_own_id(net), NODE_ID_LEN);
1084 break;
1085 default:
1086 if (user == LINK_PROTOCOL && msg_type(hdr) == STATE_MSG) {
1087 ehdr->user = LINK_PROTOCOL;
1088 ehdr->keepalive = msg_is_keepalive(hdr);
1089 }
1090 ehdr->addr = hdr->hdr[3];
1091 break;
1092 }
1093
1094 return ehsz;
1095}
1096
1097static inline void tipc_crypto_key_set_state(struct tipc_crypto *c,
1098 u8 new_passive,
1099 u8 new_active,
1100 u8 new_pending)
1101{
1102 struct tipc_key old = c->key;
1103 char buf[32];
1104
1105 c->key.keys = ((new_passive & KEY_MASK) << (KEY_BITS * 2)) |
1106 ((new_active & KEY_MASK) << (KEY_BITS)) |
1107 ((new_pending & KEY_MASK));
1108
1109 pr_debug("%s: key changing %s ::%pS\n", c->name,
1110 tipc_key_change_dump(old, c->key, buf),
1111 __builtin_return_address(0));
1112}
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
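/**
 * tipc_crypto_key_init - Initiate and attach a new key to a crypto
 * @c: TX or RX crypto
 * @ukey: the user key
 * @mode: the key mode (cluster or per-node key)
 * @master_key: true if this is the cluster master key
 *
 * Return: the new key id on success, otherwise a negative error code.
 */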
1126int tipc_crypto_key_init(struct tipc_crypto *c, struct tipc_aead_key *ukey,
1127 u8 mode, bool master_key)
1128{
1129 struct tipc_aead *aead = NULL;
1130 int rc = 0;
1131
1132
1133 rc = tipc_aead_init(&aead, ukey, mode);
1134
1135
1136 if (likely(!rc)) {
1137 rc = tipc_crypto_key_attach(c, aead, 0, master_key);
1138 if (rc < 0)
1139 tipc_aead_free(&aead->rcu);
1140 }
1141
1142 return rc;
1143}
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
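/**
 * tipc_crypto_key_attach - Attach a new AEAD key to a crypto
 * @c: TX or RX crypto
 * @aead: the new AEAD key to be attached
 * @pos: desired key slot, or 0 to let the function pick one
 * @master_key: true if attaching the cluster master key
 *
 * Return: the attached key id on success, otherwise -EBUSY.
 */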
1154static int tipc_crypto_key_attach(struct tipc_crypto *c,
1155 struct tipc_aead *aead, u8 pos,
1156 bool master_key)
1157{
1158 struct tipc_key key;
1159 int rc = -EBUSY;
1160 u8 new_key;
1161
1162 spin_lock_bh(&c->lock);
1163 key = c->key;
1164 if (master_key) {
1165 new_key = KEY_MASTER;
1166 goto attach;
1167 }
1168 if (key.active && key.passive)
1169 goto exit;
1170 if (key.pending) {
1171 if (tipc_aead_users(c->aead[key.pending]) > 0)
1172 goto exit;
1173
1174
1175 new_key = key.pending;
1176 } else {
1177 if (pos) {
1178 if (key.active && pos != key_next(key.active)) {
1179 key.passive = pos;
1180 new_key = pos;
1181 goto attach;
1182 } else if (!key.active && !key.passive) {
1183 key.pending = pos;
1184 new_key = pos;
1185 goto attach;
1186 }
1187 }
1188 key.pending = key_next(key.active ?: key.passive);
1189 new_key = key.pending;
1190 }
1191
1192attach:
1193 aead->crypto = c;
1194 aead->gen = (is_tx(c)) ? ++c->key_gen : c->key_gen;
1195 tipc_aead_rcu_replace(c->aead[new_key], aead, &c->lock);
1196 if (likely(c->key.keys != key.keys))
1197 tipc_crypto_key_set_state(c, key.passive, key.active,
1198 key.pending);
1199 c->working = 1;
1200 c->nokey = 0;
1201 c->key_master |= master_key;
1202 rc = new_key;
1203
1204exit:
1205 spin_unlock_bh(&c->lock);
1206 return rc;
1207}
1208
1209void tipc_crypto_key_flush(struct tipc_crypto *c)
1210{
1211 struct tipc_crypto *tx, *rx;
1212 int k;
1213
1214 spin_lock_bh(&c->lock);
1215 if (is_rx(c)) {
1216
1217 rx = c;
1218 tx = tipc_net(rx->net)->crypto_tx;
1219 if (cancel_delayed_work(&rx->work)) {
1220 kfree(rx->skey);
1221 rx->skey = NULL;
1222 atomic_xchg(&rx->key_distr, 0);
1223 tipc_node_put(rx->node);
1224 }
1225
1226 k = atomic_xchg(&rx->peer_rx_active, 0);
1227 if (k) {
1228 tipc_aead_users_dec(tx->aead[k], 0);
1229
1230 tx->timer1 = jiffies;
1231 }
1232 }
1233
1234 c->flags = 0;
1235 tipc_crypto_key_set_state(c, 0, 0, 0);
1236 for (k = KEY_MIN; k <= KEY_MAX; k++)
1237 tipc_crypto_key_detach(c->aead[k], &c->lock);
1238 atomic64_set(&c->sndnxt, 0);
1239 spin_unlock_bh(&c->lock);
1240}
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
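/**
 * tipc_crypto_key_try_align - Align an RX pending key to a new key id
 * @rx: the RX crypto
 * @new_pending: the new pending key id seen in an incoming message
 *
 * The RX pending (and passive) key slots are shifted so that the local key
 * ids match the peer's, provided no key is active and the keys are unused.
 *
 * Return: true if the keys have been aligned, otherwise false.
 */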
1255static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending)
1256{
1257 struct tipc_aead *tmp1, *tmp2 = NULL;
1258 struct tipc_key key;
1259 bool aligned = false;
1260 u8 new_passive = 0;
1261 int x;
1262
1263 spin_lock(&rx->lock);
1264 key = rx->key;
1265 if (key.pending == new_pending) {
1266 aligned = true;
1267 goto exit;
1268 }
1269 if (key.active)
1270 goto exit;
1271 if (!key.pending)
1272 goto exit;
1273 if (tipc_aead_users(rx->aead[key.pending]) > 0)
1274 goto exit;
1275
1276
1277 tmp1 = tipc_aead_rcu_ptr(rx->aead[key.pending], &rx->lock);
1278 if (!refcount_dec_if_one(&tmp1->refcnt))
1279 goto exit;
1280 rcu_assign_pointer(rx->aead[key.pending], NULL);
1281
1282
1283 if (key.passive) {
1284 tmp2 = rcu_replace_pointer(rx->aead[key.passive], tmp2, lockdep_is_held(&rx->lock));
1285 x = (key.passive - key.pending + new_pending) % KEY_MAX;
1286 new_passive = (x <= 0) ? x + KEY_MAX : x;
1287 }
1288
1289
1290 tipc_crypto_key_set_state(rx, new_passive, 0, new_pending);
1291 rcu_assign_pointer(rx->aead[new_pending], tmp1);
1292 if (new_passive)
1293 rcu_assign_pointer(rx->aead[new_passive], tmp2);
1294 refcount_set(&tmp1->refcnt, 1);
1295 aligned = true;
1296 pr_info_ratelimited("%s: key[%d] -> key[%d]\n", rx->name, key.pending,
1297 new_pending);
1298
1299exit:
1300 spin_unlock(&rx->lock);
1301 return aligned;
1302}
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
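/**
 * tipc_crypto_key_pick_tx - Pick a TX key for decrypting a message
 * @tx: the TX crypto
 * @rx: the RX crypto of the sending peer (may be NULL)
 * @skb: the message skb to be decrypted
 * @tx_key: key id advertised in the message
 *
 * Used as a fallback when no usable RX key exists yet: the TX master key
 * or a cluster TX key is tried instead, and the skb is cloned so another
 * key can be tried if this attempt fails.
 *
 * Return: the chosen TX AEAD key with a reference held, or NULL.
 */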
1317static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx,
1318 struct tipc_crypto *rx,
1319 struct sk_buff *skb,
1320 u8 tx_key)
1321{
1322 struct tipc_skb_cb *skb_cb = TIPC_SKB_CB(skb);
1323 struct tipc_aead *aead = NULL;
1324 struct tipc_key key = tx->key;
1325 u8 k, i = 0;
1326
1327
1328 if (!skb_cb->tx_clone_deferred) {
1329 skb_cb->tx_clone_deferred = 1;
1330 memset(&skb_cb->tx_clone_ctx, 0, sizeof(skb_cb->tx_clone_ctx));
1331 }
1332
1333 skb_cb->tx_clone_ctx.rx = rx;
1334 if (++skb_cb->tx_clone_ctx.recurs > 2)
1335 return NULL;
1336
1337
1338 spin_lock(&tx->lock);
1339 if (tx_key == KEY_MASTER) {
1340 aead = tipc_aead_rcu_ptr(tx->aead[KEY_MASTER], &tx->lock);
1341 goto done;
1342 }
1343 do {
1344 k = (i == 0) ? key.pending :
1345 ((i == 1) ? key.active : key.passive);
1346 if (!k)
1347 continue;
1348 aead = tipc_aead_rcu_ptr(tx->aead[k], &tx->lock);
1349 if (!aead)
1350 continue;
1351 if (aead->mode != CLUSTER_KEY ||
1352 aead == skb_cb->tx_clone_ctx.last) {
1353 aead = NULL;
1354 continue;
1355 }
1356
1357 skb_cb->tx_clone_ctx.last = aead;
1358 WARN_ON(skb->next);
1359 skb->next = skb_clone(skb, GFP_ATOMIC);
1360 if (unlikely(!skb->next))
			pr_warn("Failed to clone skb for the next round, if any\n");
1362 break;
1363 } while (++i < 3);
1364
1365done:
1366 if (likely(aead))
1367 WARN_ON(!refcount_inc_not_zero(&aead->refcnt));
1368 spin_unlock(&tx->lock);
1369
1370 return aead;
1371}
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
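/**
 * tipc_crypto_key_synch - Synchronize the key states with a peer
 * @rx: the RX crypto of the peer node
 * @skb: the last received and decrypted message from that peer
 *
 * Based on the encryption header flags, this updates the peer RX active
 * key (and the TX key user counts), schedules a key distribution when the
 * peer reports having no key, and notes whether the peer still runs
 * without a master key.
 */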
1388static void tipc_crypto_key_synch(struct tipc_crypto *rx, struct sk_buff *skb)
1389{
1390 struct tipc_ehdr *ehdr = (struct tipc_ehdr *)skb_network_header(skb);
1391 struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx;
1392 struct tipc_msg *hdr = buf_msg(skb);
1393 u32 self = tipc_own_addr(rx->net);
1394 u8 cur, new;
1395 unsigned long delay;
1396
1397
1398
1399
1400 rx->key_master = ehdr->master_key;
1401 if (!rx->key_master)
1402 tx->legacy_user = 1;
1403
1404
1405 if (!ehdr->destined || msg_short(hdr) || msg_destnode(hdr) != self)
1406 return;
1407
1408
1409 if (ehdr->rx_nokey) {
1410
1411 tx->timer2 = jiffies;
1412
1413 if (tx->key.keys &&
1414 !atomic_cmpxchg(&rx->key_distr, 0, KEY_DISTR_SCHED)) {
1415 get_random_bytes(&delay, 2);
1416 delay %= 5;
1417 delay = msecs_to_jiffies(500 * ++delay);
1418 if (queue_delayed_work(tx->wq, &rx->work, delay))
1419 tipc_node_get(rx->node);
1420 }
1421 } else {
1422
1423 atomic_xchg(&rx->key_distr, 0);
1424 }
1425
1426
1427 cur = atomic_read(&rx->peer_rx_active);
1428 new = ehdr->rx_key_active;
1429 if (tx->key.keys &&
1430 cur != new &&
1431 atomic_cmpxchg(&rx->peer_rx_active, cur, new) == cur) {
1432 if (new)
1433 tipc_aead_users_inc(tx->aead[new], INT_MAX);
1434 if (cur)
1435 tipc_aead_users_dec(tx->aead[cur], 0);
1436
1437 atomic64_set(&rx->sndnxt, 0);
1438
1439 tx->timer1 = jiffies;
1440
1441 pr_debug("%s: key users changed %d-- %d++, peer %s\n",
1442 tx->name, cur, new, rx->name);
1443 }
1444}
1445
1446static int tipc_crypto_key_revoke(struct net *net, u8 tx_key)
1447{
1448 struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
1449 struct tipc_key key;
1450
1451 spin_lock(&tx->lock);
1452 key = tx->key;
1453 WARN_ON(!key.active || tx_key != key.active);
1454
1455
1456 tipc_crypto_key_set_state(tx, key.passive, 0, key.pending);
1457 tipc_crypto_key_detach(tx->aead[key.active], &tx->lock);
1458 spin_unlock(&tx->lock);
1459
1460 pr_warn("%s: key is revoked\n", tx->name);
1461 return -EKEYREVOKED;
1462}
1463
1464int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
1465 struct tipc_node *node)
1466{
1467 struct tipc_crypto *c;
1468
1469 if (*crypto)
1470 return -EEXIST;
1471
1472
1473 c = kzalloc(sizeof(*c), GFP_ATOMIC);
1474 if (!c)
1475 return -ENOMEM;
1476
1477
1478 if (!node) {
1479 c->wq = alloc_ordered_workqueue("tipc_crypto", 0);
1480 if (!c->wq) {
1481 kfree(c);
1482 return -ENOMEM;
1483 }
1484 }
1485
1486
1487 c->stats = alloc_percpu_gfp(struct tipc_crypto_stats, GFP_ATOMIC);
1488 if (!c->stats) {
1489 if (c->wq)
1490 destroy_workqueue(c->wq);
1491 kfree_sensitive(c);
1492 return -ENOMEM;
1493 }
1494
1495 c->flags = 0;
1496 c->net = net;
1497 c->node = node;
1498 get_random_bytes(&c->key_gen, 2);
1499 tipc_crypto_key_set_state(c, 0, 0, 0);
1500 atomic_set(&c->key_distr, 0);
1501 atomic_set(&c->peer_rx_active, 0);
1502 atomic64_set(&c->sndnxt, 0);
1503 c->timer1 = jiffies;
1504 c->timer2 = jiffies;
1505 c->rekeying_intv = TIPC_REKEYING_INTV_DEF;
1506 spin_lock_init(&c->lock);
1507 scnprintf(c->name, 48, "%s(%s)", (is_rx(c)) ? "RX" : "TX",
1508 (is_rx(c)) ? tipc_node_get_id_str(c->node) :
1509 tipc_own_id_string(c->net));
1510
1511 if (is_rx(c))
1512 INIT_DELAYED_WORK(&c->work, tipc_crypto_work_rx);
1513 else
1514 INIT_DELAYED_WORK(&c->work, tipc_crypto_work_tx);
1515
1516 *crypto = c;
1517 return 0;
1518}
1519
1520void tipc_crypto_stop(struct tipc_crypto **crypto)
1521{
1522 struct tipc_crypto *c = *crypto;
1523 u8 k;
1524
1525 if (!c)
1526 return;
1527
1528
1529 if (is_tx(c)) {
1530 c->rekeying_intv = 0;
1531 cancel_delayed_work_sync(&c->work);
1532 destroy_workqueue(c->wq);
1533 }
1534
1535
1536 rcu_read_lock();
1537 for (k = KEY_MIN; k <= KEY_MAX; k++)
1538 tipc_aead_put(rcu_dereference(c->aead[k]));
1539 rcu_read_unlock();
1540 pr_debug("%s: has been stopped\n", c->name);
1541
1542
1543 free_percpu(c->stats);
1544
1545 *crypto = NULL;
1546 kfree_sensitive(c);
1547}
1548
1549void tipc_crypto_timeout(struct tipc_crypto *rx)
1550{
1551 struct tipc_net *tn = tipc_net(rx->net);
1552 struct tipc_crypto *tx = tn->crypto_tx;
1553 struct tipc_key key;
1554 int cmd;
1555
1556
1557 spin_lock(&tx->lock);
1558 key = tx->key;
1559 if (key.active && tipc_aead_users(tx->aead[key.active]) > 0)
1560 goto s1;
1561 if (!key.pending || tipc_aead_users(tx->aead[key.pending]) <= 0)
1562 goto s1;
1563 if (time_before(jiffies, tx->timer1 + TIPC_TX_LASTING_TIME))
1564 goto s1;
1565
1566 tipc_crypto_key_set_state(tx, key.passive, key.pending, 0);
1567 if (key.active)
1568 tipc_crypto_key_detach(tx->aead[key.active], &tx->lock);
1569 this_cpu_inc(tx->stats->stat[STAT_SWITCHES]);
1570 pr_info("%s: key[%d] is activated\n", tx->name, key.pending);
1571
1572s1:
1573 spin_unlock(&tx->lock);
1574
1575
1576 spin_lock(&rx->lock);
1577 key = rx->key;
1578 if (!key.pending || tipc_aead_users(rx->aead[key.pending]) <= 0)
1579 goto s2;
1580
1581 if (key.active)
1582 key.passive = key.active;
1583 key.active = key.pending;
1584 rx->timer2 = jiffies;
1585 tipc_crypto_key_set_state(rx, key.passive, key.active, 0);
1586 this_cpu_inc(rx->stats->stat[STAT_SWITCHES]);
1587 pr_info("%s: key[%d] is activated\n", rx->name, key.pending);
1588 goto s5;
1589
1590s2:
1591
1592 if (!key.pending || tipc_aead_users(rx->aead[key.pending]) > -10)
1593 goto s3;
1594
1595 tipc_crypto_key_set_state(rx, key.passive, key.active, 0);
1596 tipc_crypto_key_detach(rx->aead[key.pending], &rx->lock);
1597 pr_debug("%s: key[%d] is removed\n", rx->name, key.pending);
1598 goto s5;
1599
1600s3:
1601
1602 if (!key.active)
1603 goto s4;
1604 if (time_before(jiffies, rx->timer1 + TIPC_RX_ACTIVE_LIM) &&
1605 tipc_aead_users(rx->aead[key.active]) > 0)
1606 goto s4;
1607
1608 if (key.pending)
1609 key.passive = key.active;
1610 else
1611 key.pending = key.active;
1612 rx->timer2 = jiffies;
1613 tipc_crypto_key_set_state(rx, key.passive, 0, key.pending);
1614 tipc_aead_users_set(rx->aead[key.pending], 0);
1615 pr_debug("%s: key[%d] is deactivated\n", rx->name, key.active);
1616 goto s5;
1617
1618s4:
1619
1620 if (!key.passive)
1621 goto s5;
1622 if (time_before(jiffies, rx->timer2 + TIPC_RX_PASSIVE_LIM) &&
1623 tipc_aead_users(rx->aead[key.passive]) > -10)
1624 goto s5;
1625
1626 tipc_crypto_key_set_state(rx, 0, key.active, key.pending);
1627 tipc_crypto_key_detach(rx->aead[key.passive], &rx->lock);
1628 pr_debug("%s: key[%d] is freed\n", rx->name, key.passive);
1629
1630s5:
1631 spin_unlock(&rx->lock);
1632
1633
1634
1635
1636 if (time_after(jiffies, tx->timer2 + TIPC_TX_GRACE_PERIOD))
1637 tx->legacy_user = 0;
1638
1639
1640 if (likely(sysctl_tipc_max_tfms <= TIPC_MAX_TFMS_LIM))
1641 return;
1642
1643 cmd = sysctl_tipc_max_tfms;
1644 sysctl_tipc_max_tfms = TIPC_MAX_TFMS_DEF;
1645 tipc_crypto_do_cmd(rx->net, cmd);
1646}
1647
1648static inline void tipc_crypto_clone_msg(struct net *net, struct sk_buff *_skb,
1649 struct tipc_bearer *b,
1650 struct tipc_media_addr *dst,
1651 struct tipc_node *__dnode, u8 type)
1652{
1653 struct sk_buff *skb;
1654
1655 skb = skb_clone(_skb, GFP_ATOMIC);
1656 if (skb) {
1657 TIPC_SKB_CB(skb)->xmit_type = type;
1658 tipc_crypto_xmit(net, &skb, b, dst, __dnode);
1659 if (skb)
1660 b->media->send_msg(net, skb, b, dst);
1661 }
1662}
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
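/**
 * tipc_crypto_xmit - Encrypt a message before transmission
 * @net: struct net
 * @skb: the message skb, set to NULL when consumed (async or error)
 * @b: the TX bearer
 * @dst: the destination media address
 * @__dnode: TIPC destination node, if any
 *
 * The TX key is chosen in this order: the pending key (for probing), the
 * master key (when no active key exists or within the grace period), then
 * the active key.
 *
 * Return: 0 when the skb has been encrypted in place, -EINPROGRESS or
 * -EBUSY for asynchronous crypto, otherwise a negative error code and the
 * skb is freed.
 */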
1687int tipc_crypto_xmit(struct net *net, struct sk_buff **skb,
1688 struct tipc_bearer *b, struct tipc_media_addr *dst,
1689 struct tipc_node *__dnode)
1690{
1691 struct tipc_crypto *__rx = tipc_node_crypto_rx(__dnode);
1692 struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
1693 struct tipc_crypto_stats __percpu *stats = tx->stats;
1694 struct tipc_msg *hdr = buf_msg(*skb);
1695 struct tipc_key key = tx->key;
1696 struct tipc_aead *aead = NULL;
1697 u32 user = msg_user(hdr);
1698 u32 type = msg_type(hdr);
1699 int rc = -ENOKEY;
1700 u8 tx_key = 0;
1701
1702
1703 if (!tx->working)
1704 return 0;
1705
1706
1707 if (unlikely(key.pending)) {
1708 tx_key = key.pending;
1709 if (!tx->key_master && !key.active)
1710 goto encrypt;
1711 if (__rx && atomic_read(&__rx->peer_rx_active) == tx_key)
1712 goto encrypt;
1713 if (TIPC_SKB_CB(*skb)->xmit_type == SKB_PROBING) {
1714 pr_debug("%s: probing for key[%d]\n", tx->name,
1715 key.pending);
1716 goto encrypt;
1717 }
1718 if (user == LINK_CONFIG || user == LINK_PROTOCOL)
1719 tipc_crypto_clone_msg(net, *skb, b, dst, __dnode,
1720 SKB_PROBING);
1721 }
1722
1723
1724 if (tx->key_master) {
1725 tx_key = KEY_MASTER;
1726 if (!key.active)
1727 goto encrypt;
1728 if (TIPC_SKB_CB(*skb)->xmit_type == SKB_GRACING) {
1729 pr_debug("%s: gracing for msg (%d %d)\n", tx->name,
1730 user, type);
1731 goto encrypt;
1732 }
1733 if (user == LINK_CONFIG ||
1734 (user == LINK_PROTOCOL && type == RESET_MSG) ||
1735 (user == MSG_CRYPTO && type == KEY_DISTR_MSG) ||
1736 time_before(jiffies, tx->timer2 + TIPC_TX_GRACE_PERIOD)) {
1737 if (__rx && __rx->key_master &&
1738 !atomic_read(&__rx->peer_rx_active))
1739 goto encrypt;
1740 if (!__rx) {
1741 if (likely(!tx->legacy_user))
1742 goto encrypt;
1743 tipc_crypto_clone_msg(net, *skb, b, dst,
1744 __dnode, SKB_GRACING);
1745 }
1746 }
1747 }
1748
1749
1750 if (likely(key.active)) {
1751 tx_key = key.active;
1752 goto encrypt;
1753 }
1754
1755 goto exit;
1756
1757encrypt:
1758 aead = tipc_aead_get(tx->aead[tx_key]);
1759 if (unlikely(!aead))
1760 goto exit;
1761 rc = tipc_ehdr_build(net, aead, tx_key, *skb, __rx);
1762 if (likely(rc > 0))
1763 rc = tipc_aead_encrypt(aead, *skb, b, dst, __dnode);
1764
1765exit:
1766 switch (rc) {
1767 case 0:
1768 this_cpu_inc(stats->stat[STAT_OK]);
1769 break;
1770 case -EINPROGRESS:
1771 case -EBUSY:
1772 this_cpu_inc(stats->stat[STAT_ASYNC]);
1773 *skb = NULL;
1774 return rc;
1775 default:
1776 this_cpu_inc(stats->stat[STAT_NOK]);
1777 if (rc == -ENOKEY)
1778 this_cpu_inc(stats->stat[STAT_NOKEYS]);
1779 else if (rc == -EKEYREVOKED)
1780 this_cpu_inc(stats->stat[STAT_BADKEYS]);
1781 kfree_skb(*skb);
1782 *skb = NULL;
1783 break;
1784 }
1785
1786 tipc_aead_put(aead);
1787 return rc;
1788}
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
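/**
 * tipc_crypto_rcv - Decrypt a received message
 * @net: struct net
 * @rx: the RX crypto of the sending node, may be NULL for an unknown peer
 * @skb: the received message skb, set to NULL when consumed
 * @b: the RX bearer
 *
 * The RX key matching the message's key id is used; if none matches, the
 * RX key slots are re-aligned or a TX key is picked as a fallback.
 *
 * Return: 0 on successful decryption, -EINPROGRESS or -EBUSY for
 * asynchronous crypto, otherwise a negative error code.
 */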
1812int tipc_crypto_rcv(struct net *net, struct tipc_crypto *rx,
1813 struct sk_buff **skb, struct tipc_bearer *b)
1814{
1815 struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
1816 struct tipc_crypto_stats __percpu *stats;
1817 struct tipc_aead *aead = NULL;
1818 struct tipc_key key;
1819 int rc = -ENOKEY;
1820 u8 tx_key, n;
1821
1822 tx_key = ((struct tipc_ehdr *)(*skb)->data)->tx_key;
1823
1824
1825
1826
1827 if (unlikely(!rx || tx_key == KEY_MASTER))
1828 goto pick_tx;
1829
1830
1831 key = rx->key;
1832 if (tx_key == key.active || tx_key == key.pending ||
1833 tx_key == key.passive)
1834 goto decrypt;
1835
1836
1837 if (tipc_crypto_key_try_align(rx, tx_key))
1838 goto decrypt;
1839
1840pick_tx:
1841
1842 aead = tipc_crypto_key_pick_tx(tx, rx, *skb, tx_key);
1843 if (aead)
1844 goto decrypt;
1845 goto exit;
1846
1847decrypt:
1848 rcu_read_lock();
1849 if (!aead)
1850 aead = tipc_aead_get(rx->aead[tx_key]);
1851 rc = tipc_aead_decrypt(net, aead, *skb, b);
1852 rcu_read_unlock();
1853
1854exit:
1855 stats = ((rx) ?: tx)->stats;
1856 switch (rc) {
1857 case 0:
1858 this_cpu_inc(stats->stat[STAT_OK]);
1859 break;
1860 case -EINPROGRESS:
1861 case -EBUSY:
1862 this_cpu_inc(stats->stat[STAT_ASYNC]);
1863 *skb = NULL;
1864 return rc;
1865 default:
1866 this_cpu_inc(stats->stat[STAT_NOK]);
1867 if (rc == -ENOKEY) {
1868 kfree_skb(*skb);
1869 *skb = NULL;
1870 if (rx) {
1871
1872
1873
1874
1875 n = key_next(tx_key);
1876 rx->nokey = !(rx->skey ||
1877 rcu_access_pointer(rx->aead[n]));
1878 pr_debug_ratelimited("%s: nokey %d, key %d/%x\n",
1879 rx->name, rx->nokey,
1880 tx_key, rx->key.keys);
1881 tipc_node_put(rx->node);
1882 }
1883 this_cpu_inc(stats->stat[STAT_NOKEYS]);
1884 return rc;
1885 } else if (rc == -EBADMSG) {
1886 this_cpu_inc(stats->stat[STAT_BADMSGS]);
1887 }
1888 break;
1889 }
1890
1891 tipc_crypto_rcv_complete(net, aead, b, skb, rc);
1892 return rc;
1893}
1894
1895static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
1896 struct tipc_bearer *b,
1897 struct sk_buff **skb, int err)
1898{
1899 struct tipc_skb_cb *skb_cb = TIPC_SKB_CB(*skb);
1900 struct tipc_crypto *rx = aead->crypto;
1901 struct tipc_aead *tmp = NULL;
1902 struct tipc_ehdr *ehdr;
1903 struct tipc_node *n;
1904
1905
1906 if (unlikely(is_tx(aead->crypto))) {
1907 rx = skb_cb->tx_clone_ctx.rx;
1908 pr_debug("TX->RX(%s): err %d, aead %p, skb->next %p, flags %x\n",
1909 (rx) ? tipc_node_get_id_str(rx->node) : "-", err, aead,
1910 (*skb)->next, skb_cb->flags);
1911 pr_debug("skb_cb [recurs %d, last %p], tx->aead [%p %p %p]\n",
1912 skb_cb->tx_clone_ctx.recurs, skb_cb->tx_clone_ctx.last,
1913 aead->crypto->aead[1], aead->crypto->aead[2],
1914 aead->crypto->aead[3]);
1915 if (unlikely(err)) {
1916 if (err == -EBADMSG && (*skb)->next)
1917 tipc_rcv(net, (*skb)->next, b);
1918 goto free_skb;
1919 }
1920
1921 if (likely((*skb)->next)) {
1922 kfree_skb((*skb)->next);
1923 (*skb)->next = NULL;
1924 }
1925 ehdr = (struct tipc_ehdr *)(*skb)->data;
1926 if (!rx) {
1927 WARN_ON(ehdr->user != LINK_CONFIG);
1928 n = tipc_node_create(net, 0, ehdr->id, 0xffffu, 0,
1929 true);
1930 rx = tipc_node_crypto_rx(n);
1931 if (unlikely(!rx))
1932 goto free_skb;
1933 }
1934
1935
1936 if (ehdr->tx_key == KEY_MASTER)
1937 goto rcv;
1938 if (tipc_aead_clone(&tmp, aead) < 0)
1939 goto rcv;
1940 WARN_ON(!refcount_inc_not_zero(&tmp->refcnt));
1941 if (tipc_crypto_key_attach(rx, tmp, ehdr->tx_key, false) < 0) {
1942 tipc_aead_free(&tmp->rcu);
1943 goto rcv;
1944 }
1945 tipc_aead_put(aead);
1946 aead = tmp;
1947 }
1948
1949 if (unlikely(err)) {
1950 tipc_aead_users_dec((struct tipc_aead __force __rcu *)aead, INT_MIN);
1951 goto free_skb;
1952 }
1953
1954
1955 tipc_aead_users_set((struct tipc_aead __force __rcu *)aead, 1);
1956
1957
1958 rx->timer1 = jiffies;
1959
1960rcv:
1961
1962 ehdr = (struct tipc_ehdr *)(*skb)->data;
1963
1964
1965 if (rx->key.passive && ehdr->tx_key == rx->key.passive)
1966 rx->timer2 = jiffies;
1967
1968 skb_reset_network_header(*skb);
1969 skb_pull(*skb, tipc_ehdr_size(ehdr));
1970 pskb_trim(*skb, (*skb)->len - aead->authsize);
1971
1972
1973 if (unlikely(!tipc_msg_validate(skb))) {
1974 pr_err_ratelimited("Packet dropped after decryption!\n");
1975 goto free_skb;
1976 }
1977
1978
1979 tipc_crypto_key_synch(rx, *skb);
1980
1981
1982 skb_cb->decrypted = 1;
1983
1984
1985 if (likely(!skb_cb->tx_clone_deferred))
1986 goto exit;
1987 skb_cb->tx_clone_deferred = 0;
1988 memset(&skb_cb->tx_clone_ctx, 0, sizeof(skb_cb->tx_clone_ctx));
1989 goto exit;
1990
1991free_skb:
1992 kfree_skb(*skb);
1993 *skb = NULL;
1994
1995exit:
1996 tipc_aead_put(aead);
1997 if (rx)
1998 tipc_node_put(rx->node);
1999}
2000
2001static void tipc_crypto_do_cmd(struct net *net, int cmd)
2002{
2003 struct tipc_net *tn = tipc_net(net);
2004 struct tipc_crypto *tx = tn->crypto_tx, *rx;
2005 struct list_head *p;
2006 unsigned int stat;
2007 int i, j, cpu;
2008 char buf[200];
2009
2010
2011 switch (cmd) {
2012 case 0xfff1:
2013 goto print_stats;
2014 default:
2015 return;
2016 }
2017
2018print_stats:
2019
2020 pr_info("\n=============== TIPC Crypto Statistics ===============\n\n");
2021
2022
2023 pr_info("Key status:\n");
2024 pr_info("TX(%7.7s)\n%s", tipc_own_id_string(net),
2025 tipc_crypto_key_dump(tx, buf));
2026
2027 rcu_read_lock();
2028 for (p = tn->node_list.next; p != &tn->node_list; p = p->next) {
2029 rx = tipc_node_crypto_rx_by_list(p);
2030 pr_info("RX(%7.7s)\n%s", tipc_node_get_id_str(rx->node),
2031 tipc_crypto_key_dump(rx, buf));
2032 }
2033 rcu_read_unlock();
2034
2035
2036 for (i = 0, j = 0; i < MAX_STATS; i++)
2037 j += scnprintf(buf + j, 200 - j, "|%11s ", hstats[i]);
2038 pr_info("Counter %s", buf);
2039
2040 memset(buf, '-', 115);
2041 buf[115] = '\0';
2042 pr_info("%s\n", buf);
2043
2044 j = scnprintf(buf, 200, "TX(%7.7s) ", tipc_own_id_string(net));
2045 for_each_possible_cpu(cpu) {
2046 for (i = 0; i < MAX_STATS; i++) {
2047 stat = per_cpu_ptr(tx->stats, cpu)->stat[i];
2048 j += scnprintf(buf + j, 200 - j, "|%11d ", stat);
2049 }
2050 pr_info("%s", buf);
2051 j = scnprintf(buf, 200, "%12s", " ");
2052 }
2053
2054 rcu_read_lock();
2055 for (p = tn->node_list.next; p != &tn->node_list; p = p->next) {
2056 rx = tipc_node_crypto_rx_by_list(p);
2057 j = scnprintf(buf, 200, "RX(%7.7s) ",
2058 tipc_node_get_id_str(rx->node));
2059 for_each_possible_cpu(cpu) {
2060 for (i = 0; i < MAX_STATS; i++) {
2061 stat = per_cpu_ptr(rx->stats, cpu)->stat[i];
2062 j += scnprintf(buf + j, 200 - j, "|%11d ",
2063 stat);
2064 }
2065 pr_info("%s", buf);
2066 j = scnprintf(buf, 200, "%12s", " ");
2067 }
2068 }
2069 rcu_read_unlock();
2070
2071 pr_info("\n======================== Done ========================\n");
2072}
2073
2074static char *tipc_crypto_key_dump(struct tipc_crypto *c, char *buf)
2075{
2076 struct tipc_key key = c->key;
2077 struct tipc_aead *aead;
2078 int k, i = 0;
2079 char *s;
2080
2081 for (k = KEY_MIN; k <= KEY_MAX; k++) {
2082 if (k == KEY_MASTER) {
2083 if (is_rx(c))
2084 continue;
2085 if (time_before(jiffies,
2086 c->timer2 + TIPC_TX_GRACE_PERIOD))
2087 s = "ACT";
2088 else
2089 s = "PAS";
2090 } else {
2091 if (k == key.passive)
2092 s = "PAS";
2093 else if (k == key.active)
2094 s = "ACT";
2095 else if (k == key.pending)
2096 s = "PEN";
2097 else
2098 s = "-";
2099 }
2100 i += scnprintf(buf + i, 200 - i, "\tKey%d: %s", k, s);
2101
2102 rcu_read_lock();
2103 aead = rcu_dereference(c->aead[k]);
2104 if (aead)
2105 i += scnprintf(buf + i, 200 - i,
2106 "{\"0x...%s\", \"%s\"}/%d:%d",
2107 aead->hint,
2108 (aead->mode == CLUSTER_KEY) ? "c" : "p",
2109 atomic_read(&aead->users),
2110 refcount_read(&aead->refcnt));
2111 rcu_read_unlock();
2112 i += scnprintf(buf + i, 200 - i, "\n");
2113 }
2114
2115 if (is_rx(c))
2116 i += scnprintf(buf + i, 200 - i, "\tPeer RX active: %d\n",
2117 atomic_read(&c->peer_rx_active));
2118
2119 return buf;
2120}
2121
2122static char *tipc_key_change_dump(struct tipc_key old, struct tipc_key new,
2123 char *buf)
2124{
2125 struct tipc_key *key = &old;
2126 int k, i = 0;
2127 char *s;
2128
2129
2130again:
2131 i += scnprintf(buf + i, 32 - i, "[");
2132 for (k = KEY_1; k <= KEY_3; k++) {
2133 if (k == key->passive)
2134 s = "pas";
2135 else if (k == key->active)
2136 s = "act";
2137 else if (k == key->pending)
2138 s = "pen";
2139 else
2140 s = "-";
2141 i += scnprintf(buf + i, 32 - i,
2142 (k != KEY_3) ? "%s " : "%s", s);
2143 }
2144 if (key != &new) {
2145 i += scnprintf(buf + i, 32 - i, "] -> ");
2146 key = &new;
2147 goto again;
2148 }
2149 i += scnprintf(buf + i, 32 - i, "]");
2150 return buf;
2151}
2152
2153
2154
2155
2156
2157
2158void tipc_crypto_msg_rcv(struct net *net, struct sk_buff *skb)
2159{
2160 struct tipc_crypto *rx;
2161 struct tipc_msg *hdr;
2162
2163 if (unlikely(skb_linearize(skb)))
2164 goto exit;
2165
2166 hdr = buf_msg(skb);
2167 rx = tipc_node_crypto_rx_by_addr(net, msg_prevnode(hdr));
2168 if (unlikely(!rx))
2169 goto exit;
2170
2171 switch (msg_type(hdr)) {
2172 case KEY_DISTR_MSG:
2173 if (tipc_crypto_key_rcv(rx, hdr))
2174 goto exit;
2175 break;
2176 default:
2177 break;
2178 }
2179
2180 tipc_node_put(rx->node);
2181
2182exit:
2183 kfree_skb(skb);
2184}
2185
2186
2187
2188
2189
2190
2191
2192
2193
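/**
 * tipc_crypto_key_distr - Distribute a TX key to one or all peers
 * @tx: the TX crypto
 * @key: the key id to be distributed
 * @dest: the destination node, or NULL for a cluster broadcast
 *
 * Return: 0 on success (or when key exchange is disabled), otherwise a
 * negative error code.
 */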
2194int tipc_crypto_key_distr(struct tipc_crypto *tx, u8 key,
2195 struct tipc_node *dest)
2196{
2197 struct tipc_aead *aead;
2198 u32 dnode = tipc_node_get_addr(dest);
2199 int rc = -ENOKEY;
2200
2201 if (!sysctl_tipc_key_exchange_enabled)
2202 return 0;
2203
2204 if (key) {
2205 rcu_read_lock();
2206 aead = tipc_aead_get(tx->aead[key]);
2207 if (likely(aead)) {
2208 rc = tipc_crypto_key_xmit(tx->net, aead->key,
2209 aead->gen, aead->mode,
2210 dnode);
2211 tipc_aead_put(aead);
2212 }
2213 rcu_read_unlock();
2214 }
2215
2216 return rc;
2217}
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
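/**
 * tipc_crypto_key_xmit - Send a session key in a MSG_CRYPTO message
 * @net: struct net
 * @skey: the session key to be sent
 * @gen: the key generation
 * @mode: the key mode (cluster or per-node key)
 * @dnode: the destination node address, or 0 for a cluster broadcast
 *
 * Return: 0 on success, otherwise a negative error code.
 */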
2232static int tipc_crypto_key_xmit(struct net *net, struct tipc_aead_key *skey,
2233 u16 gen, u8 mode, u32 dnode)
2234{
2235 struct sk_buff_head pkts;
2236 struct tipc_msg *hdr;
2237 struct sk_buff *skb;
2238 u16 size, cong_link_cnt;
2239 u8 *data;
2240 int rc;
2241
2242 size = tipc_aead_key_size(skey);
2243 skb = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC);
2244 if (!skb)
2245 return -ENOMEM;
2246
2247 hdr = buf_msg(skb);
2248 tipc_msg_init(tipc_own_addr(net), hdr, MSG_CRYPTO, KEY_DISTR_MSG,
2249 INT_H_SIZE, dnode);
2250 msg_set_size(hdr, INT_H_SIZE + size);
2251 msg_set_key_gen(hdr, gen);
2252 msg_set_key_mode(hdr, mode);
2253
2254 data = msg_data(hdr);
2255 *((__be32 *)(data + TIPC_AEAD_ALG_NAME)) = htonl(skey->keylen);
2256 memcpy(data, skey->alg_name, TIPC_AEAD_ALG_NAME);
2257 memcpy(data + TIPC_AEAD_ALG_NAME + sizeof(__be32), skey->key,
2258 skey->keylen);
2259
2260 __skb_queue_head_init(&pkts);
2261 __skb_queue_tail(&pkts, skb);
2262 if (dnode)
2263 rc = tipc_node_xmit(net, &pkts, dnode, 0);
2264 else
2265 rc = tipc_bcast_xmit(net, &pkts, &cong_link_cnt);
2266
2267 return rc;
2268}
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
2280
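/**
 * tipc_crypto_key_rcv - Parse a session key received in a MSG_CRYPTO message
 * @rx: the RX crypto of the sending node
 * @hdr: the message header
 *
 * The key is validated, stored in the RX crypto and the RX worker is
 * scheduled to attach it.
 *
 * Return: true if the worker has been scheduled (it then owns the node
 * reference), otherwise false.
 */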
2281static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr)
2282{
2283 struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx;
2284 struct tipc_aead_key *skey = NULL;
2285 u16 key_gen = msg_key_gen(hdr);
2286 u16 size = msg_data_sz(hdr);
2287 u8 *data = msg_data(hdr);
2288 unsigned int keylen;
2289
2290
2291 if (unlikely(size < sizeof(struct tipc_aead_key) + TIPC_AEAD_KEYLEN_MIN)) {
2292 pr_debug("%s: message data size is too small\n", rx->name);
2293 goto exit;
2294 }
2295
2296 keylen = ntohl(*((__be32 *)(data + TIPC_AEAD_ALG_NAME)));
2297
2298
2299 if (unlikely(size != keylen + sizeof(struct tipc_aead_key) ||
2300 keylen > TIPC_AEAD_KEY_SIZE_MAX)) {
2301 pr_debug("%s: invalid MSG_CRYPTO key size\n", rx->name);
2302 goto exit;
2303 }
2304
2305 spin_lock(&rx->lock);
2306 if (unlikely(rx->skey || (key_gen == rx->key_gen && rx->key.keys))) {
		pr_err("%s: key already exists <%p>, gen %d vs %d\n", rx->name,
2308 rx->skey, key_gen, rx->key_gen);
2309 goto exit_unlock;
2310 }
2311
2312
2313 skey = kmalloc(size, GFP_ATOMIC);
2314 if (unlikely(!skey)) {
2315 pr_err("%s: unable to allocate memory for skey\n", rx->name);
2316 goto exit_unlock;
2317 }
2318
2319
2320 skey->keylen = keylen;
2321 memcpy(skey->alg_name, data, TIPC_AEAD_ALG_NAME);
2322 memcpy(skey->key, data + TIPC_AEAD_ALG_NAME + sizeof(__be32),
2323 skey->keylen);
2324
2325 rx->key_gen = key_gen;
2326 rx->skey_mode = msg_key_mode(hdr);
2327 rx->skey = skey;
2328 rx->nokey = 0;
2329 mb();
2330
2331exit_unlock:
2332 spin_unlock(&rx->lock);
2333
2334exit:
2335
2336 if (likely(skey && queue_delayed_work(tx->wq, &rx->work, 0)))
2337 return true;
2338
2339 return false;
2340}
2341
2342
2343
2344
2345
2346
2347
2348
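/**
 * tipc_crypto_work_rx - The per-peer RX crypto worker
 * @work: the work_struct embedded in the RX crypto
 *
 * Distributes the local TX key to the peer when scheduled to do so and
 * attaches a session key received from that peer, rescheduling itself on
 * transient errors.
 */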
2349static void tipc_crypto_work_rx(struct work_struct *work)
2350{
2351 struct delayed_work *dwork = to_delayed_work(work);
2352 struct tipc_crypto *rx = container_of(dwork, struct tipc_crypto, work);
2353 struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx;
2354 unsigned long delay = msecs_to_jiffies(5000);
2355 bool resched = false;
2356 u8 key;
2357 int rc;
2358
2359
2360 if (atomic_cmpxchg(&rx->key_distr,
2361 KEY_DISTR_SCHED,
2362 KEY_DISTR_COMPL) == KEY_DISTR_SCHED) {
2363
2364 key = tx->key.pending ?: tx->key.active;
2365 rc = tipc_crypto_key_distr(tx, key, rx->node);
2366 if (unlikely(rc))
			pr_warn("%s: unable to distribute key[%d] to %s, err %d\n",
2368 tx->name, key, tipc_node_get_id_str(rx->node),
2369 rc);
2370
2371
2372 resched = true;
2373 } else {
2374 atomic_cmpxchg(&rx->key_distr, KEY_DISTR_COMPL, 0);
2375 }
2376
2377
2378 if (rx->skey) {
2379 rc = tipc_crypto_key_init(rx, rx->skey, rx->skey_mode, false);
2380 if (unlikely(rc < 0))
2381 pr_warn("%s: unable to attach received skey, err %d\n",
2382 rx->name, rc);
2383 switch (rc) {
2384 case -EBUSY:
2385 case -ENOMEM:
2386
2387 resched = true;
2388 break;
2389 default:
2390 synchronize_rcu();
2391 kfree(rx->skey);
2392 rx->skey = NULL;
2393 break;
2394 }
2395 }
2396
2397 if (resched && queue_delayed_work(tx->wq, &rx->work, delay))
2398 return;
2399
2400 tipc_node_put(rx->node);
2401}
2402
2403
2404
2405
2406
2407
2408
2409void tipc_crypto_rekeying_sched(struct tipc_crypto *tx, bool changed,
2410 u32 new_intv)
2411{
2412 unsigned long delay;
2413 bool now = false;
2414
2415 if (changed) {
2416 if (new_intv == TIPC_REKEYING_NOW)
2417 now = true;
2418 else
2419 tx->rekeying_intv = new_intv;
2420 cancel_delayed_work_sync(&tx->work);
2421 }
2422
2423 if (tx->rekeying_intv || now) {
2424 delay = (now) ? 0 : tx->rekeying_intv * 60 * 1000;
2425 queue_delayed_work(tx->wq, &tx->work, msecs_to_jiffies(delay));
2426 }
2427}
2428
2429
2430
2431
2432
2433
2434
2435
2436
2437
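/**
 * tipc_crypto_work_tx - The TX rekeying worker
 * @work: the work_struct embedded in the TX crypto
 *
 * Generates a new session key based on the current one, attaches it as the
 * pending TX key and distributes it to all peers, then reschedules itself
 * according to the rekeying interval.
 */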
2438static void tipc_crypto_work_tx(struct work_struct *work)
2439{
2440 struct delayed_work *dwork = to_delayed_work(work);
2441 struct tipc_crypto *tx = container_of(dwork, struct tipc_crypto, work);
2442 struct tipc_aead_key *skey = NULL;
2443 struct tipc_key key = tx->key;
2444 struct tipc_aead *aead;
2445 int rc = -ENOMEM;
2446
2447 if (unlikely(key.pending))
2448 goto resched;
2449
2450
2451 rcu_read_lock();
2452 aead = rcu_dereference(tx->aead[key.active ?: KEY_MASTER]);
2453 if (unlikely(!aead)) {
2454 rcu_read_unlock();
2455
2456 return;
2457 }
2458
2459
2460 skey = kmemdup(aead->key, tipc_aead_key_size(aead->key), GFP_ATOMIC);
2461 rcu_read_unlock();
2462
2463
2464 if (likely(skey)) {
2465 rc = tipc_aead_key_generate(skey) ?:
2466 tipc_crypto_key_init(tx, skey, PER_NODE_KEY, false);
2467 if (likely(rc > 0))
2468 rc = tipc_crypto_key_distr(tx, rc, NULL);
2469 kfree_sensitive(skey);
2470 }
2471
2472 if (unlikely(rc))
2473 pr_warn_ratelimited("%s: rekeying returns %d\n", tx->name, rc);
2474
2475resched:
2476
2477 tipc_crypto_rekeying_sched(tx, false, 0);
2478}
2479