#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
#include <linux/cryptohash.h>
#include <linux/fips.h>
#include <linux/ptrace.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/uuid.h>
#include <crypto/chacha.h>

#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/random.h>

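/*
 * Configuration information: sizes of the input and output entropy
 * pools (expressed as a shift of the size in bits, and as a word
 * count), and the number of bytes produced per extraction.
 */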
#define INPUT_POOL_SHIFT	12
#define INPUT_POOL_WORDS	(1 << (INPUT_POOL_SHIFT-5))
#define OUTPUT_POOL_SHIFT	10
#define OUTPUT_POOL_WORDS	(1 << (OUTPUT_POOL_SHIFT-5))
#define EXTRACT_SIZE		10

#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))

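/*
 * To allow fractional bits of entropy to be tracked, the
 * entropy_count field is denominated in units of 1/8th bits.
 */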
#define ENTROPY_SHIFT 3
#define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)

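/*
 * If the entropy count falls under this number of bits, then we
 * should wake up processes which are selecting or polling on write
 * access to /dev/random.
 */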
static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;

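/*
 * The pool is stirred as a twisted generalized feedback shift
 * register; the tap values below pick the feedback positions.  For
 * the 128-word input pool they correspond to the polynomial
 * x^128 + x^104 + x^76 + x^51 + x^25 + x + 1.
 */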
static const struct poolinfo {
	int poolbitshift, poolwords, poolbytes, poolfracbits;
#define S(x) ilog2(x)+5, (x), (x)*4, (x) << (ENTROPY_SHIFT+5)
	int tap1, tap2, tap3, tap4, tap5;
} poolinfo_table[] = {
	/* x^128 + x^104 + x^76 + x^51 + x^25 + x + 1 */
	{ S(128),	104,	76,	51,	25,	1 },
};

/*
 * Static global variables
 */
static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
static struct fasync_struct *fasync;

static DEFINE_SPINLOCK(random_ready_list_lock);
static LIST_HEAD(random_ready_list);

struct crng_state {
	__u32		state[16];
	unsigned long	init_time;
	spinlock_t	lock;
};

static struct crng_state primary_crng = {
	.lock = __SPIN_LOCK_UNLOCKED(primary_crng.lock),
};

/*
 * crng_init =  0 --> Uninitialized
 *		1 --> Initialized
 *		2 --> Initialized from input_pool
 *
 * crng_init is protected by primary_crng->lock, and only increases
 * its value (from 0->1->2).
 */
static int crng_init = 0;
#define crng_ready() (likely(crng_init > 1))
static int crng_init_cnt = 0;
static unsigned long crng_global_init_time = 0;
#define CRNG_INIT_CNT_THRESH (2*CHACHA_KEY_SIZE)
static void _extract_crng(struct crng_state *crng, __u8 out[CHACHA_BLOCK_SIZE]);
static void _crng_backtrack_protect(struct crng_state *crng,
				    __u8 tmp[CHACHA_BLOCK_SIZE], int used);
static void process_random_ready_list(void);
static void _get_random_bytes(void *buf, int nbytes);

static struct ratelimit_state unseeded_warning =
	RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
static struct ratelimit_state urandom_warning =
	RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);

static int ratelimit_disable __read_mostly;

module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");

/**********************************************************************
 *
 * OS independent entropy store.  Here are the functions which handle
 * storing entropy in an entropy pool.
 *
 **********************************************************************/

struct entropy_store;
struct entropy_store {
	/* read-only data: */
	const struct poolinfo *poolinfo;
	__u32 *pool;
	const char *name;

	/* read-write data: */
	spinlock_t lock;
	unsigned short add_ptr;
	unsigned short input_rotate;
	int entropy_count;
	unsigned int initialized:1;
	unsigned int last_data_init:1;
	__u8 last_data[EXTRACT_SIZE];
};

static ssize_t extract_entropy(struct entropy_store *r, void *buf,
			       size_t nbytes, int min, int rsvd);
static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
				size_t nbytes, int fips);

static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;

static struct entropy_store input_pool = {
	.poolinfo = &poolinfo_table[0],
	.name = "input",
	.lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
	.pool = input_pool_data
};

static __u32 const twist_table[8] = {
	0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
	0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };

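/*
 * This function adds bytes into the entropy "pool".  It does not
 * update the entropy estimate.  The caller should call
 * credit_entropy_bits if this is appropriate.
 *
 * The pool is stirred with a primitive polynomial of the appropriate
 * degree, and then twisted.  We twist by three bits at a time because
 * it's cheap to do so and helps slightly in the expected case where
 * the entropy is concentrated in the low-order bits.
 */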
static void _mix_pool_bytes(struct entropy_store *r, const void *in,
			    int nbytes)
{
	unsigned long i, tap1, tap2, tap3, tap4, tap5;
	int input_rotate;
	int wordmask = r->poolinfo->poolwords - 1;
	const char *bytes = in;
	__u32 w;

	tap1 = r->poolinfo->tap1;
	tap2 = r->poolinfo->tap2;
	tap3 = r->poolinfo->tap3;
	tap4 = r->poolinfo->tap4;
	tap5 = r->poolinfo->tap5;

	input_rotate = r->input_rotate;
	i = r->add_ptr;

	/* mix one byte at a time to simplify size handling and churn faster */
	while (nbytes--) {
		w = rol32(*bytes++, input_rotate);
		i = (i - 1) & wordmask;

		/* XOR in the various taps */
		w ^= r->pool[i];
		w ^= r->pool[(i + tap1) & wordmask];
		w ^= r->pool[(i + tap2) & wordmask];
		w ^= r->pool[(i + tap3) & wordmask];
		w ^= r->pool[(i + tap4) & wordmask];
		w ^= r->pool[(i + tap5) & wordmask];

		/* Mix the result back in with a twist so that
		 * backtracking is made more difficult. */
		r->pool[i] = (w >> 3) ^ twist_table[w & 7];

		/*
		 * Normally, we add 7 bits of rotation to the pool.
		 * At the beginning of the pool, add an extra 7 bits
		 * rotation, so that successive passes spread the
		 * input bits across the pool evenly.
		 */
		input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
	}

	r->input_rotate = input_rotate;
	r->add_ptr = i;
}

static void __mix_pool_bytes(struct entropy_store *r, const void *in,
			     int nbytes)
{
	trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
	_mix_pool_bytes(r, in, nbytes);
}

static void mix_pool_bytes(struct entropy_store *r, const void *in,
			   int nbytes)
{
	unsigned long flags;

	trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
	spin_lock_irqsave(&r->lock, flags);
	_mix_pool_bytes(r, in, nbytes);
	spin_unlock_irqrestore(&r->lock, flags);
}

struct fast_pool {
	__u32		pool[4];
	unsigned long	last;
	unsigned short	reg_idx;
	unsigned char	count;
};

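/*
 * This is a fast mixing routine used by the interrupt randomness
 * collector.  It's hardcoded for an 128 bit pool and assumes that any
 * locks that might be needed are taken by the caller.
 */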
static void fast_mix(struct fast_pool *f)
{
	__u32 a = f->pool[0],	b = f->pool[1];
	__u32 c = f->pool[2],	d = f->pool[3];

	a += b;			c += d;
	b = rol32(b, 6);	d = rol32(d, 27);
	d ^= a;			b ^= c;

	a += b;			c += d;
	b = rol32(b, 16);	d = rol32(d, 14);
	d ^= a;			b ^= c;

	a += b;			c += d;
	b = rol32(b, 6);	d = rol32(d, 27);
	d ^= a;			b ^= c;

	a += b;			c += d;
	b = rol32(b, 16);	d = rol32(d, 14);
	d ^= a;			b ^= c;

	f->pool[0] = a;  f->pool[1] = b;
	f->pool[2] = c;  f->pool[3] = d;
	f->count++;
}

static void process_random_ready_list(void)
{
	unsigned long flags;
	struct random_ready_callback *rdy, *tmp;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) {
		struct module *owner = rdy->owner;

		list_del_init(&rdy->list);
		rdy->func(rdy);
		module_put(owner);
	}
	spin_unlock_irqrestore(&random_ready_list_lock, flags);
}

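/*
 * Credit (or debit) the entropy store with n bits of entropy.
 * Use credit_entropy_bits_safe() if the value comes from userspace
 * and may be out of range.
 */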
static void credit_entropy_bits(struct entropy_store *r, int nbits)
{
	int entropy_count, orig, has_initialized = 0;
	const int pool_size = r->poolinfo->poolfracbits;
	int nfrac = nbits << ENTROPY_SHIFT;

	if (!nbits)
		return;

retry:
	entropy_count = orig = READ_ONCE(r->entropy_count);
	if (nfrac < 0) {
		/* Debit */
		entropy_count += nfrac;
	} else {
		/*
		 * Credit: we have to account for the possibility of
		 * overwriting already present entropy.  Even in the
		 * ideal case of pure Shannon entropy, new contributions
		 * approach the full value asymptotically:
		 *
		 * entropy <- entropy + (pool_size - entropy) *
		 *	(1 - exp(-add_entropy/pool_size))
		 *
		 * For add_entropy <= pool_size/2 then
		 * (1 - exp(-add_entropy/pool_size)) >=
		 *    (add_entropy/pool_size)*0.7869...
		 * so we can approximate the exponential with
		 * 3/4*add_entropy/pool_size and still be on the
		 * safe side by adding at most pool_size/2 at a time.
		 *
		 * The use of pool_size-2 in the while statement is to
		 * prevent rounding artifacts from making the loop
		 * arbitrarily long; this happens when the pool
		 * approaches its final value.
		 */
		int pnfrac = nfrac;
		const int s = r->poolinfo->poolbitshift + ENTROPY_SHIFT + 2;
		/* The +2 corresponds to the /4 in the denominator */

		do {
			unsigned int anfrac = min(pnfrac, pool_size/2);
			unsigned int add =
				((pool_size - entropy_count)*anfrac*3) >> s;

			entropy_count += add;
			pnfrac -= anfrac;
		} while (unlikely(entropy_count < pool_size-2 && pnfrac));
	}

	if (WARN_ON(entropy_count < 0)) {
		pr_warn("negative entropy/overflow: pool %s count %d\n",
			r->name, entropy_count);
		entropy_count = 0;
	} else if (entropy_count > pool_size)
		entropy_count = pool_size;
	if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
		goto retry;

	if (has_initialized) {
		r->initialized = 1;
		kill_fasync(&fasync, SIGIO, POLL_IN);
	}

	trace_credit_entropy_bits(r->name, nbits,
				  entropy_count >> ENTROPY_SHIFT, _RET_IP_);

	if (r == &input_pool) {
		int entropy_bits = entropy_count >> ENTROPY_SHIFT;

		if (crng_init < 2) {
			if (entropy_bits < 128)
				return;
			crng_reseed(&primary_crng, r);
			entropy_bits = ENTROPY_BITS(r);
		}
	}
}

static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
{
	const int nbits_max = r->poolinfo->poolwords * 32;

	if (nbits < 0)
		return -EINVAL;

	/* Cap the value to avoid overflows */
	nbits = min(nbits, nbits_max);

	credit_entropy_bits(r, nbits);
	return 0;
}

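/*********************************************************************
 *
 * CRNG using CHACHA20
 *
 *********************************************************************/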
#define CRNG_RESEED_INTERVAL (300*HZ)

static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);

#ifdef CONFIG_NUMA
/*
 * Hack to deal with crazy userspace programs when they are all trying
 * to access /dev/urandom in parallel.  The programs are almost
 * certainly doing something terribly wrong, but we'll work around
 * their brain damage.
 */
static struct crng_state **crng_node_pool __read_mostly;
#endif

static void invalidate_batched_entropy(void);
static void numa_crng_init(void);

static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
static int __init parse_trust_cpu(char *arg)
{
	return kstrtobool(arg, &trust_cpu);
}
early_param("random.trust_cpu", parse_trust_cpu);

static void crng_initialize(struct crng_state *crng)
{
	int		i;
	int		arch_init = 1;
	unsigned long	rv;

	memcpy(&crng->state[0], "expand 32-byte k", 16);
	if (crng == &primary_crng)
		_extract_entropy(&input_pool, &crng->state[4],
				 sizeof(__u32) * 12, 0);
	else
		_get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
	for (i = 4; i < 16; i++) {
		if (!arch_get_random_seed_long(&rv) &&
		    !arch_get_random_long(&rv)) {
			rv = random_get_entropy();
			arch_init = 0;
		}
		crng->state[i] ^= rv;
	}
	if (trust_cpu && arch_init && crng == &primary_crng) {
		invalidate_batched_entropy();
		numa_crng_init();
		crng_init = 2;
		pr_notice("crng done (trusting CPU's manufacturer)\n");
	}
	crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}

#ifdef CONFIG_NUMA
static void do_numa_crng_init(struct work_struct *work)
{
	int i;
	struct crng_state *crng;
	struct crng_state **pool;

	pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
	for_each_online_node(i) {
		crng = kmalloc_node(sizeof(struct crng_state),
				    GFP_KERNEL | __GFP_NOFAIL, i);
		spin_lock_init(&crng->lock);
		crng_initialize(crng);
		pool[i] = crng;
	}
	/* make the initialized states visible before publishing the pool */
	mb();
	if (cmpxchg(&crng_node_pool, NULL, pool)) {
		/* another CPU installed its pool first; free ours */
		for_each_node(i)
			kfree(pool[i]);
		kfree(pool);
	}
}

static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init);

static void numa_crng_init(void)
{
	schedule_work(&numa_crng_init_work);
}
#else
static void numa_crng_init(void) {}
#endif

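/*
 * crng_fast_load() can be called from the interrupt service path, so
 * it must not sleep; it uses a trylock on the primary_crng lock.  It
 * is used to quickly mix a little initial entropy into the CRNG key
 * during early boot, before the CRNG is fully seeded.
 */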
static int crng_fast_load(const char *cp, size_t len)
{
	unsigned long flags;
	char *p;

	if (!spin_trylock_irqsave(&primary_crng.lock, flags))
		return 0;
	if (crng_init != 0) {
		spin_unlock_irqrestore(&primary_crng.lock, flags);
		return 0;
	}
	p = (char *) &primary_crng.state[4];
	while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
		p[crng_init_cnt % CHACHA_KEY_SIZE] ^= *cp;
		cp++; crng_init_cnt++; len--;
	}
	spin_unlock_irqrestore(&primary_crng.lock, flags);
	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
		invalidate_batched_entropy();
		crng_init = 1;
		pr_notice("fast init done\n");
	}
	return 1;
}

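/*
 * crng_slow_load() is called by add_device_randomness, which has two
 * attributes.  (1) We can't trust the buffer passed to it is
 * guaranteed to be unpredictable (so it might not have any entropy at
 * all), and (2) it doesn't have the performance constraints of
 * crng_fast_load().
 *
 * So we do something more comprehensive which is guaranteed to touch
 * all of the primary_crng's state, and which uses a LFSR with a
 * period of 255 as part of the mixing algorithm.  Finally, we do
 * *not* advance crng_init_cnt, since the buffer we get may be
 * something like a fixed DMI table (for example), which might very
 * well be unique to the machine, but is otherwise unguessable.
 */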
static int crng_slow_load(const char *cp, size_t len)
{
	unsigned long flags;
	static unsigned char lfsr = 1;
	unsigned char tmp;
	unsigned i, max = CHACHA_KEY_SIZE;
	const char *src_buf = cp;
	char *dest_buf = (char *) &primary_crng.state[4];

	if (!spin_trylock_irqsave(&primary_crng.lock, flags))
		return 0;
	if (crng_init != 0) {
		spin_unlock_irqrestore(&primary_crng.lock, flags);
		return 0;
	}
	if (len > max)
		max = len;

	for (i = 0; i < max ; i++) {
		tmp = lfsr;
		lfsr >>= 1;
		if (tmp & 1)
			lfsr ^= 0xE1;
		tmp = dest_buf[i % CHACHA_KEY_SIZE];
		dest_buf[i % CHACHA_KEY_SIZE] ^= src_buf[i % len] ^ lfsr;
		lfsr += (tmp << 3) | (tmp >> 5);
	}
	spin_unlock_irqrestore(&primary_crng.lock, flags);
	return 1;
}

static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
{
	unsigned long	flags;
	int		i, num;
	union {
		__u8	block[CHACHA_BLOCK_SIZE];
		__u32	key[8];
	} buf;

	if (r) {
		num = extract_entropy(r, &buf, 32, 16, 0);
		if (num == 0)
			return;
	} else {
		_extract_crng(&primary_crng, buf.block);
		_crng_backtrack_protect(&primary_crng, buf.block,
					CHACHA_KEY_SIZE);
	}
	spin_lock_irqsave(&crng->lock, flags);
	for (i = 0; i < 8; i++) {
		unsigned long	rv;
		if (!arch_get_random_seed_long(&rv) &&
		    !arch_get_random_long(&rv))
			rv = random_get_entropy();
		crng->state[i+4] ^= buf.key[i] ^ rv;
	}
	memzero_explicit(&buf, sizeof(buf));
	crng->init_time = jiffies;
	spin_unlock_irqrestore(&crng->lock, flags);
	if (crng == &primary_crng && crng_init < 2) {
		invalidate_batched_entropy();
		numa_crng_init();
		crng_init = 2;
		process_random_ready_list();
		wake_up_interruptible(&crng_init_wait);
		kill_fasync(&fasync, SIGIO, POLL_IN);
		pr_notice("crng init done\n");
		if (unseeded_warning.missed) {
			pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
				  unseeded_warning.missed);
			unseeded_warning.missed = 0;
		}
		if (urandom_warning.missed) {
			pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
				  urandom_warning.missed);
			urandom_warning.missed = 0;
		}
	}
}

static void _extract_crng(struct crng_state *crng,
			  __u8 out[CHACHA_BLOCK_SIZE])
{
	unsigned long v, flags;

	if (crng_ready() &&
	    (time_after(crng_global_init_time, crng->init_time) ||
	     time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)))
		crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL);
	spin_lock_irqsave(&crng->lock, flags);
	if (arch_get_random_long(&v))
		crng->state[14] ^= v;
	chacha20_block(&crng->state[0], out);
	if (crng->state[12] == 0)
		crng->state[13]++;
	spin_unlock_irqrestore(&crng->lock, flags);
}

static void extract_crng(__u8 out[CHACHA_BLOCK_SIZE])
{
	struct crng_state *crng = NULL;

#ifdef CONFIG_NUMA
	if (crng_node_pool)
		crng = crng_node_pool[numa_node_id()];
	if (crng == NULL)
#endif
		crng = &primary_crng;
	_extract_crng(crng, out);
}

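/*
 * Use the leftover bytes from the CRNG block output (if there is
 * enough) to mutate the CRNG key to provide backtracking protection.
 */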
static void _crng_backtrack_protect(struct crng_state *crng,
				    __u8 tmp[CHACHA_BLOCK_SIZE], int used)
{
	unsigned long	flags;
	__u32		*s, *d;
	int		i;

	used = round_up(used, sizeof(__u32));
	if (used + CHACHA_KEY_SIZE > CHACHA_BLOCK_SIZE) {
		extract_crng(tmp);
		used = 0;
	}
	spin_lock_irqsave(&crng->lock, flags);
	s = (__u32 *) &tmp[used];
	d = &crng->state[4];
	for (i = 0; i < 8; i++)
		*d++ ^= *s++;
	spin_unlock_irqrestore(&crng->lock, flags);
}

static void crng_backtrack_protect(__u8 tmp[CHACHA_BLOCK_SIZE], int used)
{
	struct crng_state *crng = NULL;

#ifdef CONFIG_NUMA
	if (crng_node_pool)
		crng = crng_node_pool[numa_node_id()];
	if (crng == NULL)
#endif
		crng = &primary_crng;
	_crng_backtrack_protect(crng, tmp, used);
}

static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
{
	ssize_t ret = 0, i = CHACHA_BLOCK_SIZE;
	__u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
	int large_request = (nbytes > 256);

	while (nbytes) {
		if (large_request && need_resched()) {
			if (signal_pending(current)) {
				if (ret == 0)
					ret = -ERESTARTSYS;
				break;
			}
			schedule();
		}

		extract_crng(tmp);
		i = min_t(int, nbytes, CHACHA_BLOCK_SIZE);
		if (copy_to_user(buf, tmp, i)) {
			ret = -EFAULT;
			break;
		}

		nbytes -= i;
		buf += i;
		ret += i;
	}
	crng_backtrack_protect(tmp, i);

	/* Wipe data just written to memory */
	memzero_explicit(tmp, sizeof(tmp));

	return ret;
}

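/*********************************************************************
 *
 * Entropy input management
 *
 *********************************************************************/

/* There is one of these per entropy source */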
struct timer_rand_state {
	cycles_t last_time;
	long last_delta, last_delta2;
};

#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, }

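/*
 * Add device- or boot-specific data to the input pool to help
 * initialize it.
 *
 * None of this adds any entropy; it is meant to avoid the problem of
 * the entropy pool having similar initial state across largely
 * identical devices.
 */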
void add_device_randomness(const void *buf, unsigned int size)
{
	unsigned long time = random_get_entropy() ^ jiffies;
	unsigned long flags;

	if (!crng_ready() && size)
		crng_slow_load(buf, size);

	trace_add_device_randomness(size, _RET_IP_);
	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(&input_pool, buf, size);
	_mix_pool_bytes(&input_pool, &time, sizeof(time));
	spin_unlock_irqrestore(&input_pool.lock, flags);
}
EXPORT_SYMBOL(add_device_randomness);

static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE;

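/*
 * This function adds entropy to the entropy "pool" by using timing
 * delays.  It uses the timer_rand_state structure to make an estimate
 * of how many bits of entropy this call has added to the pool.
 *
 * The number "num" is also added to the pool - it should somehow describe
 * the type of event which just happened.  This is currently 0-255 for
 * keyboard scan codes, and 256 upwards for interrupts.
 */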
static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
{
	struct entropy_store	*r;
	struct {
		long jiffies;
		unsigned cycles;
		unsigned num;
	} sample;
	long delta, delta2, delta3;

	sample.jiffies = jiffies;
	sample.cycles = random_get_entropy();
	sample.num = num;
	r = &input_pool;
	mix_pool_bytes(r, &sample, sizeof(sample));

	/*
	 * Calculate number of bits of randomness we probably added.
	 * We take into account the first, second and third-order deltas
	 * in order to make our estimate.
	 */
	delta = sample.jiffies - state->last_time;
	state->last_time = sample.jiffies;

	delta2 = delta - state->last_delta;
	state->last_delta = delta;

	delta3 = delta2 - state->last_delta2;
	state->last_delta2 = delta2;

	if (delta < 0)
		delta = -delta;
	if (delta2 < 0)
		delta2 = -delta2;
	if (delta3 < 0)
		delta3 = -delta3;
	if (delta > delta2)
		delta = delta2;
	if (delta > delta3)
		delta = delta3;

	/*
	 * delta is now minimum absolute delta.
	 * Round down by 1 bit on general principles,
	 * and limit entropy estimate to 11 bits.
	 */
	credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
}

void add_input_randomness(unsigned int type, unsigned int code,
			  unsigned int value)
{
	static unsigned char last_value;

	/* ignore autorepeat and the like */
	if (value == last_value)
		return;

	last_value = value;
	add_timer_randomness(&input_timer_state,
			     (type << 4) ^ code ^ (code >> 4) ^ value);
	trace_add_input_randomness(ENTROPY_BITS(&input_pool));
}
EXPORT_SYMBOL_GPL(add_input_randomness);

static DEFINE_PER_CPU(struct fast_pool, irq_randomness);

#ifdef ADD_INTERRUPT_BENCH
static unsigned long avg_cycles, avg_deviation;

#define AVG_SHIFT 8     /* Exponential average factor k=1/256 */
#define FIXED_1_2 (1 << (AVG_SHIFT-1))

static void add_interrupt_bench(cycles_t start)
{
	long delta = random_get_entropy() - start;

	/* Use a weighted moving average */
	delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT);
	avg_cycles += delta;
	/* And average deviation */
	delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT);
	avg_deviation += delta;
}
#else
#define add_interrupt_bench(x)
#endif

static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
{
	__u32 *ptr = (__u32 *) regs;
	unsigned int idx;

	if (regs == NULL)
		return 0;
	idx = READ_ONCE(f->reg_idx);
	if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
		idx = 0;
	ptr += idx++;
	WRITE_ONCE(f->reg_idx, idx);
	return *ptr;
}

void add_interrupt_randomness(int irq, int irq_flags)
{
	struct entropy_store	*r;
	struct fast_pool	*fast_pool = this_cpu_ptr(&irq_randomness);
	struct pt_regs		*regs = get_irq_regs();
	unsigned long		now = jiffies;
	cycles_t		cycles = random_get_entropy();
	__u32			c_high, j_high;
	__u64			ip;
	unsigned long		seed;
	int			credit = 0;

	if (cycles == 0)
		cycles = get_reg(fast_pool, regs);
	c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
	j_high = (sizeof(now) > 4) ? now >> 32 : 0;
	fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
	fast_pool->pool[1] ^= now ^ c_high;
	ip = regs ? instruction_pointer(regs) : _RET_IP_;
	fast_pool->pool[2] ^= ip;
	fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
		get_reg(fast_pool, regs);

	fast_mix(fast_pool);
	add_interrupt_bench(cycles);

	if (unlikely(crng_init == 0)) {
		if ((fast_pool->count >= 64) &&
		    crng_fast_load((char *) fast_pool->pool,
				   sizeof(fast_pool->pool))) {
			fast_pool->count = 0;
			fast_pool->last = now;
		}
		return;
	}

	if ((fast_pool->count < 64) &&
	    !time_after(now, fast_pool->last + HZ))
		return;

	r = &input_pool;
	if (!spin_trylock(&r->lock))
		return;

	fast_pool->last = now;
	__mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));

	/*
	 * If we have an architectural seed generator, produce a seed and
	 * add it to the pool.  For the sake of paranoia don't let the
	 * architectural seed generator dominate the input from the
	 * interrupt noise.
	 */
	if (arch_get_random_seed_long(&seed)) {
		__mix_pool_bytes(r, &seed, sizeof(seed));
		credit = 1;
	}
	spin_unlock(&r->lock);

	fast_pool->count = 0;

	/* award one bit for the contents of the fast pool */
	credit_entropy_bits(r, credit + 1);
}
EXPORT_SYMBOL_GPL(add_interrupt_randomness);

#ifdef CONFIG_BLOCK
void add_disk_randomness(struct gendisk *disk)
{
	if (!disk || !disk->random)
		return;
	/* first major is 1, so we get >= 0x200 here */
	add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
	trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool));
}
EXPORT_SYMBOL_GPL(add_disk_randomness);
#endif

/*********************************************************************
 *
 * Entropy extraction routines
 *
 *********************************************************************/

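/*
 * This function decides how many bytes to actually take from the
 * given pool, and also debits the entropy count accordingly.
 */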
static size_t account(struct entropy_store *r, size_t nbytes, int min,
		      int reserved)
{
	int entropy_count, orig, have_bytes;
	size_t ibytes, nfrac;

	BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);

	/* Can we pull enough? */
retry:
	entropy_count = orig = READ_ONCE(r->entropy_count);
	ibytes = nbytes;
	/* never pull more than available */
	have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);

	if ((have_bytes -= reserved) < 0)
		have_bytes = 0;
	ibytes = min_t(size_t, ibytes, have_bytes);
	if (ibytes < min)
		ibytes = 0;

	if (WARN_ON(entropy_count < 0)) {
		pr_warn("negative entropy count: pool %s count %d\n",
			r->name, entropy_count);
		entropy_count = 0;
	}
	nfrac = ibytes << (ENTROPY_SHIFT + 3);
	if ((size_t) entropy_count > nfrac)
		entropy_count -= nfrac;
	else
		entropy_count = 0;

	if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
		goto retry;

	trace_debit_entropy(r->name, 8 * ibytes);
	if (ibytes && ENTROPY_BITS(r) < random_write_wakeup_bits) {
		wake_up_interruptible(&random_write_wait);
		kill_fasync(&fasync, SIGIO, POLL_OUT);
	}

	return ibytes;
}

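/*
 * This function does the actual extraction for extract_entropy.
 *
 * Note: we assume that .poolwords is a multiple of 16 words.
 */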
static void extract_buf(struct entropy_store *r, __u8 *out)
{
	int i;
	union {
		__u32 w[5];
		unsigned long l[LONGS(20)];
	} hash;
	__u32 workspace[SHA_WORKSPACE_WORDS];
	unsigned long flags;

	/*
	 * If we have an architectural hardware random number
	 * generator, use it for SHA's initial vector
	 */
	sha_init(hash.w);
	for (i = 0; i < LONGS(20); i++) {
		unsigned long v;
		if (!arch_get_random_long(&v))
			break;
		hash.l[i] = v;
	}

	/* Generate a hash across the pool, 16 words (512 bits) at a time */
	spin_lock_irqsave(&r->lock, flags);
	for (i = 0; i < r->poolinfo->poolwords; i += 16)
		sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);

	/*
	 * We mix the hash back into the pool to prevent backtracking
	 * attacks (where the attacker knows the state of the pool
	 * plus the current outputs, and attempts to find previous
	 * outputs), unless the hash function can be inverted. By
	 * mixing at least a SHA1 worth of hash data back, we make
	 * brute-forcing the feedback as hard as brute-forcing the
	 * hash.
	 */
	__mix_pool_bytes(r, hash.w, sizeof(hash.w));
	spin_unlock_irqrestore(&r->lock, flags);

	memzero_explicit(workspace, sizeof(workspace));

	/*
	 * In case the hash function has some recognizable output
	 * pattern, we fold it in half. Thus, we always feed back
	 * twice as much data as we output.
	 */
	hash.w[0] ^= hash.w[3];
	hash.w[1] ^= hash.w[4];
	hash.w[2] ^= rol32(hash.w[2], 16);

	memcpy(out, &hash, EXTRACT_SIZE);
	memzero_explicit(&hash, sizeof(hash));
}

static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
				size_t nbytes, int fips)
{
	ssize_t ret = 0, i;
	__u8 tmp[EXTRACT_SIZE];
	unsigned long flags;

	while (nbytes) {
		extract_buf(r, tmp);

		if (fips) {
			spin_lock_irqsave(&r->lock, flags);
			if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
				panic("Hardware RNG duplicated output!\n");
			memcpy(r->last_data, tmp, EXTRACT_SIZE);
			spin_unlock_irqrestore(&r->lock, flags);
		}
		i = min_t(int, nbytes, EXTRACT_SIZE);
		memcpy(buf, tmp, i);
		nbytes -= i;
		buf += i;
		ret += i;
	}

	/* Wipe data just returned from memory */
	memzero_explicit(tmp, sizeof(tmp));

	return ret;
}

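/*
 * This function extracts randomness from the "entropy pool", and
 * returns it in a buffer.
 *
 * The min parameter specifies the minimum amount we can pull before
 * failing to avoid races that defeat catastrophic reseeding while the
 * reserved parameter indicates how much entropy we must leave in the
 * pool after each pull to avoid starving other readers.
 */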
static ssize_t extract_entropy(struct entropy_store *r, void *buf,
			       size_t nbytes, int min, int reserved)
{
	__u8 tmp[EXTRACT_SIZE];
	unsigned long flags;

	/* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
	if (fips_enabled) {
		spin_lock_irqsave(&r->lock, flags);
		if (!r->last_data_init) {
			r->last_data_init = 1;
			spin_unlock_irqrestore(&r->lock, flags);
			trace_extract_entropy(r->name, EXTRACT_SIZE,
					      ENTROPY_BITS(r), _RET_IP_);
			extract_buf(r, tmp);
			spin_lock_irqsave(&r->lock, flags);
			memcpy(r->last_data, tmp, EXTRACT_SIZE);
		}
		spin_unlock_irqrestore(&r->lock, flags);
	}

	trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
	nbytes = account(r, nbytes, min, reserved);

	return _extract_entropy(r, buf, nbytes, fips_enabled);
}

#define warn_unseeded_randomness(previous) \
	_warn_unseeded_randomness(__func__, (void *) _RET_IP_, (previous))

static void _warn_unseeded_randomness(const char *func_name, void *caller,
				      void **previous)
{
#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
	const bool print_once = false;
#else
	static bool print_once __read_mostly;
#endif

	if (print_once ||
	    crng_ready() ||
	    (previous && (caller == READ_ONCE(*previous))))
		return;
	WRITE_ONCE(*previous, caller);
#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
	print_once = true;
#endif
	if (__ratelimit(&unseeded_warning))
		printk_deferred(KERN_NOTICE "random: %s called from %pS "
				"with crng_init=%d\n", func_name, caller,
				crng_init);
}

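/*
 * This function is the exported kernel interface.  It returns some
 * number of good random numbers, suitable for key generation, seeding
 * TCP sequence numbers, etc.  It does not rely on the hardware random
 * number generator.  For random bytes direct from the hardware RNG
 * (when available), use get_random_bytes_arch(). In order to ensure
 * that the randomness provided by this function is okay, the function
 * wait_for_random_bytes() should be called and return 0 at least once
 * at any point prior.
 */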
static void _get_random_bytes(void *buf, int nbytes)
{
	__u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);

	trace_get_random_bytes(nbytes, _RET_IP_);

	while (nbytes >= CHACHA_BLOCK_SIZE) {
		extract_crng(buf);
		buf += CHACHA_BLOCK_SIZE;
		nbytes -= CHACHA_BLOCK_SIZE;
	}

	if (nbytes > 0) {
		extract_crng(tmp);
		memcpy(buf, tmp, nbytes);
		crng_backtrack_protect(tmp, nbytes);
	} else
		crng_backtrack_protect(tmp, CHACHA_BLOCK_SIZE);
	memzero_explicit(tmp, sizeof(tmp));
}

void get_random_bytes(void *buf, int nbytes)
{
	static void *previous;

	warn_unseeded_randomness(&previous);
	_get_random_bytes(buf, nbytes);
}
EXPORT_SYMBOL(get_random_bytes);

/*
 * Each time the timer fires, we expect that we got an unpredictable
 * jump in the cycle counter.  Even if the timer is running on another
 * CPU, the timer activity will be touching the stack of the CPU that
 * is generating entropy.
 *
 * Note that we don't re-arm the timer in the timer itself - we are
 * happy to be scheduled away, since that just makes the load more
 * complex, but we do not want the timer to keep ticking unless the
 * entropy loop is running.  So the re-arming always happens in the
 * entropy loop itself.
 */
static void entropy_timer(struct timer_list *t)
{
	credit_entropy_bits(&input_pool, 1);
}

/*
 * If we have an actual cycle counter, see if we can
 * generate enough entropy with timing noise
 */
static void try_to_generate_entropy(void)
{
	struct {
		unsigned long now;
		struct timer_list timer;
	} stack;

	stack.now = random_get_entropy();

	/* Slow counter - or none. Don't even bother */
	if (stack.now == random_get_entropy())
		return;

	timer_setup_on_stack(&stack.timer, entropy_timer, 0);
	while (!crng_ready()) {
		if (!timer_pending(&stack.timer))
			mod_timer(&stack.timer, jiffies+1);
		mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
		schedule();
		stack.now = random_get_entropy();
	}

	del_timer_sync(&stack.timer);
	destroy_timer_on_stack(&stack.timer);
	mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
}

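/*
 * Wait for the urandom pool to be seeded and thus guaranteed to supply
 * cryptographically secure random numbers. This applies to: the /dev/urandom
 * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
 * family of functions. Using any of these functions without first calling
 * this function forfeits the guarantee of security.
 *
 * Returns: 0 if the urandom pool has been seeded.
 *          -ERESTARTSYS if the function was interrupted by a signal.
 */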
int wait_for_random_bytes(void)
{
	if (likely(crng_ready()))
		return 0;

	do {
		int ret;
		ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
		if (ret)
			return ret > 0 ? 0 : ret;

		try_to_generate_entropy();
	} while (!crng_ready());

	return 0;
}
EXPORT_SYMBOL(wait_for_random_bytes);

/*
 * Returns whether or not the urandom pool has been seeded and thus guaranteed
 * to supply cryptographically secure random numbers. This applies to: the
 * /dev/urandom device, the get_random_bytes function, and the
 * get_random_{u32,u64,int,long} family of functions.
 *
 * Returns: true if the urandom pool has been seeded.
 *          false if the urandom pool has not been seeded.
 */
bool rng_is_initialized(void)
{
	return crng_ready();
}
EXPORT_SYMBOL(rng_is_initialized);

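/*
 * Add a callback function that will be invoked when the nonblocking
 * pool is initialised.
 *
 * returns: 0 if callback is successfully added
 *	    -EALREADY if pool is already initialised (callback not called)
 *	    -ENOENT if module for callback is not alive
 */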
int add_random_ready_callback(struct random_ready_callback *rdy)
{
	struct module *owner;
	unsigned long flags;
	int err = -EALREADY;

	if (crng_ready())
		return err;

	owner = rdy->owner;
	if (!try_module_get(owner))
		return -ENOENT;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	if (crng_ready())
		goto out;

	owner = NULL;

	list_add(&rdy->list, &random_ready_list);
	err = 0;

out:
	spin_unlock_irqrestore(&random_ready_list_lock, flags);

	module_put(owner);

	return err;
}
EXPORT_SYMBOL(add_random_ready_callback);

/*
 * Delete a previously registered readiness callback function.
 */
void del_random_ready_callback(struct random_ready_callback *rdy)
{
	unsigned long flags;
	struct module *owner = NULL;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	if (!list_empty(&rdy->list)) {
		list_del_init(&rdy->list);
		owner = rdy->owner;
	}
	spin_unlock_irqrestore(&random_ready_list_lock, flags);

	module_put(owner);
}
EXPORT_SYMBOL(del_random_ready_callback);

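/*
 * This function will use the architecture-specific hardware random
 * number generator if it is available. It is not recommended for
 * use. Use get_random_bytes() instead. It returns the number of
 * bytes filled in.
 */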
int __must_check get_random_bytes_arch(void *buf, int nbytes)
{
	int left = nbytes;
	char *p = buf;

	trace_get_random_bytes_arch(left, _RET_IP_);
	while (left) {
		unsigned long v;
		int chunk = min_t(int, left, sizeof(unsigned long));

		if (!arch_get_random_long(&v))
			break;

		memcpy(p, &v, chunk);
		p += chunk;
		left -= chunk;
	}

	return nbytes - left;
}
EXPORT_SYMBOL(get_random_bytes_arch);

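/*
 * init_std_data - initialize pool with system data
 *
 * @r: pool to initialize
 *
 * This function mixes the current time, arch-provided random words,
 * and the utsname() system data into the pool to prepare it for use.
 * The pool is not cleared, as that can only decrease its entropy.
 */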
static void __init init_std_data(struct entropy_store *r)
{
	int i;
	ktime_t now = ktime_get_real();
	unsigned long rv;

	mix_pool_bytes(r, &now, sizeof(now));
	for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) {
		if (!arch_get_random_seed_long(&rv) &&
		    !arch_get_random_long(&rv))
			rv = random_get_entropy();
		mix_pool_bytes(r, &rv, sizeof(rv));
	}
	mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
}

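/*
 * Note that setup_arch() may call add_device_randomness()
 * long before we get here. This allows seeding of the pools
 * with some platform dependent data very early in the boot
 * process. But it limits our options here. We must use
 * statically allocated structures that already have all
 * initializations complete at compile time. We should also
 * take care not to overwrite the precious per platform data
 * we were given.
 */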
int __init rand_initialize(void)
{
	init_std_data(&input_pool);
	crng_initialize(&primary_crng);
	crng_global_init_time = jiffies;
	if (ratelimit_disable) {
		urandom_warning.interval = 0;
		unseeded_warning.interval = 0;
	}
	return 0;
}

#ifdef CONFIG_BLOCK
void rand_initialize_disk(struct gendisk *disk)
{
	struct timer_rand_state *state;

	/*
	 * If kzalloc returns null, we just won't use that entropy
	 * source.
	 */
	state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
	if (state) {
		state->last_time = INITIAL_JIFFIES;
		disk->random = state;
	}
}
#endif

static ssize_t
urandom_read_nowarn(struct file *file, char __user *buf, size_t nbytes,
		    loff_t *ppos)
{
	int ret;

	nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
	ret = extract_crng_user(buf, nbytes);
	trace_urandom_read(8 * nbytes, 0, ENTROPY_BITS(&input_pool));
	return ret;
}

static ssize_t
urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
	unsigned long flags;
	static int maxwarn = 10;

	if (!crng_ready() && maxwarn > 0) {
		maxwarn--;
		if (__ratelimit(&urandom_warning))
			pr_notice("%s: uninitialized urandom read (%zd bytes read)\n",
				  current->comm, nbytes);
		spin_lock_irqsave(&primary_crng.lock, flags);
		crng_init_cnt = 0;
		spin_unlock_irqrestore(&primary_crng.lock, flags);
	}

	return urandom_read_nowarn(file, buf, nbytes, ppos);
}

static ssize_t
random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
	int ret;

	ret = wait_for_random_bytes();
	if (ret != 0)
		return ret;
	return urandom_read_nowarn(file, buf, nbytes, ppos);
}

static __poll_t
random_poll(struct file *file, poll_table * wait)
{
	__poll_t mask;

	poll_wait(file, &crng_init_wait, wait);
	poll_wait(file, &random_write_wait, wait);
	mask = 0;
	if (crng_ready())
		mask |= EPOLLIN | EPOLLRDNORM;
	if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
		mask |= EPOLLOUT | EPOLLWRNORM;
	return mask;
}

static int
write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
{
	size_t bytes;
	__u32 t, buf[16];
	const char __user *p = buffer;

	while (count > 0) {
		int b, i = 0;

		bytes = min(count, sizeof(buf));
		if (copy_from_user(&buf, p, bytes))
			return -EFAULT;

		for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) {
			if (!arch_get_random_int(&t))
				break;
			buf[i] ^= t;
		}

		count -= bytes;
		p += bytes;

		mix_pool_bytes(r, buf, bytes);
		cond_resched();
	}

	return 0;
}

static ssize_t random_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	ssize_t ret;

	ret = write_pool(&input_pool, buffer, count);
	if (ret)
		return ret;

	return (ssize_t)count;
}

static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	int size, ent_count;
	int __user *p = (int __user *)arg;
	int retval;

	switch (cmd) {
	case RNDGETENTCNT:
		/* inherently racy, no point locking */
		ent_count = ENTROPY_BITS(&input_pool);
		if (put_user(ent_count, p))
			return -EFAULT;
		return 0;
	case RNDADDTOENTCNT:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p))
			return -EFAULT;
		return credit_entropy_bits_safe(&input_pool, ent_count);
	case RNDADDENTROPY:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p++))
			return -EFAULT;
		if (ent_count < 0)
			return -EINVAL;
		if (get_user(size, p++))
			return -EFAULT;
		retval = write_pool(&input_pool, (const char __user *)p,
				    size);
		if (retval < 0)
			return retval;
		return credit_entropy_bits_safe(&input_pool, ent_count);
	case RNDZAPENTCNT:
	case RNDCLEARPOOL:
		/*
		 * Clear the entropy pool counters. We no longer clear
		 * the entropy pool, as that's silly.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		input_pool.entropy_count = 0;
		return 0;
	case RNDRESEEDCRNG:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (crng_init < 2)
			return -ENODATA;
		crng_reseed(&primary_crng, NULL);
		crng_global_init_time = jiffies - 1;
		return 0;
	default:
		return -EINVAL;
	}
}

static int random_fasync(int fd, struct file *filp, int on)
{
	return fasync_helper(fd, filp, on, &fasync);
}

const struct file_operations random_fops = {
	.read  = random_read,
	.write = random_write,
	.poll  = random_poll,
	.unlocked_ioctl = random_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
};

const struct file_operations urandom_fops = {
	.read  = urandom_read,
	.write = random_write,
	.unlocked_ioctl = random_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
};

SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
		unsigned int, flags)
{
	int ret;

	if (flags & ~(GRND_NONBLOCK|GRND_RANDOM|GRND_INSECURE))
		return -EINVAL;

	/*
	 * Requesting insecure and blocking randomness at the same time makes
	 * no sense.
	 */
	if ((flags & (GRND_INSECURE|GRND_RANDOM)) == (GRND_INSECURE|GRND_RANDOM))
		return -EINVAL;

	if (count > INT_MAX)
		count = INT_MAX;

	if (!(flags & GRND_INSECURE) && !crng_ready()) {
		if (flags & GRND_NONBLOCK)
			return -EAGAIN;
		ret = wait_for_random_bytes();
		if (unlikely(ret))
			return ret;
	}
	return urandom_read_nowarn(NULL, buf, count, NULL);
}

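/********************************************************************
 *
 * Sysctl interface
 *
 ********************************************************************/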
#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int min_write_thresh;
static int max_write_thresh = INPUT_POOL_WORDS * 32;
static int random_min_urandom_seed = 60;
static char sysctl_bootid[16];

/*
 * This function is used to return both the bootid UUID, and random
 * UUID.  The difference is in whether table->data is NULL; if it is,
 * then a new UUID is generated and returned to the user.
 *
 * If the user accesses "uuid", then be sure to return a random UUID
 * each time.
 */
static int proc_do_uuid(struct ctl_table *table, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table fake_table;
	unsigned char buf[64], tmp_uuid[16], *uuid;

	uuid = table->data;
	if (!uuid) {
		uuid = tmp_uuid;
		generate_random_uuid(uuid);
	} else {
		static DEFINE_SPINLOCK(bootid_spinlock);

		spin_lock(&bootid_spinlock);
		if (!uuid[8])
			generate_random_uuid(uuid);
		spin_unlock(&bootid_spinlock);
	}

	sprintf(buf, "%pU", uuid);

	fake_table.data = buf;
	fake_table.maxlen = sizeof(buf);

	return proc_dostring(&fake_table, write, buffer, lenp, ppos);
}

/*
 * Return entropy available scaled to integral bits
 */
static int proc_do_entropy(struct ctl_table *table, int write,
			   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table fake_table;
	int entropy_count;

	entropy_count = *(int *)table->data >> ENTROPY_SHIFT;

	fake_table.data = &entropy_count;
	fake_table.maxlen = sizeof(entropy_count);

	return proc_dointvec(&fake_table, write, buffer, lenp, ppos);
}

static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
extern struct ctl_table random_table[];
struct ctl_table random_table[] = {
	{
		.procname	= "poolsize",
		.data		= &sysctl_poolsize,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "entropy_avail",
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_do_entropy,
		.data		= &input_pool.entropy_count,
	},
	{
		.procname	= "write_wakeup_threshold",
		.data		= &random_write_wakeup_bits,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_write_thresh,
		.extra2		= &max_write_thresh,
	},
	{
		.procname	= "urandom_min_reseed_secs",
		.data		= &random_min_urandom_seed,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "boot_id",
		.data		= &sysctl_bootid,
		.maxlen		= 16,
		.mode		= 0444,
		.proc_handler	= proc_do_uuid,
	},
	{
		.procname	= "uuid",
		.maxlen		= 16,
		.mode		= 0444,
		.proc_handler	= proc_do_uuid,
	},
#ifdef ADD_INTERRUPT_BENCH
	{
		.procname	= "add_interrupt_avg_cycles",
		.data		= &avg_cycles,
		.maxlen		= sizeof(avg_cycles),
		.mode		= 0444,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "add_interrupt_avg_deviation",
		.data		= &avg_deviation,
		.maxlen		= sizeof(avg_deviation),
		.mode		= 0444,
		.proc_handler	= proc_doulongvec_minmax,
	},
#endif
	{ }
};
#endif	/* CONFIG_SYSCTL */

struct batched_entropy {
	union {
		u64 entropy_u64[CHACHA_BLOCK_SIZE / sizeof(u64)];
		u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)];
	};
	unsigned int position;
	spinlock_t batch_lock;
};

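/*
 * Get a random word for internal kernel use only. The quality of the random
 * number is either as good as RDRAND or as good as /dev/urandom, with the
 * goal of being quite fast and not depleting entropy. In order to ensure
 * that the randomness provided by this function is okay, the function
 * wait_for_random_bytes() should be called and return 0 at least once
 * at any point prior.
 */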
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
	.batch_lock	= __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
};

u64 get_random_u64(void)
{
	u64 ret;
	unsigned long flags;
	struct batched_entropy *batch;
	static void *previous;

#if BITS_PER_LONG == 64
	if (arch_get_random_long((unsigned long *)&ret))
		return ret;
#else
	if (arch_get_random_long((unsigned long *)&ret) &&
	    arch_get_random_long((unsigned long *)&ret + 1))
		return ret;
#endif

	warn_unseeded_randomness(&previous);

	batch = raw_cpu_ptr(&batched_entropy_u64);
	spin_lock_irqsave(&batch->batch_lock, flags);
	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
		extract_crng((u8 *)batch->entropy_u64);
		batch->position = 0;
	}
	ret = batch->entropy_u64[batch->position++];
	spin_unlock_irqrestore(&batch->batch_lock, flags);
	return ret;
}
EXPORT_SYMBOL(get_random_u64);

static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
	.batch_lock	= __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
};
u32 get_random_u32(void)
{
	u32 ret;
	unsigned long flags;
	struct batched_entropy *batch;
	static void *previous;

	if (arch_get_random_int(&ret))
		return ret;

	warn_unseeded_randomness(&previous);

	batch = raw_cpu_ptr(&batched_entropy_u32);
	spin_lock_irqsave(&batch->batch_lock, flags);
	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
		extract_crng((u8 *)batch->entropy_u32);
		batch->position = 0;
	}
	ret = batch->entropy_u32[batch->position++];
	spin_unlock_irqrestore(&batch->batch_lock, flags);
	return ret;
}
EXPORT_SYMBOL(get_random_u32);

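/* It's important to invalidate all potential batched entropy that might
 * be stored before the crng is initialized, which we can do lazily by
 * simply resetting the counter to zero so that it's re-extracted on the
 * next usage. */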
static void invalidate_batched_entropy(void)
{
	int cpu;
	unsigned long flags;

	for_each_possible_cpu (cpu) {
		struct batched_entropy *batched_entropy;

		/* the irqsave/irqrestore pair deliberately spans both
		 * per-cpu batch locks */
		batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
		spin_lock_irqsave(&batched_entropy->batch_lock, flags);
		batched_entropy->position = 0;
		spin_unlock(&batched_entropy->batch_lock);

		batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
		spin_lock(&batched_entropy->batch_lock);
		batched_entropy->position = 0;
		spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
	}
}

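/**
 * randomize_page - Generate a random, page aligned address
 * @start:	The smallest acceptable address the caller will take.
 * @range:	The size of the area, starting at @start, within which the
 *		random address must fall.
 *
 * If @start + @range would overflow, @range is capped.
 *
 * NOTE: Historical use of randomize_range, which this replaces, presumed that
 * @start was already page aligned.  We now align it regardless.
 *
 * Return: A page aligned address within [start, start + range).  On error,
 * @start is returned.
 */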
unsigned long
randomize_page(unsigned long start, unsigned long range)
{
	if (!PAGE_ALIGNED(start)) {
		range -= PAGE_ALIGN(start) - start;
		start = PAGE_ALIGN(start);
	}

	if (start > ULONG_MAX - range)
		range = ULONG_MAX - start;

	range >>= PAGE_SHIFT;

	if (range == 0)
		return start;

	return start + (get_random_long() % range << PAGE_SHIFT);
}

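/* Interface for in-kernel drivers of true hardware RNGs.
 * Those devices may produce endless random bits and will be throttled
 * when our pool is full.
 */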
void add_hwgenerator_randomness(const char *buffer, size_t count,
				size_t entropy)
{
	struct entropy_store *poolp = &input_pool;

	if (unlikely(crng_init == 0)) {
		crng_fast_load(buffer, count);
		return;
	}

	/* Suspend writing if we're above the trickle threshold.
	 * We'll be woken up again once below random_write_wakeup_bits,
	 * or when the calling thread is about to terminate.
	 */
	wait_event_interruptible(random_write_wait, kthread_should_stop() ||
			ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
	mix_pool_bytes(poolp, buffer, count);
	credit_entropy_bits(poolp, entropy);
}
EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);

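/* Handle random seed passed by bootloader.
 * If the seed is trustworthy, it would be regarded as hardware RNGs. Otherwise
 * it would be regarded as device data.
 * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER.
 */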
void add_bootloader_randomness(const void *buf, unsigned int size)
{
	if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER))
		add_hwgenerator_randomness(buf, size, size * 8);
	else
		add_device_randomness(buf, size);
}
EXPORT_SYMBOL_GPL(add_bootloader_randomness);