/*
 * random.c -- A strong random number generator
 */
#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
#include <linux/cryptohash.h>
#include <linux/fips.h>
#include <linux/ptrace.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/uuid.h>
#include <crypto/chacha.h>

#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/random.h>

/* #define ADD_INTERRUPT_BENCH */

/*
 * Configuration information
 */
#define INPUT_POOL_SHIFT	12
#define INPUT_POOL_WORDS	(1 << (INPUT_POOL_SHIFT-5))
#define OUTPUT_POOL_SHIFT	10
#define OUTPUT_POOL_WORDS	(1 << (OUTPUT_POOL_SHIFT-5))
#define SEC_XFER_SIZE		512
#define EXTRACT_SIZE		10

#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))

/*
 * To allow fractional bits to be tracked, the entropy_count field is
 * denominated in units of 1/8th bits.
 *
 * 2*(ENTROPY_SHIFT + poolbitshift) must be <= 31, or the multiply in
 * credit_entropy_bits() needs to be 64 bits wide.
 */
#define ENTROPY_SHIFT 3
#define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)

/*
 * The minimum number of bits of entropy before we wake up a read on
 * /dev/random.  Should be enough to do a significant reseed.
 */
static int random_read_wakeup_bits = 64;

/*
 * If the entropy count falls under this number of bits, then we
 * should wake up processes which are selecting or polling on write
 * access to /dev/random.
 */
static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;

/*
 * The pool is stirred as a twisted Generalized Feedback Shift Register.
 * Each entry below gives a pool size and its feedback taps; the
 * corresponding polynomial over GF(2) is listed beside each entry.
 * The taps are roughly evenly spaced, with the last tap at 1 to spread
 * the twist across the pool as fast as possible.  (See M. Matsumoto &
 * Y. Kurita, "Twisted GFSR generators", ACM TOMACS 2(3):179-194, 1992,
 * for the underlying construction.)
 *
 * The mixing operation is much less sensitive than the output hash,
 * where we use SHA-1.  All we ask of the mixing operation is that it
 * be a good non-cryptographic hash: as long as distinct inputs drive
 * the pool into distinct states, the input entropy is preserved.
 */
static const struct poolinfo {
	int poolbitshift, poolwords, poolbytes, poolfracbits;
#define S(x) ilog2(x)+5, (x), (x)*4, (x) << (ENTROPY_SHIFT+5)
	int tap1, tap2, tap3, tap4, tap5;
} poolinfo_table[] = {
	/* x^128 + x^104 + x^76 + x^51 + x^25 + x + 1 */
	{ S(128), 104, 76, 51, 25, 1 },
	/* x^32 + x^26 + x^19 + x^14 + x^7 + x + 1 */
	{ S(32), 26, 19, 14, 7, 1 },
#if 0
	/* retained for reference only: */

	/* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 */
	{ S(2048), 1638, 1231, 819, 411, 1 },

	/* x^1024 + x^817 + x^615 + x^412 + x^204 + x + 1 */
	{ S(1024), 817, 615, 412, 204, 1 },

	/* x^1024 + x^819 + x^616 + x^410 + x^207 + x^2 + 1 */
	{ S(1024), 819, 616, 410, 207, 2 },

	/* x^512 + x^411 + x^308 + x^208 + x^104 + x + 1 */
	{ S(512), 411, 308, 208, 104, 1 },

	/* x^512 + x^409 + x^307 + x^206 + x^102 + x^2 + 1 */
	{ S(512), 409, 307, 206, 102, 2 },
	/* x^512 + x^409 + x^309 + x^205 + x^103 + x^2 + 1 */
	{ S(512), 409, 309, 205, 103, 2 },

	/* x^256 + x^205 + x^155 + x^101 + x^52 + x + 1 */
	{ S(256), 205, 155, 101, 52, 1 },

	/* x^128 + x^103 + x^78 + x^51 + x^27 + x^2 + 1 */
	{ S(128), 103, 78, 51, 27, 2 },

	/* x^64 + x^52 + x^39 + x^26 + x^14 + x + 1 */
	{ S(64), 52, 39, 26, 14, 1 },
#endif
};

/*
 * Static global variables
 */
static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
static struct fasync_struct *fasync;

static DEFINE_SPINLOCK(random_ready_list_lock);
static LIST_HEAD(random_ready_list);

struct crng_state {
	__u32 state[16];
	unsigned long init_time;
	spinlock_t lock;
};

static struct crng_state primary_crng = {
	.lock = __SPIN_LOCK_UNLOCKED(primary_crng.lock),
};

/*
 * crng_init =  0 --> Uninitialized
 *		1 --> Initialized
 *		2 --> Initialized from input_pool
 *
 * crng_init is protected by primary_crng->lock, and only increases
 * from 0->1->2.
 */
static int crng_init = 0;
#define crng_ready() (likely(crng_init > 1))
static int crng_init_cnt = 0;
static unsigned long crng_global_init_time = 0;
#define CRNG_INIT_CNT_THRESH (2*CHACHA_KEY_SIZE)
static void _extract_crng(struct crng_state *crng, __u8 out[CHACHA_BLOCK_SIZE]);
static void _crng_backtrack_protect(struct crng_state *crng,
				    __u8 tmp[CHACHA_BLOCK_SIZE], int used);
static void process_random_ready_list(void);
static void _get_random_bytes(void *buf, int nbytes);

static struct ratelimit_state unseeded_warning =
	RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
static struct ratelimit_state urandom_warning =
	RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);

static int ratelimit_disable __read_mostly;

module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");

/**********************************************************************
 *
 * OS independent entropy store.  Here are the functions which handle
 * storing entropy in an entropy pool.
 *
 **********************************************************************/

struct entropy_store;
struct entropy_store {
	/* read-only data: */
	const struct poolinfo *poolinfo;
	__u32 *pool;
	const char *name;
	struct entropy_store *pull;
	struct work_struct push_work;

	/* read-write data: */
	unsigned long last_pulled;
	spinlock_t lock;
	unsigned short add_ptr;
	unsigned short input_rotate;
	int entropy_count;
	unsigned int initialized:1;
	unsigned int last_data_init:1;
	__u8 last_data[EXTRACT_SIZE];
};

static ssize_t extract_entropy(struct entropy_store *r, void *buf,
			       size_t nbytes, int min, int rsvd);
static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
				size_t nbytes, int fips);

static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
static void push_to_pool(struct work_struct *work);
static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;

static struct entropy_store input_pool = {
	.poolinfo = &poolinfo_table[0],
	.name = "input",
	.lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
	.pool = input_pool_data
};

static struct entropy_store blocking_pool = {
	.poolinfo = &poolinfo_table[1],
	.name = "blocking",
	.pull = &input_pool,
	.lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock),
	.pool = blocking_pool_data,
	.push_work = __WORK_INITIALIZER(blocking_pool.push_work,
					push_to_pool),
};

static __u32 const twist_table[8] = {
	0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
	0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };

/*
 * This function adds bytes into the entropy "pool".  It does not
 * update the entropy estimate.  The caller should call
 * credit_entropy_bits if this is appropriate.
 *
 * The pool is stirred with a primitive polynomial of the appropriate
 * degree, and then twisted.  We twist by three bits at a time because
 * it's cheap to do so and helps slightly in the expected case where
 * the entropy is concentrated in the low-order bits.
 */
static void _mix_pool_bytes(struct entropy_store *r, const void *in,
			    int nbytes)
{
	unsigned long i, tap1, tap2, tap3, tap4, tap5;
	int input_rotate;
	int wordmask = r->poolinfo->poolwords - 1;
	const char *bytes = in;
	__u32 w;

	tap1 = r->poolinfo->tap1;
	tap2 = r->poolinfo->tap2;
	tap3 = r->poolinfo->tap3;
	tap4 = r->poolinfo->tap4;
	tap5 = r->poolinfo->tap5;

	input_rotate = r->input_rotate;
	i = r->add_ptr;

	/* mix one byte at a time to simplify size handling and churn faster */
	while (nbytes--) {
		w = rol32(*bytes++, input_rotate);
		i = (i - 1) & wordmask;

		/* XOR in the various taps */
		w ^= r->pool[i];
		w ^= r->pool[(i + tap1) & wordmask];
		w ^= r->pool[(i + tap2) & wordmask];
		w ^= r->pool[(i + tap3) & wordmask];
		w ^= r->pool[(i + tap4) & wordmask];
		w ^= r->pool[(i + tap5) & wordmask];

		/* Mix the result back in with a twist */
		r->pool[i] = (w >> 3) ^ twist_table[w & 7];

		/*
		 * Normally, we add 7 bits of rotation to the pool.
		 * At the beginning of the pool, add an extra 7 bits
		 * rotation, so that successive passes spread the
		 * input bits across the pool evenly.
		 */
		input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
	}

	r->input_rotate = input_rotate;
	r->add_ptr = i;
}

static void __mix_pool_bytes(struct entropy_store *r, const void *in,
			     int nbytes)
{
	trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
	_mix_pool_bytes(r, in, nbytes);
}

static void mix_pool_bytes(struct entropy_store *r, const void *in,
			   int nbytes)
{
	unsigned long flags;

	trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
	spin_lock_irqsave(&r->lock, flags);
	_mix_pool_bytes(r, in, nbytes);
	spin_unlock_irqrestore(&r->lock, flags);
}

struct fast_pool {
	__u32 pool[4];
	unsigned long last;
	unsigned short reg_idx;
	unsigned char count;
};

/*
 * This is a fast mixing routine used by the interrupt randomness
 * collector.  It's hardcoded for an 128 bit pool and assumes that any
 * locks that might be needed are taken by the caller.
 */
static void fast_mix(struct fast_pool *f)
{
	__u32 a = f->pool[0], b = f->pool[1];
	__u32 c = f->pool[2], d = f->pool[3];

	a += b; c += d;
	b = rol32(b, 6); d = rol32(d, 27);
	d ^= a; b ^= c;

	a += b; c += d;
	b = rol32(b, 16); d = rol32(d, 14);
	d ^= a; b ^= c;

	a += b; c += d;
	b = rol32(b, 6); d = rol32(d, 27);
	d ^= a; b ^= c;

	a += b; c += d;
	b = rol32(b, 16); d = rol32(d, 14);
	d ^= a; b ^= c;

	f->pool[0] = a;  f->pool[1] = b;
	f->pool[2] = c;  f->pool[3] = d;
	f->count++;
}

static void process_random_ready_list(void)
{
	unsigned long flags;
	struct random_ready_callback *rdy, *tmp;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) {
		struct module *owner = rdy->owner;

		list_del_init(&rdy->list);
		rdy->func(rdy);
		module_put(owner);
	}
	spin_unlock_irqrestore(&random_ready_list_lock, flags);
}

/*
 * Credit (or debit) the entropy store with n bits of entropy.
 * Use credit_entropy_bits_safe() if the value comes from userspace
 * or otherwise should be checked for extreme values.
 */
static void credit_entropy_bits(struct entropy_store *r, int nbits)
{
	int entropy_count, orig, has_initialized = 0;
	const int pool_size = r->poolinfo->poolfracbits;
	int nfrac = nbits << ENTROPY_SHIFT;

	if (!nbits)
		return;

retry:
	entropy_count = orig = READ_ONCE(r->entropy_count);
	if (nfrac < 0) {
		/* Debit */
		entropy_count += nfrac;
	} else {
		/*
		 * Credit: we have to account for the possibility of
		 * overwriting already present entropy.  Even in the
		 * ideal case of pure Shannon entropy, new contributions
		 * approach the full value asymptotically:
		 *
		 * entropy <- entropy + (pool_size - entropy) *
		 *	(1 - exp(-add_entropy/pool_size))
		 *
		 * For add_entropy <= pool_size/2 then
		 * (1 - exp(-add_entropy/pool_size)) >=
		 *    (add_entropy/pool_size)*0.7869...
		 * so we can approximate the exponential with
		 * 3/4*add_entropy/pool_size and still be on the
		 * safe side by adding at most pool_size/2 at a time.
		 *
		 * The use of pool_size-2 in the while statement is to
		 * prevent rounding artifacts from making the loop
		 * arbitrarily expensive; this here is merely an
		 * optimization.
		 */
		int pnfrac = nfrac;
		const int s = r->poolinfo->poolbitshift + ENTROPY_SHIFT + 2;
		/* The +2 corresponds to the /4 in the denominator */

		do {
			unsigned int anfrac = min(pnfrac, pool_size/2);
			unsigned int add =
				((pool_size - entropy_count)*anfrac*3) >> s;

			entropy_count += add;
			pnfrac -= anfrac;
		} while (unlikely(entropy_count < pool_size-2 && pnfrac));
	}

	if (unlikely(entropy_count < 0)) {
		pr_warn("random: negative entropy/overflow: pool %s count %d\n",
			r->name, entropy_count);
		WARN_ON(1);
		entropy_count = 0;
	} else if (entropy_count > pool_size)
		entropy_count = pool_size;
	if ((r == &blocking_pool) && !r->initialized &&
	    (entropy_count >> ENTROPY_SHIFT) > 128)
		has_initialized = 1;
	if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
		goto retry;

	if (has_initialized) {
		r->initialized = 1;
		wake_up_interruptible(&random_read_wait);
		kill_fasync(&fasync, SIGIO, POLL_IN);
	}

	trace_credit_entropy_bits(r->name, nbits,
				  entropy_count >> ENTROPY_SHIFT, _RET_IP_);

	if (r == &input_pool) {
		int entropy_bits = entropy_count >> ENTROPY_SHIFT;
		struct entropy_store *other = &blocking_pool;

		if (crng_init < 2) {
			if (entropy_bits < 128)
				return;
			crng_reseed(&primary_crng, r);
			entropy_bits = r->entropy_count >> ENTROPY_SHIFT;
		}

		/* initialize the blocking pool if necessary */
		if (entropy_bits >= random_read_wakeup_bits &&
		    !other->initialized) {
			schedule_work(&other->push_work);
			return;
		}

		/* should we wake readers? */
		if (entropy_bits >= random_read_wakeup_bits &&
		    wq_has_sleeper(&random_read_wait)) {
			wake_up_interruptible(&random_read_wait);
			kill_fasync(&fasync, SIGIO, POLL_IN);
		}

		/*
		 * If the input pool is more than 75% full, and the
		 * blocking pool is not yet 75% full, push some entropy
		 * over to the blocking pool.
		 */
		if (!work_pending(&other->push_work) &&
		    (ENTROPY_BITS(r) > 6 * r->poolinfo->poolbytes) &&
		    (ENTROPY_BITS(other) <= 6 * other->poolinfo->poolbytes))
			schedule_work(&other->push_work);
	}
}
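
/*
 * Worked example of the 3/4 approximation above (illustrative numbers
 * only): the 4096-bit input pool has pool_size = 32768 in 1/8th-bit
 * units.  Crediting 512 bits (nfrac = 4096) to an empty pool adds
 *
 *	(32768 - 0) * 4096 * 3 / (4 * 32768) = 3072	(384 bits)
 *
 * and crediting the same amount again adds
 *
 *	(32768 - 3072) * 4096 * 3 / (4 * 32768) = 2784	(348 bits)
 *
 * so each successive contribution counts for a little less as the
 * pool fills, tracking the asymptotic formula quoted above.
 */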

static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
{
	const int nbits_max = r->poolinfo->poolwords * 32;

	if (nbits < 0)
		return -EINVAL;

	/* Cap the value to avoid overflows */
	nbits = min(nbits, nbits_max);

	credit_entropy_bits(r, nbits);
	return 0;
}

/*********************************************************************
 *
 * CRNG using CHACHA20
 *
 *********************************************************************/

#define CRNG_RESEED_INTERVAL (300*HZ)

static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);

#ifdef CONFIG_NUMA
/*
 * Hack to deal with crazy userspace programs when they are all trying
 * to access /dev/urandom in parallel.  The programs are almost
 * certainly doing something terribly wrong, but we'll work around
 * their brain damage.
 */
static struct crng_state **crng_node_pool __read_mostly;
#endif

static void invalidate_batched_entropy(void);
static void numa_crng_init(void);

static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
static int __init parse_trust_cpu(char *arg)
{
	return kstrtobool(arg, &trust_cpu);
}
early_param("random.trust_cpu", parse_trust_cpu);

static void crng_initialize(struct crng_state *crng)
{
	int i;
	int arch_init = 1;
	unsigned long rv;

	memcpy(&crng->state[0], "expand 32-byte k", 16);
	if (crng == &primary_crng)
		_extract_entropy(&input_pool, &crng->state[4],
				 sizeof(__u32) * 12, 0);
	else
		_get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
	for (i = 4; i < 16; i++) {
		if (!arch_get_random_seed_long(&rv) &&
		    !arch_get_random_long(&rv)) {
			rv = random_get_entropy();
			arch_init = 0;
		}
		crng->state[i] ^= rv;
	}
	if (trust_cpu && arch_init && crng == &primary_crng) {
		invalidate_batched_entropy();
		numa_crng_init();
		crng_init = 2;
		pr_notice("random: crng done (trusting CPU's manufacturer)\n");
	}
	crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}

#ifdef CONFIG_NUMA
static void do_numa_crng_init(struct work_struct *work)
{
	int i;
	struct crng_state *crng;
	struct crng_state **pool;

	pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
	for_each_online_node(i) {
		crng = kmalloc_node(sizeof(struct crng_state),
				    GFP_KERNEL | __GFP_NOFAIL, i);
		spin_lock_init(&crng->lock);
		crng_initialize(crng);
		pool[i] = crng;
	}
	mb();
	if (cmpxchg(&crng_node_pool, NULL, pool)) {
		for_each_node(i)
			kfree(pool[i]);
		kfree(pool);
	}
}

static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init);

static void numa_crng_init(void)
{
	schedule_work(&numa_crng_init_work);
}
#else
static void numa_crng_init(void) {}
#endif

/*
 * crng_fast_load() can be called by code in the interrupt service
 * path.  So we can't afford to dilly-dally.
 */
static int crng_fast_load(const char *cp, size_t len)
{
	unsigned long flags;
	unsigned char *p;

	if (!spin_trylock_irqsave(&primary_crng.lock, flags))
		return 0;
	if (crng_init != 0) {
		spin_unlock_irqrestore(&primary_crng.lock, flags);
		return 0;
	}
	p = (unsigned char *) &primary_crng.state[4];
	while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
		p[crng_init_cnt % CHACHA_KEY_SIZE] ^= *cp;
		cp++; crng_init_cnt++; len--;
	}
	spin_unlock_irqrestore(&primary_crng.lock, flags);
	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
		invalidate_batched_entropy();
		crng_init = 1;
		wake_up_interruptible(&crng_init_wait);
		pr_notice("random: fast init done\n");
	}
	return 1;
}

/*
 * crng_slow_load() is called by add_device_randomness, which has two
 * attributes.  (1) We can't trust the buffer passed to it is
 * guaranteed to be unpredictable (so it might not have any entropy at
 * all), and (2) it doesn't have the performance constraints of
 * crng_fast_load().
 *
 * So we do something more comprehensive which is guaranteed to touch
 * all of the primary_crng's state, and which uses a LFSR with a
 * period of 255 as part of the mixing algorithm.  Finally, we do
 * *not* advance crng_init_cnt since the buffer we may get may be
 * something like a fixed DMI table (for example), which might very
 * well be unique to the machine, but is otherwise unvarying.
 */
static int crng_slow_load(const char *cp, size_t len)
{
	unsigned long flags;
	static unsigned char lfsr = 1;
	unsigned char tmp;
	unsigned i, max = CHACHA_KEY_SIZE;
	const char *src_buf = cp;
	char *dest_buf = (char *) &primary_crng.state[4];

	if (!spin_trylock_irqsave(&primary_crng.lock, flags))
		return 0;
	if (crng_init != 0) {
		spin_unlock_irqrestore(&primary_crng.lock, flags);
		return 0;
	}
	if (len > max)
		max = len;

	for (i = 0; i < max; i++) {
		tmp = lfsr;
		lfsr >>= 1;
		if (tmp & 1)
			lfsr ^= 0xE1;
		tmp = dest_buf[i % CHACHA_KEY_SIZE];
		dest_buf[i % CHACHA_KEY_SIZE] ^= src_buf[i % len] ^ lfsr;
		lfsr += (tmp << 3) | (tmp >> 5);
	}
	spin_unlock_irqrestore(&primary_crng.lock, flags);
	return 1;
}

static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
{
	unsigned long flags;
	int i, num;
	union {
		__u8 block[CHACHA_BLOCK_SIZE];
		__u32 key[8];
	} buf;

	if (r) {
		num = extract_entropy(r, &buf, 32, 16, 0);
		if (num == 0)
			return;
	} else {
		_extract_crng(&primary_crng, buf.block);
		_crng_backtrack_protect(&primary_crng, buf.block,
					CHACHA_KEY_SIZE);
	}
	spin_lock_irqsave(&crng->lock, flags);
	for (i = 0; i < 8; i++) {
		unsigned long rv;
		if (!arch_get_random_seed_long(&rv) &&
		    !arch_get_random_long(&rv))
			rv = random_get_entropy();
		crng->state[i+4] ^= buf.key[i] ^ rv;
	}
	memzero_explicit(&buf, sizeof(buf));
	crng->init_time = jiffies;
	spin_unlock_irqrestore(&crng->lock, flags);
	if (crng == &primary_crng && crng_init < 2) {
		invalidate_batched_entropy();
		numa_crng_init();
		crng_init = 2;
		process_random_ready_list();
		wake_up_interruptible(&crng_init_wait);
		pr_notice("random: crng init done\n");
		if (unseeded_warning.missed) {
			pr_notice("random: %d get_random_xx warning(s) missed due to ratelimiting\n",
				  unseeded_warning.missed);
			unseeded_warning.missed = 0;
		}
		if (urandom_warning.missed) {
			pr_notice("random: %d urandom warning(s) missed due to ratelimiting\n",
				  urandom_warning.missed);
			urandom_warning.missed = 0;
		}
	}
}

static void _extract_crng(struct crng_state *crng,
			  __u8 out[CHACHA_BLOCK_SIZE])
{
	unsigned long v, flags;

	if (crng_ready() &&
	    (time_after(crng_global_init_time, crng->init_time) ||
	     time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)))
		crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL);
	spin_lock_irqsave(&crng->lock, flags);
	if (arch_get_random_long(&v))
		crng->state[14] ^= v;
	chacha20_block(&crng->state[0], out);
	if (crng->state[12] == 0)
		crng->state[13]++;
	spin_unlock_irqrestore(&crng->lock, flags);
}

static void extract_crng(__u8 out[CHACHA_BLOCK_SIZE])
{
	struct crng_state *crng = NULL;

#ifdef CONFIG_NUMA
	if (crng_node_pool)
		crng = crng_node_pool[numa_node_id()];
	if (crng == NULL)
#endif
		crng = &primary_crng;
	_extract_crng(crng, out);
}

/*
 * Use the leftover bytes from the CRNG block output (if there is
 * enough) to mutate the CRNG key to provide backtracking protection.
 */
static void _crng_backtrack_protect(struct crng_state *crng,
				    __u8 tmp[CHACHA_BLOCK_SIZE], int used)
{
	unsigned long flags;
	__u32 *s, *d;
	int i;

	used = round_up(used, sizeof(__u32));
	if (used + CHACHA_KEY_SIZE > CHACHA_BLOCK_SIZE) {
		extract_crng(tmp);
		used = 0;
	}
	spin_lock_irqsave(&crng->lock, flags);
	s = (__u32 *) &tmp[used];
	d = &crng->state[4];
	for (i = 0; i < 8; i++)
		*d++ ^= *s++;
	spin_unlock_irqrestore(&crng->lock, flags);
}

static void crng_backtrack_protect(__u8 tmp[CHACHA_BLOCK_SIZE], int used)
{
	struct crng_state *crng = NULL;

#ifdef CONFIG_NUMA
	if (crng_node_pool)
		crng = crng_node_pool[numa_node_id()];
	if (crng == NULL)
#endif
		crng = &primary_crng;
	_crng_backtrack_protect(crng, tmp, used);
}

static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
{
	ssize_t ret = 0, i = CHACHA_BLOCK_SIZE;
	__u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
	int large_request = (nbytes > 256);

	while (nbytes) {
		if (large_request && need_resched()) {
			if (signal_pending(current)) {
				if (ret == 0)
					ret = -ERESTARTSYS;
				break;
			}
			schedule();
		}

		extract_crng(tmp);
		i = min_t(int, nbytes, CHACHA_BLOCK_SIZE);
		if (copy_to_user(buf, tmp, i)) {
			ret = -EFAULT;
			break;
		}

		nbytes -= i;
		buf += i;
		ret += i;
	}
	crng_backtrack_protect(tmp, i);

	/* Wipe data just written to memory */
	memzero_explicit(tmp, sizeof(tmp));

	return ret;
}

/*********************************************************************
 *
 * Entropy input management
 *
 *********************************************************************/

/* There is one of these per entropy source */
struct timer_rand_state {
	cycles_t last_time;
	long last_delta, last_delta2;
};

#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, }

/*
 * Add device- or boot-specific data to the input pool to help
 * initialize it.
 *
 * None of this adds any entropy; it is meant to avoid the problem of
 * the entropy pool having similar initial state across largely
 * identical devices.
 */
void add_device_randomness(const void *buf, unsigned int size)
{
	unsigned long time = random_get_entropy() ^ jiffies;
	unsigned long flags;

	if (!crng_ready() && size)
		crng_slow_load(buf, size);

	trace_add_device_randomness(size, _RET_IP_);
	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(&input_pool, buf, size);
	_mix_pool_bytes(&input_pool, &time, sizeof(time));
	spin_unlock_irqrestore(&input_pool.lock, flags);
}
EXPORT_SYMBOL(add_device_randomness);
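
/*
 * Illustrative sketch only (the device and its serial field are
 * hypothetical, not part of this file): a typical caller mixes in
 * stable but device-unique data at probe time.  No entropy is
 * credited; the call merely perturbs the pool's initial state on
 * otherwise-identical devices.
 *
 *	static int example_probe(struct example_dev *dev)
 *	{
 *		add_device_randomness(dev->serial, sizeof(dev->serial));
 *		return 0;
 *	}
 */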

static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE;

/*
 * This function adds entropy to the entropy "pool" by using timing
 * delays.  It uses the timer_rand_state structure to make an estimate
 * of how many bits of entropy this call has added to the pool.
 *
 * The number "num" is also added to the pool - it should somehow describe
 * the type of event which just happened.  This is currently 0-255 for
 * keyboard scan codes, and 256 upwards for interrupts.
 */
static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
{
	struct entropy_store *r;
	struct {
		long jiffies;
		unsigned cycles;
		unsigned num;
	} sample;
	long delta, delta2, delta3;

	sample.jiffies = jiffies;
	sample.cycles = random_get_entropy();
	sample.num = num;
	r = &input_pool;
	mix_pool_bytes(r, &sample, sizeof(sample));

	/*
	 * Calculate number of bits of randomness we probably added.
	 * We take into account the first, second and third-order deltas
	 * in order to make our estimate.
	 */
	delta = sample.jiffies - state->last_time;
	state->last_time = sample.jiffies;

	delta2 = delta - state->last_delta;
	state->last_delta = delta;

	delta3 = delta2 - state->last_delta2;
	state->last_delta2 = delta2;

	if (delta < 0)
		delta = -delta;
	if (delta2 < 0)
		delta2 = -delta2;
	if (delta3 < 0)
		delta3 = -delta3;
	if (delta > delta2)
		delta = delta2;
	if (delta > delta3)
		delta = delta3;

	/*
	 * delta is now minimum absolute delta.
	 * Round down by 1 bit on general principles,
	 * and limit entropy estimate to 11 bits.
	 */
	credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
}

void add_input_randomness(unsigned int type, unsigned int code,
			  unsigned int value)
{
	static unsigned char last_value;

	/* ignore autorepeat and the like */
	if (value == last_value)
		return;

	last_value = value;
	add_timer_randomness(&input_timer_state,
			     (type << 4) ^ code ^ (code >> 4) ^ value);
	trace_add_input_randomness(ENTROPY_BITS(&input_pool));
}
EXPORT_SYMBOL_GPL(add_input_randomness);
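
/*
 * For reference, the input core invokes the function above for each
 * event it handles, roughly:
 *
 *	input_event(dev, EV_KEY, KEY_A, 1);
 *		-> add_input_randomness(EV_KEY, KEY_A, 1);
 *
 * Repeated identical values are rejected by the last_value check, so
 * autorepeat contributes nothing.
 */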

static DEFINE_PER_CPU(struct fast_pool, irq_randomness);

#ifdef ADD_INTERRUPT_BENCH
static unsigned long avg_cycles, avg_deviation;

#define AVG_SHIFT 8	/* Exponential average factor k=1/256 */
#define FIXED_1_2 (1 << (AVG_SHIFT-1))

static void add_interrupt_bench(cycles_t start)
{
	long delta = random_get_entropy() - start;

	/* Use a weighted moving average */
	delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT);
	avg_cycles += delta;
	/* And average deviation */
	delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT);
	avg_deviation += delta;
}
#else
#define add_interrupt_bench(x)
#endif

static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
{
	__u32 *ptr = (__u32 *) regs;
	unsigned int idx;

	if (regs == NULL)
		return 0;
	idx = READ_ONCE(f->reg_idx);
	if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
		idx = 0;
	ptr += idx++;
	WRITE_ONCE(f->reg_idx, idx);
	return *ptr;
}

void add_interrupt_randomness(int irq, int irq_flags)
{
	struct entropy_store *r;
	struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
	struct pt_regs *regs = get_irq_regs();
	unsigned long now = jiffies;
	cycles_t cycles = random_get_entropy();
	__u32 c_high, j_high;
	__u64 ip;
	unsigned long seed;
	int credit = 0;

	if (cycles == 0)
		cycles = get_reg(fast_pool, regs);
	c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
	j_high = (sizeof(now) > 4) ? now >> 32 : 0;
	fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
	fast_pool->pool[1] ^= now ^ c_high;
	ip = regs ? instruction_pointer(regs) : _RET_IP_;
	fast_pool->pool[2] ^= ip;
	fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
		get_reg(fast_pool, regs);

	fast_mix(fast_pool);
	add_interrupt_bench(cycles);

	if (unlikely(crng_init == 0)) {
		if ((fast_pool->count >= 64) &&
		    crng_fast_load((char *) fast_pool->pool,
				   sizeof(fast_pool->pool))) {
			fast_pool->count = 0;
			fast_pool->last = now;
		}
		return;
	}

	if ((fast_pool->count < 64) &&
	    !time_after(now, fast_pool->last + HZ))
		return;

	r = &input_pool;
	if (!spin_trylock(&r->lock))
		return;

	fast_pool->last = now;
	__mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));

	/*
	 * If we have an architectural seed generator, produce a seed
	 * and add it to the pool.  For the sake of paranoia don't let
	 * the architectural seed generator dominate the input from the
	 * interrupt noise.
	 */
	if (arch_get_random_seed_long(&seed)) {
		__mix_pool_bytes(r, &seed, sizeof(seed));
		credit = 1;
	}
	spin_unlock(&r->lock);

	fast_pool->count = 0;

	/* award one bit for the contents of the fast pool */
	credit_entropy_bits(r, credit + 1);
}
EXPORT_SYMBOL_GPL(add_interrupt_randomness);

#ifdef CONFIG_BLOCK
void add_disk_randomness(struct gendisk *disk)
{
	if (!disk || !disk->random)
		return;

	add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
	trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool));
}
EXPORT_SYMBOL_GPL(add_disk_randomness);
#endif

/*********************************************************************
 *
 * Entropy extraction routines
 *
 *********************************************************************/

/*
 * This utility function is responsible for transferring entropy
 * from the primary pool to the secondary extraction pool.  We make
 * sure we pull enough for a 'catastrophic reseed'.
 */
static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes);
static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
{
	if (!r->pull ||
	    r->entropy_count >= (nbytes << (ENTROPY_SHIFT + 3)) ||
	    r->entropy_count > r->poolinfo->poolfracbits)
		return;

	_xfer_secondary_pool(r, nbytes);
}

static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
{
	__u32 tmp[OUTPUT_POOL_WORDS];

	int bytes = nbytes;

	/* pull at least as much as a wakeup */
	bytes = max_t(int, bytes, random_read_wakeup_bits / 8);
	/* but never more than the buffer size */
	bytes = min_t(int, bytes, sizeof(tmp));

	trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8,
				  ENTROPY_BITS(r), ENTROPY_BITS(r->pull));
	bytes = extract_entropy(r->pull, tmp, bytes,
				random_read_wakeup_bits / 8, 0);
	mix_pool_bytes(r, tmp, bytes);
	credit_entropy_bits(r, bytes*8);
}

/*
 * Used as a workqueue function so that when the input pool is getting
 * full, we can "spill over" some entropy to the blocking pools.
 */
static void push_to_pool(struct work_struct *work)
{
	struct entropy_store *r = container_of(work, struct entropy_store,
					       push_work);
	BUG_ON(!r);
	_xfer_secondary_pool(r, random_read_wakeup_bits/8);
	trace_push_to_pool(r->name, r->entropy_count >> ENTROPY_SHIFT,
			   r->pull->entropy_count >> ENTROPY_SHIFT);
}

/*
 * This function decides how many bytes to actually take from the
 * given pool, and also debits the entropy count accordingly.
 */
static size_t account(struct entropy_store *r, size_t nbytes, int min,
		      int reserved)
{
	int entropy_count, orig, have_bytes;
	size_t ibytes, nfrac;

	BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);

	/* Can we pull enough? */
retry:
	entropy_count = orig = READ_ONCE(r->entropy_count);
	ibytes = nbytes;
	/* never pull more than available */
	have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);

	if ((have_bytes -= reserved) < 0)
		have_bytes = 0;
	ibytes = min_t(size_t, ibytes, have_bytes);
	if (ibytes < min)
		ibytes = 0;

	if (unlikely(entropy_count < 0)) {
		pr_warn("random: negative entropy count: pool %s count %d\n",
			r->name, entropy_count);
		WARN_ON(1);
		entropy_count = 0;
	}
	nfrac = ibytes << (ENTROPY_SHIFT + 3);
	if ((size_t) entropy_count > nfrac)
		entropy_count -= nfrac;
	else
		entropy_count = 0;

	if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
		goto retry;

	trace_debit_entropy(r->name, 8 * ibytes);
	if (ibytes &&
	    (r->entropy_count >> ENTROPY_SHIFT) < random_write_wakeup_bits) {
		wake_up_interruptible(&random_write_wait);
		kill_fasync(&fasync, SIGIO, POLL_OUT);
	}

	return ibytes;
}

/*
 * This function does the actual extraction for extract_entropy and
 * extract_entropy_user.
 *
 * Note: we assume that .poolwords is a multiple of 16 words.
 */
static void extract_buf(struct entropy_store *r, __u8 *out)
{
	int i;
	union {
		__u32 w[5];
		unsigned long l[LONGS(20)];
	} hash;
	__u32 workspace[SHA_WORKSPACE_WORDS];
	unsigned long flags;

	/*
	 * If we have an architectural hardware random number
	 * generator, use it for SHA's initial vector
	 */
	sha_init(hash.w);
	for (i = 0; i < LONGS(20); i++) {
		unsigned long v;
		if (!arch_get_random_long(&v))
			break;
		hash.l[i] = v;
	}

	/* Generate a hash across the pool, 16 words (512 bits) at a time */
	spin_lock_irqsave(&r->lock, flags);
	for (i = 0; i < r->poolinfo->poolwords; i += 16)
		sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);

	/*
	 * We mix the hash back into the pool to prevent backtracking
	 * attacks (where the attacker knows the state of the pool
	 * plus the current outputs, and attempts to find previous
	 * outputs), unless the hash function can be inverted.  By
	 * mixing at least a SHA1 worth of hash data back, we make
	 * brute-forcing the feedback as hard as brute-forcing the
	 * hash.
	 */
	__mix_pool_bytes(r, hash.w, sizeof(hash.w));
	spin_unlock_irqrestore(&r->lock, flags);

	memzero_explicit(workspace, sizeof(workspace));

	/*
	 * In case the hash function has some recognizable output
	 * pattern, we fold it in half.  Thus, we always feed back
	 * twice as much data as we output.
	 */
	hash.w[0] ^= hash.w[3];
	hash.w[1] ^= hash.w[4];
	hash.w[2] ^= rol32(hash.w[2], 16);

	memcpy(out, &hash, EXTRACT_SIZE);
	memzero_explicit(&hash, sizeof(hash));
}

static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
				size_t nbytes, int fips)
{
	ssize_t ret = 0, i;
	__u8 tmp[EXTRACT_SIZE];
	unsigned long flags;

	while (nbytes) {
		extract_buf(r, tmp);

		if (fips) {
			spin_lock_irqsave(&r->lock, flags);
			if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
				panic("Hardware RNG duplicated output!\n");
			memcpy(r->last_data, tmp, EXTRACT_SIZE);
			spin_unlock_irqrestore(&r->lock, flags);
		}
		i = min_t(int, nbytes, EXTRACT_SIZE);
		memcpy(buf, tmp, i);
		nbytes -= i;
		buf += i;
		ret += i;
	}

	/* Wipe data just returned from memory */
	memzero_explicit(tmp, sizeof(tmp));

	return ret;
}

/*
 * This function extracts randomness from the "entropy pool", and
 * returns it in a buffer.
 *
 * The min parameter specifies the minimum amount we can pull before
 * failing to avoid races that defeat catastrophic reseeding while the
 * reserved parameter indicates how much entropy we must leave in the
 * pool after each pull to avoid starving other readers.
 */
static ssize_t extract_entropy(struct entropy_store *r, void *buf,
			       size_t nbytes, int min, int reserved)
{
	__u8 tmp[EXTRACT_SIZE];
	unsigned long flags;

	/* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
	if (fips_enabled) {
		spin_lock_irqsave(&r->lock, flags);
		if (!r->last_data_init) {
			r->last_data_init = 1;
			spin_unlock_irqrestore(&r->lock, flags);
			trace_extract_entropy(r->name, EXTRACT_SIZE,
					      ENTROPY_BITS(r), _RET_IP_);
			xfer_secondary_pool(r, EXTRACT_SIZE);
			extract_buf(r, tmp);
			spin_lock_irqsave(&r->lock, flags);
			memcpy(r->last_data, tmp, EXTRACT_SIZE);
		}
		spin_unlock_irqrestore(&r->lock, flags);
	}

	trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
	xfer_secondary_pool(r, nbytes);
	nbytes = account(r, nbytes, min, reserved);

	return _extract_entropy(r, buf, nbytes, fips_enabled);
}

/*
 * This function extracts randomness from the "entropy pool", and
 * returns it in a userspace buffer.
 */
static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
				    size_t nbytes)
{
	ssize_t ret = 0, i;
	__u8 tmp[EXTRACT_SIZE];
	int large_request = (nbytes > 256);

	trace_extract_entropy_user(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
	if (!r->initialized && r->pull) {
		xfer_secondary_pool(r, ENTROPY_BITS(r->pull)/8);
		if (!r->initialized)
			return 0;
	}
	xfer_secondary_pool(r, nbytes);
	nbytes = account(r, nbytes, 0, 0);

	while (nbytes) {
		if (large_request && need_resched()) {
			if (signal_pending(current)) {
				if (ret == 0)
					ret = -ERESTARTSYS;
				break;
			}
			schedule();
		}

		extract_buf(r, tmp);
		i = min_t(int, nbytes, EXTRACT_SIZE);
		if (copy_to_user(buf, tmp, i)) {
			ret = -EFAULT;
			break;
		}

		nbytes -= i;
		buf += i;
		ret += i;
	}

	/* Wipe data just returned from memory */
	memzero_explicit(tmp, sizeof(tmp));

	return ret;
}

#define warn_unseeded_randomness(previous) \
	_warn_unseeded_randomness(__func__, (void *) _RET_IP_, (previous))

static void _warn_unseeded_randomness(const char *func_name, void *caller,
				      void **previous)
{
#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
	const bool print_once = false;
#else
	static bool print_once __read_mostly;
#endif

	if (print_once ||
	    crng_ready() ||
	    (previous && (caller == READ_ONCE(*previous))))
		return;
	WRITE_ONCE(*previous, caller);
#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
	print_once = true;
#endif
	if (__ratelimit(&unseeded_warning))
		pr_notice("random: %s called from %pS with crng_init=%d\n",
			  func_name, caller, crng_init);
}

/*
 * This function is the exported kernel interface.  It returns some
 * number of good random numbers, suitable for key generation, seeding
 * TCP sequence numbers, etc.  It does not rely on the hardware random
 * number generator.  For random bytes direct from the hardware RNG
 * (when available), use get_random_bytes_arch().  In order to ensure
 * that the randomness provided by this function is okay, the function
 * wait_for_random_bytes() should be called and return 0 at least once
 * at any point prior.
 */
static void _get_random_bytes(void *buf, int nbytes)
{
	__u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);

	trace_get_random_bytes(nbytes, _RET_IP_);

	while (nbytes >= CHACHA_BLOCK_SIZE) {
		extract_crng(buf);
		buf += CHACHA_BLOCK_SIZE;
		nbytes -= CHACHA_BLOCK_SIZE;
	}

	if (nbytes > 0) {
		extract_crng(tmp);
		memcpy(buf, tmp, nbytes);
		crng_backtrack_protect(tmp, nbytes);
	} else
		crng_backtrack_protect(tmp, CHACHA_BLOCK_SIZE);
	memzero_explicit(tmp, sizeof(tmp));
}

void get_random_bytes(void *buf, int nbytes)
{
	static void *previous;

	warn_unseeded_randomness(&previous);
	_get_random_bytes(buf, nbytes);
}
EXPORT_SYMBOL(get_random_bytes);
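
/*
 * Illustrative sketch only (the key buffer is hypothetical): drawing
 * key material for in-kernel use.
 *
 *	u8 key[32];
 *
 *	get_random_bytes(key, sizeof(key));
 *
 * Note there is no guarantee the CRNG is seeded when this runs unless
 * wait_for_random_bytes() has returned 0 at some earlier point; see
 * the comment above _get_random_bytes().
 */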

/*
 * Each time the timer fires, we expect that we got an unpredictable
 * jump in the cycle counter.  Even if the timer is running on another
 * CPU, the timer activity will be touching the stack of the CPU that
 * is generating entropy.
 *
 * Note that we don't re-arm the timer in the timer itself - we are
 * happy to be scheduled away, since that just makes the load more
 * complex, but we do not want the timer to keep ticking unless the
 * entropy loop is running.
 *
 * So the re-arming always happens in the entropy loop itself.
 */
static void entropy_timer(struct timer_list *t)
{
	credit_entropy_bits(&input_pool, 1);
}

/*
 * If we have an actual cycle counter, see if we can
 * generate enough entropy with timing noise
 */
static void try_to_generate_entropy(void)
{
	struct {
		unsigned long now;
		struct timer_list timer;
	} stack;

	stack.now = random_get_entropy();

	/* Slow counter - or none. Don't even bother */
	if (stack.now == random_get_entropy())
		return;

	timer_setup_on_stack(&stack.timer, entropy_timer, 0);
	while (!crng_ready()) {
		if (!timer_pending(&stack.timer))
			mod_timer(&stack.timer, jiffies+1);
		mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
		schedule();
		stack.now = random_get_entropy();
	}

	del_timer_sync(&stack.timer);
	destroy_timer_on_stack(&stack.timer);
	mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
}

/*
 * Wait for the urandom pool to be seeded and thus guaranteed to supply
 * cryptographically secure random numbers.  This applies to: the
 * /dev/urandom device, the get_random_bytes function, and the
 * get_random_{u32,u64,int,long} family of functions.  Using any of
 * these functions without first calling this function forfeits the
 * guarantee of security.
 *
 * Returns: 0 if the urandom pool has been seeded.
 *          -ERESTARTSYS if the function was interrupted by a signal.
 */
int wait_for_random_bytes(void)
{
	if (likely(crng_ready()))
		return 0;

	do {
		int ret;
		ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
		if (ret)
			return ret > 0 ? 0 : ret;

		try_to_generate_entropy();
	} while (!crng_ready());

	return 0;
}
EXPORT_SYMBOL(wait_for_random_bytes);
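
/*
 * Illustrative sketch only (hypothetical init function): block until
 * the CRNG is seeded, then draw random bytes with the full security
 * guarantee.
 *
 *	static int example_init(void)
 *	{
 *		u8 seed[16];
 *		int ret = wait_for_random_bytes();
 *
 *		if (ret)
 *			return ret;	(interrupted by a signal)
 *		get_random_bytes(seed, sizeof(seed));
 *		return 0;
 *	}
 */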

/*
 * Returns whether or not the urandom pool has been seeded and thus
 * guaranteed to supply cryptographically secure random numbers.  This
 * applies to: the /dev/urandom device, the get_random_bytes function,
 * and the get_random_{u32,u64,int,long} family of functions.
 *
 * Returns: true if the urandom pool has been seeded.
 *          false if the urandom pool has not been seeded.
 */
bool rng_is_initialized(void)
{
	return crng_ready();
}
EXPORT_SYMBOL(rng_is_initialized);

/*
 * Add a callback function that will be invoked when the nonblocking
 * pool is initialised.
 *
 * returns: 0 if callback is successfully added
 *	    -EALREADY if pool is already initialised (callback not called)
 *	    -ENOENT if module for callback is not alive
 */
int add_random_ready_callback(struct random_ready_callback *rdy)
{
	struct module *owner;
	unsigned long flags;
	int err = -EALREADY;

	if (crng_ready())
		return err;

	owner = rdy->owner;
	if (!try_module_get(owner))
		return -ENOENT;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	if (crng_ready())
		goto out;

	owner = NULL;

	list_add(&rdy->list, &random_ready_list);
	err = 0;

out:
	spin_unlock_irqrestore(&random_ready_list_lock, flags);

	module_put(owner);

	return err;
}
EXPORT_SYMBOL(add_random_ready_callback);
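
/*
 * Illustrative sketch only (hypothetical module): register a callback
 * to run once the CRNG is seeded, invoking it directly if the CRNG is
 * already ready.
 *
 *	static void example_ready(struct random_ready_callback *rdy)
 *	{
 *		... seed something with get_random_bytes() ...
 *	}
 *
 *	static struct random_ready_callback example_rdy = {
 *		.func	= example_ready,
 *		.owner	= THIS_MODULE,
 *	};
 *
 *	err = add_random_ready_callback(&example_rdy);
 *	if (err == -EALREADY)
 *		example_ready(&example_rdy);
 */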

/*
 * Delete a previously registered readiness callback function.
 */
void del_random_ready_callback(struct random_ready_callback *rdy)
{
	unsigned long flags;
	struct module *owner = NULL;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	if (!list_empty(&rdy->list)) {
		list_del_init(&rdy->list);
		owner = rdy->owner;
	}
	spin_unlock_irqrestore(&random_ready_list_lock, flags);

	module_put(owner);
}
EXPORT_SYMBOL(del_random_ready_callback);

/*
 * This function will use the architecture-specific hardware random
 * number generator if it is available.  It is not recommended for
 * general use; prefer get_random_bytes() instead.  It returns the
 * number of bytes filled in, which may be less than requested if the
 * hardware RNG is unavailable or runs dry.
 */
int __must_check get_random_bytes_arch(void *buf, int nbytes)
{
	int left = nbytes;
	char *p = buf;

	trace_get_random_bytes_arch(left, _RET_IP_);
	while (left) {
		unsigned long v;
		int chunk = min_t(int, left, sizeof(unsigned long));

		if (!arch_get_random_long(&v))
			break;

		memcpy(p, &v, chunk);
		p += chunk;
		left -= chunk;
	}

	return nbytes - left;
}
EXPORT_SYMBOL(get_random_bytes_arch);
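
/*
 * Illustrative sketch only (hypothetical buffer): because the hardware
 * RNG may be absent or may run dry, callers must check the number of
 * bytes actually filled and fall back to the CRNG for the rest:
 *
 *	if (get_random_bytes_arch(buf, len) != len)
 *		get_random_bytes(buf, len);
 */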

/*
 * init_std_data - initialize pool with system data
 *
 * @r: pool to initialize
 *
 * This function mixes some system data (current time, architectural
 * random values if available, and the utsname) into the pool to
 * prepare it for use.  The pool is not cleared, as that could only
 * ever reduce the entropy it holds.
 */
static void __init init_std_data(struct entropy_store *r)
{
	int i;
	ktime_t now = ktime_get_real();
	unsigned long rv;

	r->last_pulled = jiffies;
	mix_pool_bytes(r, &now, sizeof(now));
	for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) {
		if (!arch_get_random_seed_long(&rv) &&
		    !arch_get_random_long(&rv))
			rv = random_get_entropy();
		mix_pool_bytes(r, &rv, sizeof(rv));
	}
	mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
}

/*
 * Note that setup_arch() may call add_device_randomness()
 * long before we get here. This allows seeding of the pools
 * with some platform dependent data very early in the boot
 * process. But it limits our options here. We must use
 * statically allocated structures that already have all
 * initializations complete at compile time. We should also
 * take care not to overwrite the precious per platform data
 * we were given.
 */
int __init rand_initialize(void)
{
	init_std_data(&input_pool);
	init_std_data(&blocking_pool);
	crng_initialize(&primary_crng);
	crng_global_init_time = jiffies;
	if (ratelimit_disable) {
		urandom_warning.interval = 0;
		unseeded_warning.interval = 0;
	}
	return 0;
}

#ifdef CONFIG_BLOCK
void rand_initialize_disk(struct gendisk *disk)
{
	struct timer_rand_state *state;

	/*
	 * If kzalloc returns null, we just won't use that entropy
	 * source.
	 */
	state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
	if (state) {
		state->last_time = INITIAL_JIFFIES;
		disk->random = state;
	}
}
#endif

static ssize_t
_random_read(int nonblock, char __user *buf, size_t nbytes)
{
	ssize_t n;

	if (nbytes == 0)
		return 0;

	nbytes = min_t(size_t, nbytes, SEC_XFER_SIZE);
	while (1) {
		n = extract_entropy_user(&blocking_pool, buf, nbytes);
		if (n < 0)
			return n;
		trace_random_read(n*8, (nbytes-n)*8,
				  ENTROPY_BITS(&blocking_pool),
				  ENTROPY_BITS(&input_pool));
		if (n > 0)
			return n;

		/* Pool seems empty */
		if (nonblock)
			return -EAGAIN;

		wait_event_interruptible(random_read_wait,
			blocking_pool.initialized &&
			(ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits));
		if (signal_pending(current))
			return -ERESTARTSYS;
	}
}

static ssize_t
random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
	return _random_read(file->f_flags & O_NONBLOCK, buf, nbytes);
}

static ssize_t
urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
	unsigned long flags;
	static int maxwarn = 10;
	int ret;

	if (!crng_ready() && maxwarn > 0) {
		maxwarn--;
		if (__ratelimit(&urandom_warning))
			printk(KERN_NOTICE "random: %s: uninitialized urandom read (%zd bytes read)\n",
			       current->comm, nbytes);
		spin_lock_irqsave(&primary_crng.lock, flags);
		crng_init_cnt = 0;
		spin_unlock_irqrestore(&primary_crng.lock, flags);
	}
	nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
	ret = extract_crng_user(buf, nbytes);
	trace_urandom_read(8 * nbytes, 0, ENTROPY_BITS(&input_pool));
	return ret;
}

static __poll_t
random_poll(struct file *file, poll_table *wait)
{
	__poll_t mask;

	poll_wait(file, &random_read_wait, wait);
	poll_wait(file, &random_write_wait, wait);
	mask = 0;
	if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits)
		mask |= EPOLLIN | EPOLLRDNORM;
	if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
		mask |= EPOLLOUT | EPOLLWRNORM;
	return mask;
}

static int
write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
{
	size_t bytes;
	__u32 t, buf[16];
	const char __user *p = buffer;

	while (count > 0) {
		int b, i = 0;

		bytes = min(count, sizeof(buf));
		if (copy_from_user(&buf, p, bytes))
			return -EFAULT;

		for (b = bytes; b > 0; b -= sizeof(__u32), i++) {
			if (!arch_get_random_int(&t))
				break;
			buf[i] ^= t;
		}

		count -= bytes;
		p += bytes;

		mix_pool_bytes(r, buf, bytes);
		cond_resched();
	}

	return 0;
}

static ssize_t random_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	size_t ret;

	ret = write_pool(&input_pool, buffer, count);
	if (ret)
		return ret;

	return (ssize_t)count;
}

static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	int size, ent_count;
	int __user *p = (int __user *)arg;
	int retval;

	switch (cmd) {
	case RNDGETENTCNT:
		/* inherently racy, no point locking */
		ent_count = ENTROPY_BITS(&input_pool);
		if (put_user(ent_count, p))
			return -EFAULT;
		return 0;
	case RNDADDTOENTCNT:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p))
			return -EFAULT;
		return credit_entropy_bits_safe(&input_pool, ent_count);
	case RNDADDENTROPY:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p++))
			return -EFAULT;
		if (ent_count < 0)
			return -EINVAL;
		if (get_user(size, p++))
			return -EFAULT;
		retval = write_pool(&input_pool, (const char __user *)p,
				    size);
		if (retval < 0)
			return retval;
		return credit_entropy_bits_safe(&input_pool, ent_count);
	case RNDZAPENTCNT:
	case RNDCLEARPOOL:
		/*
		 * Clear the entropy pool counters.  We no longer clear
		 * the entropy pool, as that's silly.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		input_pool.entropy_count = 0;
		blocking_pool.entropy_count = 0;
		return 0;
	case RNDRESEEDCRNG:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (crng_init < 2)
			return -ENODATA;
		crng_reseed(&primary_crng, NULL);
		crng_global_init_time = jiffies - 1;
		return 0;
	default:
		return -EINVAL;
	}
}

static int random_fasync(int fd, struct file *filp, int on)
{
	return fasync_helper(fd, filp, on, &fasync);
}

const struct file_operations random_fops = {
	.read = random_read,
	.write = random_write,
	.poll = random_poll,
	.unlocked_ioctl = random_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
};

const struct file_operations urandom_fops = {
	.read = urandom_read,
	.write = random_write,
	.unlocked_ioctl = random_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
};

SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
		unsigned int, flags)
{
	int ret;

	if (flags & ~(GRND_NONBLOCK|GRND_RANDOM))
		return -EINVAL;

	if (count > INT_MAX)
		count = INT_MAX;

	if (flags & GRND_RANDOM)
		return _random_read(flags & GRND_NONBLOCK, buf, count);

	if (!crng_ready()) {
		if (flags & GRND_NONBLOCK)
			return -EAGAIN;
		ret = wait_for_random_bytes();
		if (unlikely(ret))
			return ret;
	}
	return urandom_read(NULL, buf, count, NULL);
}
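
/*
 * Illustrative userspace sketch (not kernel code): with flags == 0 the
 * call blocks until the CRNG is ready, then reads from the urandom
 * source; GRND_NONBLOCK fails with -EAGAIN instead of blocking, and
 * GRND_RANDOM reads from the blocking pool.
 *
 *	unsigned char buf[32];
 *
 *	if (syscall(SYS_getrandom, buf, sizeof(buf), 0) != sizeof(buf))
 *		handle_error();
 */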

/********************************************************************
 *
 * Sysctl interface
 *
 ********************************************************************/

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int min_read_thresh = 8, min_write_thresh;
static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
static int max_write_thresh = INPUT_POOL_WORDS * 32;
static int random_min_urandom_seed = 60;
static char sysctl_bootid[16];

/*
 * This function is used to return both the bootid UUID, and random
 * UUID.  The difference is in whether table->data is NULL; if it is,
 * then a new UUID is generated and returned to the user.
 *
 * If the user accesses "/proc/sys/kernel/random/boot_id", the boot-id
 * UUID is returned.
 */
static int proc_do_uuid(struct ctl_table *table, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table fake_table;
	unsigned char buf[64], tmp_uuid[16], *uuid;

	uuid = table->data;
	if (!uuid) {
		uuid = tmp_uuid;
		generate_random_uuid(uuid);
	} else {
		static DEFINE_SPINLOCK(bootid_spinlock);

		spin_lock(&bootid_spinlock);
		if (!uuid[8])
			generate_random_uuid(uuid);
		spin_unlock(&bootid_spinlock);
	}

	sprintf(buf, "%pU", uuid);

	fake_table.data = buf;
	fake_table.maxlen = sizeof(buf);

	return proc_dostring(&fake_table, write, buffer, lenp, ppos);
}

/*
 * Return entropy available scaled to integral bits
 */
static int proc_do_entropy(struct ctl_table *table, int write,
			   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table fake_table;
	int entropy_count;

	entropy_count = *(int *)table->data >> ENTROPY_SHIFT;

	fake_table.data = &entropy_count;
	fake_table.maxlen = sizeof(entropy_count);

	return proc_dointvec(&fake_table, write, buffer, lenp, ppos);
}

static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
extern struct ctl_table random_table[];
struct ctl_table random_table[] = {
	{
		.procname	= "poolsize",
		.data		= &sysctl_poolsize,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "entropy_avail",
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_do_entropy,
		.data		= &input_pool.entropy_count,
	},
	{
		.procname	= "read_wakeup_threshold",
		.data		= &random_read_wakeup_bits,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_read_thresh,
		.extra2		= &max_read_thresh,
	},
	{
		.procname	= "write_wakeup_threshold",
		.data		= &random_write_wakeup_bits,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_write_thresh,
		.extra2		= &max_write_thresh,
	},
	{
		.procname	= "urandom_min_reseed_secs",
		.data		= &random_min_urandom_seed,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "boot_id",
		.data		= &sysctl_bootid,
		.maxlen		= 16,
		.mode		= 0444,
		.proc_handler	= proc_do_uuid,
	},
	{
		.procname	= "uuid",
		.maxlen		= 16,
		.mode		= 0444,
		.proc_handler	= proc_do_uuid,
	},
#ifdef ADD_INTERRUPT_BENCH
	{
		.procname	= "add_interrupt_avg_cycles",
		.data		= &avg_cycles,
		.maxlen		= sizeof(avg_cycles),
		.mode		= 0444,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "add_interrupt_avg_deviation",
		.data		= &avg_deviation,
		.maxlen		= sizeof(avg_deviation),
		.mode		= 0444,
		.proc_handler	= proc_doulongvec_minmax,
	},
#endif
	{ }
};
#endif	/* CONFIG_SYSCTL */

struct batched_entropy {
	union {
		u64 entropy_u64[CHACHA_BLOCK_SIZE / sizeof(u64)];
		u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)];
	};
	unsigned int position;
	spinlock_t batch_lock;
};

/*
 * Get a random word for internal kernel use only.  The quality of the
 * random number is as good as /dev/urandom, but there is no backtrack
 * protection, with the goal of being quite fast and not depleting
 * entropy.  In order to ensure that the randomness provided by this
 * function is okay, the function wait_for_random_bytes() should be
 * called and return 0 at least once at any point prior.
 */
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
	.batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
};

u64 get_random_u64(void)
{
	u64 ret;
	unsigned long flags;
	struct batched_entropy *batch;
	static void *previous;

#if BITS_PER_LONG == 64
	if (arch_get_random_long((unsigned long *)&ret))
		return ret;
#else
	if (arch_get_random_long((unsigned long *)&ret) &&
	    arch_get_random_long((unsigned long *)&ret + 1))
		return ret;
#endif

	warn_unseeded_randomness(&previous);

	batch = raw_cpu_ptr(&batched_entropy_u64);
	spin_lock_irqsave(&batch->batch_lock, flags);
	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
		extract_crng((u8 *)batch->entropy_u64);
		batch->position = 0;
	}
	ret = batch->entropy_u64[batch->position++];
	spin_unlock_irqrestore(&batch->batch_lock, flags);
	return ret;
}
EXPORT_SYMBOL(get_random_u64);

static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
	.batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
};

u32 get_random_u32(void)
{
	u32 ret;
	unsigned long flags;
	struct batched_entropy *batch;
	static void *previous;

	if (arch_get_random_int(&ret))
		return ret;

	warn_unseeded_randomness(&previous);

	batch = raw_cpu_ptr(&batched_entropy_u32);
	spin_lock_irqsave(&batch->batch_lock, flags);
	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
		extract_crng((u8 *)batch->entropy_u32);
		batch->position = 0;
	}
	ret = batch->entropy_u32[batch->position++];
	spin_unlock_irqrestore(&batch->batch_lock, flags);
	return ret;
}
EXPORT_SYMBOL(get_random_u32);
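
/*
 * Illustrative sketch only (hypothetical names): these helpers suit
 * non-cryptographic uses such as picking an array index or jittering
 * a timeout,
 *
 *	idx = get_random_u32() % nr_slots;
 *	timeout = base_timeout + (get_random_u64() & 0xff);
 *
 * Unlike get_random_bytes(), there is no backtrack protection; that is
 * the price paid for speed.
 */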

/* It's important to invalidate all potential batched entropy that might
 * be stored before the crng is initialized, which we can do lazily by
 * simply resetting the counter to zero so that it's re-extracted on the
 * next usage. */
static void invalidate_batched_entropy(void)
{
	int cpu;
	unsigned long flags;

	for_each_possible_cpu(cpu) {
		struct batched_entropy *batched_entropy;

		batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
		spin_lock_irqsave(&batched_entropy->batch_lock, flags);
		batched_entropy->position = 0;
		spin_unlock(&batched_entropy->batch_lock);

		batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
		spin_lock(&batched_entropy->batch_lock);
		batched_entropy->position = 0;
		spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
	}
}

/**
 * randomize_page - Generate a random, page aligned address
 * @start:	The smallest acceptable address the caller will take.
 * @range:	The size of the area, starting at @start, within which the
 *		random address must fall.
 *
 * If @start + @range would overflow, @range is capped.
 *
 * NOTE: Historical use of randomize_range, which this replaces, presumed
 * that @start was page-aligned and @range + @start was not.  If those
 * assumptions no longer hold, we handle it anyway, at the cost of
 * wasting a little entropy.
 *
 * Return: A page aligned address within [start, start + range).  On error,
 * @start is returned.
 */
unsigned long
randomize_page(unsigned long start, unsigned long range)
{
	if (!PAGE_ALIGNED(start)) {
		range -= PAGE_ALIGN(start) - start;
		start = PAGE_ALIGN(start);
	}

	if (start > ULONG_MAX - range)
		range = ULONG_MAX - start;

	range >>= PAGE_SHIFT;

	if (range == 0)
		return start;

	return start + (get_random_long() % range << PAGE_SHIFT);
}
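
/*
 * Illustrative sketch only (hypothetical values): randomizing a
 * mapping base within a 16 MB window,
 *
 *	addr = randomize_page(base, 16 * 1024 * 1024);
 *
 * The result is page aligned and falls in [base, base + range), with
 * base first rounded up to a page boundary if needed.
 */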

/* Interface for in-kernel drivers of true hardware RNGs.
 * Those devices may produce endless random bits and will be throttled
 * when our pool is full.
 */
void add_hwgenerator_randomness(const char *buffer, size_t count,
				size_t entropy)
{
	struct entropy_store *poolp = &input_pool;

	if (unlikely(crng_init == 0)) {
		crng_fast_load(buffer, count);
		return;
	}

	/* Suspend writing if we're above the trickle threshold.
	 * We'll be woken up again once below random_write_wakeup_bits,
	 * or when the calling kthread is about to terminate.
	 */
	wait_event_interruptible(random_write_wait, kthread_should_stop() ||
			ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
	mix_pool_bytes(poolp, buffer, count);
	credit_entropy_bits(poolp, entropy);
}
EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
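
/*
 * Illustrative sketch only (hypothetical driver): a hw_random-style
 * driver feeding bytes with a conservative estimate of half a bit of
 * entropy per bit of data,
 *
 *	add_hwgenerator_randomness(buf, len, len * 8 / 2);
 *
 * The call sleeps whenever the input pool is above
 * random_write_wakeup_bits, which throttles fast hardware sources.
 */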

/* Handle a random seed passed in by the bootloader.
 * If the seed is trustworthy, it is credited as a hardware RNG input.
 * Otherwise it is treated as device data.
 * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER.
 */
void add_bootloader_randomness(const void *buf, unsigned int size)
{
	if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER))
		add_hwgenerator_randomness(buf, size, size * 8);
	else
		add_device_randomness(buf, size);
}
EXPORT_SYMBOL_GPL(add_bootloader_randomness);