/*
 * random.c -- A strong random number generator
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
#include <linux/fips.h>
#include <linux/ptrace.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/uuid.h>
#include <crypto/chacha.h>
#include <crypto/sha.h>

#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/random.h>

/* #define ADD_INTERRUPT_BENCH */

/*
 * Configuration information
 */
#define INPUT_POOL_SHIFT	12
#define INPUT_POOL_WORDS	(1 << (INPUT_POOL_SHIFT-5))
#define OUTPUT_POOL_SHIFT	10
#define OUTPUT_POOL_WORDS	(1 << (OUTPUT_POOL_SHIFT-5))
#define EXTRACT_SIZE		10

#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))

/*
 * To allow fractional bits to be tracked, the entropy_count field is
 * denominated in units of 1/8th bits.
 */
#define ENTROPY_SHIFT 3
#define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)

/*
 * If the entropy count falls under this number of bits, we wake up
 * processes selecting or polling for write access to /dev/random.
 */
static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;

/*
 * The pool is "stirred" at positions derived from a primitive
 * polynomial over GF(2); for the 128-word pool below the taps come
 * from x^128 + x^104 + x^76 + x^51 + x^25 + x + 1.  The mixing
 * function is not GF(2) arithmetic, but XOR combined with a 32-bit
 * rotate and a three-bit "twist", so the taps only serve to spread
 * input bytes across the pool quickly.
 */
static const struct poolinfo {
	int poolbitshift, poolwords, poolbytes, poolfracbits;
#define S(x) ilog2(x)+5, (x), (x)*4, (x) << (ENTROPY_SHIFT+5)
	int tap1, tap2, tap3, tap4, tap5;
} poolinfo_table[] = {
	/* x^128 + x^104 + x^76 + x^51 + x^25 + x + 1 */
	{ S(128),	104,	76,	51,	25,	1 },
};
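
/*
 * For reference, S(128) above expands to:
 *
 *	poolbitshift = ilog2(128) + 5 = 12
 *	poolwords    = 128
 *	poolbytes    = 512
 *	poolfracbits = 128 << (ENTROPY_SHIFT + 5) = 32768
 *
 * i.e. a 4096-bit pool whose entropy counter is kept in 1/8th-bit
 * ("fractional bit") units.
 */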

/*
 * Static global variables
 */
static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
static struct fasync_struct *fasync;

static DEFINE_SPINLOCK(random_ready_list_lock);
static LIST_HEAD(random_ready_list);

struct crng_state {
	__u32		state[16];
	unsigned long	init_time;
	spinlock_t	lock;
};

static struct crng_state primary_crng = {
	.lock = __SPIN_LOCK_UNLOCKED(primary_crng.lock),
};

/*
 * crng_init =  0 --> Uninitialized
 *		1 --> Initialized
 *		2 --> Initialized from input_pool
 *
 * crng_init is protected by primary_crng->lock, and only increases
 * its value (0->1->2).
 */
static int crng_init = 0;
#define crng_ready() (likely(crng_init > 1))
static int crng_init_cnt = 0;
static unsigned long crng_global_init_time = 0;
#define CRNG_INIT_CNT_THRESH (2*CHACHA_KEY_SIZE)
static void _extract_crng(struct crng_state *crng, __u8 out[CHACHA_BLOCK_SIZE]);
static void _crng_backtrack_protect(struct crng_state *crng,
				    __u8 tmp[CHACHA_BLOCK_SIZE], int used);
static void process_random_ready_list(void);
static void _get_random_bytes(void *buf, int nbytes);

static struct ratelimit_state unseeded_warning =
	RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
static struct ratelimit_state urandom_warning =
	RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);

static int ratelimit_disable __read_mostly;

module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");

/**********************************************************************
 *
 * OS independent entropy store.  Here are the functions which handle
 * storing entropy in an entropy pool.
 *
 **********************************************************************/

struct entropy_store {
	/* read-only data: */
	const struct poolinfo *poolinfo;
	__u32 *pool;
	const char *name;

	/* read-write data: */
	spinlock_t lock;
	unsigned short add_ptr;
	unsigned short input_rotate;
	int entropy_count;
	unsigned int initialized:1;
	unsigned int last_data_init:1;
	__u8 last_data[EXTRACT_SIZE];
};

static ssize_t extract_entropy(struct entropy_store *r, void *buf,
			       size_t nbytes, int min, int rsvd);
static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
				size_t nbytes, int fips);

static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;

static struct entropy_store input_pool = {
	.poolinfo = &poolinfo_table[0],
	.name = "input",
	.lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
	.pool = input_pool_data
};

static __u32 const twist_table[8] = {
	0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
	0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };

/*
 * This function adds bytes into the entropy "pool".  It does not
 * update the entropy estimate.  The caller should call
 * credit_entropy_bits if this is appropriate.
 *
 * The pool is stirred with a polynomial of degree poolwords over
 * GF(2), and then twisted.  We twist by three bits at a time because
 * it's cheap to do so and helps slightly in the expected case where
 * the entropy is concentrated in the low-order bits.
 */
static void _mix_pool_bytes(struct entropy_store *r, const void *in,
			    int nbytes)
{
	unsigned long i, tap1, tap2, tap3, tap4, tap5;
	int input_rotate;
	int wordmask = r->poolinfo->poolwords - 1;
	const char *bytes = in;
	__u32 w;

	tap1 = r->poolinfo->tap1;
	tap2 = r->poolinfo->tap2;
	tap3 = r->poolinfo->tap3;
	tap4 = r->poolinfo->tap4;
	tap5 = r->poolinfo->tap5;

	input_rotate = r->input_rotate;
	i = r->add_ptr;

	/* mix one byte at a time to simplify size handling */
	while (nbytes--) {
		w = rol32(*bytes++, input_rotate);
		i = (i - 1) & wordmask;

		/* XOR in the various taps */
		w ^= r->pool[i];
		w ^= r->pool[(i + tap1) & wordmask];
		w ^= r->pool[(i + tap2) & wordmask];
		w ^= r->pool[(i + tap3) & wordmask];
		w ^= r->pool[(i + tap4) & wordmask];
		w ^= r->pool[(i + tap5) & wordmask];

		/* Mix the result back in with a twist */
		r->pool[i] = (w >> 3) ^ twist_table[w & 7];

		/*
		 * Normally, we add 7 bits of rotation to the pool.
		 * At the beginning of the pool, add an extra 7 bits
		 * rotation, so that successive passes spread the
		 * input bits across the pool evenly.
		 */
		input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
	}

	r->input_rotate = input_rotate;
	r->add_ptr = i;
}

static void __mix_pool_bytes(struct entropy_store *r, const void *in,
			     int nbytes)
{
	trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
	_mix_pool_bytes(r, in, nbytes);
}

static void mix_pool_bytes(struct entropy_store *r, const void *in,
			   int nbytes)
{
	unsigned long flags;

	trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
	spin_lock_irqsave(&r->lock, flags);
	_mix_pool_bytes(r, in, nbytes);
	spin_unlock_irqrestore(&r->lock, flags);
}

struct fast_pool {
	__u32		pool[4];
	unsigned long	last;
	unsigned short	reg_idx;
	unsigned char	count;
};

/*
 * This is a fast mixing routine used by the interrupt randomness
 * collector.  It's hardcoded for an 128 bit pool and assumes that any
 * locks that might be needed are taken by the caller.
 */
static void fast_mix(struct fast_pool *f)
{
	__u32 a = f->pool[0],	b = f->pool[1];
	__u32 c = f->pool[2],	d = f->pool[3];

	a += b;			c += d;
	b = rol32(b, 6);	d = rol32(d, 27);
	d ^= a;			b ^= c;

	a += b;			c += d;
	b = rol32(b, 16);	d = rol32(d, 14);
	d ^= a;			b ^= c;

	a += b;			c += d;
	b = rol32(b, 6);	d = rol32(d, 27);
	d ^= a;			b ^= c;

	a += b;			c += d;
	b = rol32(b, 16);	d = rol32(d, 14);
	d ^= a;			b ^= c;

	f->pool[0] = a;  f->pool[1] = b;
	f->pool[2] = c;  f->pool[3] = d;
	f->count++;
}
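
/*
 * The two rotation pairs above, (6, 27) and (16, 14), give four ARX
 * (add-rotate-xor) half-rounds per call; this is only meant to stir
 * each new event's bits across all four pool words cheaply, not to
 * be cryptographically strong on its own.
 */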

static void process_random_ready_list(void)
{
	unsigned long flags;
	struct random_ready_callback *rdy, *tmp;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) {
		struct module *owner = rdy->owner;

		list_del_init(&rdy->list);
		rdy->func(rdy);
		module_put(owner);
	}
	spin_unlock_irqrestore(&random_ready_list_lock, flags);
}

/*
 * Credit (or debit) the entropy store with n bits of entropy.
 * Use credit_entropy_bits_safe() if the value comes from userspace
 * or otherwise should be checked for extreme values.
 */
static void credit_entropy_bits(struct entropy_store *r, int nbits)
{
	int entropy_count, orig, has_initialized = 0;
	const int pool_size = r->poolinfo->poolfracbits;
	int nfrac = nbits << ENTROPY_SHIFT;

	if (!nbits)
		return;

retry:
	entropy_count = orig = READ_ONCE(r->entropy_count);
	if (nfrac < 0) {
		/* Debit */
		entropy_count += nfrac;
	} else {
		/*
		 * Credit: we have to account for the possibility of
		 * overwriting already present entropy.  Even in the
		 * ideal case of pure Shannon entropy, new contributions
		 * approach the full value asymptotically:
		 *
		 * entropy <- entropy + (pool_size - entropy) *
		 *	(1 - exp(-add_entropy/pool_size))
		 *
		 * For add_entropy <= pool_size/2 then
		 * (1 - exp(-add_entropy/pool_size)) >=
		 *    (add_entropy/pool_size)*0.7869...
		 * so we can approximate the exponential with
		 * 3/4*add_entropy/pool_size and still be on the
		 * safe side by adding at most pool_size/2 at a time.
		 *
		 * The use of pool_size-2 in the while condition guards
		 * against rounding artifacts that could otherwise keep
		 * the loop from terminating.
		 */
		int pnfrac = nfrac;
		const int s = r->poolinfo->poolbitshift + ENTROPY_SHIFT + 2;
		/* The +2 corresponds to the /4 in the denominator */

		do {
			unsigned int anfrac = min(pnfrac, pool_size/2);
			unsigned int add =
				((pool_size - entropy_count)*anfrac*3) >> s;

			entropy_count += add;
			pnfrac -= anfrac;
		} while (unlikely(entropy_count < pool_size-2 && pnfrac));
	}

	if (WARN_ON(entropy_count < 0)) {
		pr_warn("negative entropy/overflow: pool %s count %d\n",
			r->name, entropy_count);
		entropy_count = 0;
	} else if (entropy_count > pool_size)
		entropy_count = pool_size;
	if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
		goto retry;

	if (has_initialized) {
		r->initialized = 1;
		kill_fasync(&fasync, SIGIO, POLL_IN);
	}

	trace_credit_entropy_bits(r->name, nbits,
				  entropy_count >> ENTROPY_SHIFT, _RET_IP_);

	if (r == &input_pool) {
		int entropy_bits = entropy_count >> ENTROPY_SHIFT;

		if (crng_init < 2) {
			if (entropy_bits < 128)
				return;
			crng_reseed(&primary_crng, r);
			entropy_bits = ENTROPY_BITS(r);
		}
	}
}

static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
{
	const int nbits_max = r->poolinfo->poolwords * 32;

	if (nbits < 0)
		return -EINVAL;

	/* Cap the value to avoid overflows */
	nbits = min(nbits, nbits_max);

	credit_entropy_bits(r, nbits);
	return 0;
}
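
/*
 * A worked example of the credit approximation in
 * credit_entropy_bits(), using the 4096-bit input pool: with the pool
 * empty and a 512-bit contribution (nfrac = 4096 fractional bits),
 * one pass of the loop adds
 *
 *	(32768 - 0) * 4096 * 3 >> (12 + 3 + 2)	= 3072
 *
 * fractional bits, i.e. 3/4 of the nominal credit; repeated
 * contributions approach the pool size asymptotically.
 */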

/*********************************************************************
 *
 * CRNG using CHACHA20
 *
 *********************************************************************/

#define CRNG_RESEED_INTERVAL (300*HZ)

static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);

#ifdef CONFIG_NUMA
/*
 * Maintain one crng per NUMA node, so that heavily parallel readers
 * of /dev/urandom do not all contend on the primary_crng lock.
 */
static struct crng_state **crng_node_pool __read_mostly;
#endif

static void invalidate_batched_entropy(void);
static void numa_crng_init(void);

static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
static int __init parse_trust_cpu(char *arg)
{
	return kstrtobool(arg, &trust_cpu);
}
early_param("random.trust_cpu", parse_trust_cpu);

static bool crng_init_try_arch(struct crng_state *crng)
{
	int		i;
	bool		arch_init = true;
	unsigned long	rv;

	for (i = 4; i < 16; i++) {
		if (!arch_get_random_seed_long(&rv) &&
		    !arch_get_random_long(&rv)) {
			rv = random_get_entropy();
			arch_init = false;
		}
		crng->state[i] ^= rv;
	}

	return arch_init;
}

static bool __init crng_init_try_arch_early(struct crng_state *crng)
{
	int		i;
	bool		arch_init = true;
	unsigned long	rv;

	for (i = 4; i < 16; i++) {
		if (!arch_get_random_seed_long_early(&rv) &&
		    !arch_get_random_long_early(&rv)) {
			rv = random_get_entropy();
			arch_init = false;
		}
		crng->state[i] ^= rv;
	}

	return arch_init;
}

static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
{
	memcpy(&crng->state[0], "expand 32-byte k", 16);
	_get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
	crng_init_try_arch(crng);
	crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}

static void __init crng_initialize_primary(struct crng_state *crng)
{
	memcpy(&crng->state[0], "expand 32-byte k", 16);
	_extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0);
	if (crng_init_try_arch_early(crng) && trust_cpu) {
		invalidate_batched_entropy();
		numa_crng_init();
		crng_init = 2;
		pr_notice("crng done (trusting CPU's manufacturer)\n");
	}
	crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}

#ifdef CONFIG_NUMA
static void do_numa_crng_init(struct work_struct *work)
{
	int i;
	struct crng_state *crng;
	struct crng_state **pool;

	pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
	for_each_online_node(i) {
		crng = kmalloc_node(sizeof(struct crng_state),
				    GFP_KERNEL | __GFP_NOFAIL, i);
		spin_lock_init(&crng->lock);
		crng_initialize_secondary(crng);
		pool[i] = crng;
	}
	mb();
	if (cmpxchg(&crng_node_pool, NULL, pool)) {
		for_each_node(i)
			kfree(pool[i]);
		kfree(pool);
	}
}

static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init);

static void numa_crng_init(void)
{
	schedule_work(&numa_crng_init_work);
}
#else
static void numa_crng_init(void) {}
#endif

/*
 * crng_fast_load() can be called by code in the interrupt service
 * path.  So we can't afford to dilly-dally.
 */
static int crng_fast_load(const char *cp, size_t len)
{
	unsigned long flags;
	unsigned char *p;

	if (!spin_trylock_irqsave(&primary_crng.lock, flags))
		return 0;
	if (crng_init != 0) {
		spin_unlock_irqrestore(&primary_crng.lock, flags);
		return 0;
	}
	p = (unsigned char *) &primary_crng.state[4];
	while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
		p[crng_init_cnt % CHACHA_KEY_SIZE] ^= *cp;
		cp++; crng_init_cnt++; len--;
	}
	spin_unlock_irqrestore(&primary_crng.lock, flags);
	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
		invalidate_batched_entropy();
		crng_init = 1;
		pr_notice("fast init done\n");
	}
	return 1;
}
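
/*
 * Note that crng_fast_load() merely XORs raw bytes over the ChaCha
 * key words and counts them; once CRNG_INIT_CNT_THRESH (64) bytes
 * have been folded in, crng_init moves from 0 to 1 and both the fast
 * and slow load paths become no-ops.
 */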

/*
 * crng_slow_load() is called by add_device_randomness, which has two
 * attributes.  (1) We can't trust the buffer passed to it is
 * guaranteed to be unpredictable (so it might not have any entropy at
 * all), and (2) it doesn't have the performance constraints of
 * crng_fast_load().
 *
 * So we do something more comprehensive which is guaranteed to touch
 * all of the primary_crng's state, and which uses a LFSR with a
 * period of 255 as part of the mixing algorithm.  Finally, we do
 * *not* advance crng_init_cnt, since the buffer we may get could be
 * something like a fixed DMI table (for example), which might very
 * well be unique to the machine, but is otherwise unvarying.
 */
static int crng_slow_load(const char *cp, size_t len)
{
	unsigned long		flags;
	static unsigned char	lfsr = 1;
	unsigned char		tmp;
	unsigned		i, max = CHACHA_KEY_SIZE;
	const char		*src_buf = cp;
	char			*dest_buf = (char *) &primary_crng.state[4];

	if (!spin_trylock_irqsave(&primary_crng.lock, flags))
		return 0;
	if (crng_init != 0) {
		spin_unlock_irqrestore(&primary_crng.lock, flags);
		return 0;
	}
	if (len > max)
		max = len;

	for (i = 0; i < max; i++) {
		tmp = lfsr;
		lfsr >>= 1;
		if (tmp & 1)
			lfsr ^= 0xE1;
		tmp = dest_buf[i % CHACHA_KEY_SIZE];
		dest_buf[i % CHACHA_KEY_SIZE] ^= src_buf[i % len] ^ lfsr;
		lfsr += (tmp << 3) | (tmp >> 5);
	}
	spin_unlock_irqrestore(&primary_crng.lock, flags);
	return 1;
}

static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
{
	unsigned long	flags;
	int		i, num;
	union {
		__u8	block[CHACHA_BLOCK_SIZE];
		__u32	key[8];
	} buf;

	if (r) {
		num = extract_entropy(r, &buf, 32, 16, 0);
		if (num == 0)
			return;
	} else {
		_extract_crng(&primary_crng, buf.block);
		_crng_backtrack_protect(&primary_crng, buf.block,
					CHACHA_KEY_SIZE);
	}
	spin_lock_irqsave(&crng->lock, flags);
	for (i = 0; i < 8; i++) {
		unsigned long	rv;
		if (!arch_get_random_seed_long(&rv) &&
		    !arch_get_random_long(&rv))
			rv = random_get_entropy();
		crng->state[i+4] ^= buf.key[i] ^ rv;
	}
	memzero_explicit(&buf, sizeof(buf));
	crng->init_time = jiffies;
	spin_unlock_irqrestore(&crng->lock, flags);
	if (crng == &primary_crng && crng_init < 2) {
		invalidate_batched_entropy();
		numa_crng_init();
		crng_init = 2;
		process_random_ready_list();
		wake_up_interruptible(&crng_init_wait);
		kill_fasync(&fasync, SIGIO, POLL_IN);
		pr_notice("crng init done\n");
		if (unseeded_warning.missed) {
			pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
				  unseeded_warning.missed);
			unseeded_warning.missed = 0;
		}
		if (urandom_warning.missed) {
			pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
				  urandom_warning.missed);
			urandom_warning.missed = 0;
		}
	}
}

static void _extract_crng(struct crng_state *crng,
			  __u8 out[CHACHA_BLOCK_SIZE])
{
	unsigned long v, flags;

	if (crng_ready() &&
	    (time_after(crng_global_init_time, crng->init_time) ||
	     time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)))
		crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL);
	spin_lock_irqsave(&crng->lock, flags);
	if (arch_get_random_long(&v))
		crng->state[14] ^= v;
	chacha20_block(&crng->state[0], out);
	if (crng->state[12] == 0)
		crng->state[13]++;
	spin_unlock_irqrestore(&crng->lock, flags);
}

static void extract_crng(__u8 out[CHACHA_BLOCK_SIZE])
{
	struct crng_state *crng = NULL;

#ifdef CONFIG_NUMA
	if (crng_node_pool)
		crng = crng_node_pool[numa_node_id()];
	if (crng == NULL)
#endif
		crng = &primary_crng;
	_extract_crng(crng, out);
}

/*
 * Use the leftover bytes from the CRNG block output (if there is
 * enough) to mutate the CRNG key to provide backtracking protection.
 */
static void _crng_backtrack_protect(struct crng_state *crng,
				    __u8 tmp[CHACHA_BLOCK_SIZE], int used)
{
	unsigned long	flags;
	__u32		*s, *d;
	int		i;

	used = round_up(used, sizeof(__u32));
	if (used + CHACHA_KEY_SIZE > CHACHA_BLOCK_SIZE) {
		extract_crng(tmp);
		used = 0;
	}
	spin_lock_irqsave(&crng->lock, flags);
	s = (__u32 *) &tmp[used];
	d = &crng->state[4];
	for (i = 0; i < 8; i++)
		*d++ ^= *s++;
	spin_unlock_irqrestore(&crng->lock, flags);
}

static void crng_backtrack_protect(__u8 tmp[CHACHA_BLOCK_SIZE], int used)
{
	struct crng_state *crng = NULL;

#ifdef CONFIG_NUMA
	if (crng_node_pool)
		crng = crng_node_pool[numa_node_id()];
	if (crng == NULL)
#endif
		crng = &primary_crng;
	_crng_backtrack_protect(crng, tmp, used);
}

static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
{
	ssize_t ret = 0, i = CHACHA_BLOCK_SIZE;
	__u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
	int large_request = (nbytes > 256);

	while (nbytes) {
		if (large_request && need_resched()) {
			if (signal_pending(current)) {
				if (ret == 0)
					ret = -ERESTARTSYS;
				break;
			}
			schedule();
		}

		extract_crng(tmp);
		i = min_t(int, nbytes, CHACHA_BLOCK_SIZE);
		if (copy_to_user(buf, tmp, i)) {
			ret = -EFAULT;
			break;
		}

		nbytes -= i;
		buf += i;
		ret += i;
	}
	crng_backtrack_protect(tmp, i);

	/* Wipe data just written to memory */
	memzero_explicit(tmp, sizeof(tmp));

	return ret;
}
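
/*
 * On every exit path above, the last ChaCha output block, minus
 * whatever was copied to userspace, is folded back into the key by
 * crng_backtrack_protect(), so a later compromise of the CRNG state
 * cannot be rewound to recover bytes already handed out.
 */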

/*********************************************************************
 *
 * Entropy input management
 *
 *********************************************************************/

/* There is one of these per entropy source */
struct timer_rand_state {
	cycles_t last_time;
	long last_delta, last_delta2;
};

#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, }

/*
 * Add device- or boot-specific data to the input pool to help
 * initialize it.
 *
 * None of this adds any entropy; it is meant to avoid the problem of
 * the entropy pool having similar initial state across largely
 * identical devices.
 */
void add_device_randomness(const void *buf, unsigned int size)
{
	unsigned long time = random_get_entropy() ^ jiffies;
	unsigned long flags;

	if (!crng_ready() && size)
		crng_slow_load(buf, size);

	trace_add_device_randomness(size, _RET_IP_);
	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(&input_pool, buf, size);
	_mix_pool_bytes(&input_pool, &time, sizeof(time));
	spin_unlock_irqrestore(&input_pool.lock, flags);
}
EXPORT_SYMBOL(add_device_randomness);

static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE;

/*
 * This function adds entropy to the entropy "pool" by using timing
 * delays.  It uses the timer_rand_state structure to make an estimate
 * of how many bits of entropy this call has added to the pool.
 *
 * The number "num" is also added to the pool - it should somehow describe
 * the type of event which just happened.  This is currently 0-255 for
 * keyboard scan codes, and 256 upwards for interrupts.
 */
static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
{
	struct entropy_store	*r;
	struct {
		long jiffies;
		unsigned cycles;
		unsigned num;
	} sample;
	long delta, delta2, delta3;

	sample.jiffies = jiffies;
	sample.cycles = random_get_entropy();
	sample.num = num;
	r = &input_pool;
	mix_pool_bytes(r, &sample, sizeof(sample));

	/*
	 * Calculate number of bits of randomness we probably added.
	 * We take into account the first, second and third-order
	 * deltas in order to make our estimate.
	 */
	delta = sample.jiffies - READ_ONCE(state->last_time);
	WRITE_ONCE(state->last_time, sample.jiffies);

	delta2 = delta - READ_ONCE(state->last_delta);
	WRITE_ONCE(state->last_delta, delta);

	delta3 = delta2 - READ_ONCE(state->last_delta2);
	WRITE_ONCE(state->last_delta2, delta2);

	if (delta < 0)
		delta = -delta;
	if (delta2 < 0)
		delta2 = -delta2;
	if (delta3 < 0)
		delta3 = -delta3;
	if (delta > delta2)
		delta = delta2;
	if (delta > delta3)
		delta = delta3;

	/*
	 * delta is now the minimum absolute delta.  Round down by one
	 * bit on general principles, and limit the entropy estimate
	 * to 11 bits.
	 */
	credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
}
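
/*
 * Example of the estimate above (a sketch): if successive events
 * yield first-, second- and third-order deltas of 96, 40 and 200
 * jiffies, the minimum absolute delta is 40 and fls(40 >> 1) =
 * fls(20) = 5, so the event is credited with 5 bits, well under the
 * 11-bit cap.
 */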

void add_input_randomness(unsigned int type, unsigned int code,
			  unsigned int value)
{
	static unsigned char last_value;

	/* ignore autorepeat and the like */
	if (value == last_value)
		return;

	last_value = value;
	add_timer_randomness(&input_timer_state,
			     (type << 4) ^ code ^ (code >> 4) ^ value);
	trace_add_input_randomness(ENTROPY_BITS(&input_pool));
}
EXPORT_SYMBOL_GPL(add_input_randomness);

static DEFINE_PER_CPU(struct fast_pool, irq_randomness);

#ifdef ADD_INTERRUPT_BENCH
static unsigned long avg_cycles, avg_deviation;

#define AVG_SHIFT 8	/* Exponential average factor k=1/256 */
#define FIXED_1_2 (1 << (AVG_SHIFT-1))

static void add_interrupt_bench(cycles_t start)
{
	long delta = random_get_entropy() - start;

	/* Use a weighted moving average */
	delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT);
	avg_cycles += delta;
	/* And average deviation */
	delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT);
	avg_deviation += delta;
}
#else
#define add_interrupt_bench(x)
#endif

static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
{
	__u32 *ptr = (__u32 *) regs;
	unsigned int idx;

	if (regs == NULL)
		return 0;
	idx = READ_ONCE(f->reg_idx);
	if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
		idx = 0;
	ptr += idx++;
	WRITE_ONCE(f->reg_idx, idx);
	return *ptr;
}

void add_interrupt_randomness(int irq, int irq_flags)
{
	struct entropy_store	*r;
	struct fast_pool	*fast_pool = this_cpu_ptr(&irq_randomness);
	struct pt_regs		*regs = get_irq_regs();
	unsigned long		now = jiffies;
	cycles_t		cycles = random_get_entropy();
	__u32			c_high, j_high;
	__u64			ip;
	unsigned long		seed;
	int			credit = 0;

	if (cycles == 0)
		cycles = get_reg(fast_pool, regs);
	c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
	j_high = (sizeof(now) > 4) ? now >> 32 : 0;
	fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
	fast_pool->pool[1] ^= now ^ c_high;
	ip = regs ? instruction_pointer(regs) : _RET_IP_;
	fast_pool->pool[2] ^= ip;
	fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
		get_reg(fast_pool, regs);

	fast_mix(fast_pool);
	add_interrupt_bench(cycles);

	if (unlikely(crng_init == 0)) {
		if ((fast_pool->count >= 64) &&
		    crng_fast_load((char *) fast_pool->pool,
				   sizeof(fast_pool->pool))) {
			fast_pool->count = 0;
			fast_pool->last = now;
		}
		return;
	}

	if ((fast_pool->count < 64) &&
	    !time_after(now, fast_pool->last + HZ))
		return;

	r = &input_pool;
	if (!spin_trylock(&r->lock))
		return;

	fast_pool->last = now;
	__mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));

	/*
	 * If we have an architectural seed generator, produce a seed
	 * and add it to the pool.  For the sake of paranoia don't let
	 * the architectural seed generator dominate the input from the
	 * interrupt noise.
	 */
	if (arch_get_random_seed_long(&seed)) {
		__mix_pool_bytes(r, &seed, sizeof(seed));
		credit = 1;
	}
	spin_unlock(&r->lock);

	fast_pool->count = 0;

	/* award one bit for the contents of the fast pool */
	credit_entropy_bits(r, credit + 1);
}
EXPORT_SYMBOL_GPL(add_interrupt_randomness);
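
/*
 * The per-CPU fast pool above is only spilled into the input pool
 * after 64 events or one second, whichever comes first, and only if
 * the pool lock can be taken without spinning: interrupt context must
 * never wait on the entropy store.
 */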

#ifdef CONFIG_BLOCK
void add_disk_randomness(struct gendisk *disk)
{
	if (!disk || !disk->random)
		return;
	/* first major is 1, so we get >= 0x200 here */
	add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
	trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool));
}
EXPORT_SYMBOL_GPL(add_disk_randomness);
#endif

/*********************************************************************
 *
 * Entropy extraction routines
 *
 *********************************************************************/

/*
 * This function decides how many bytes to actually take from the
 * given pool; it can roll back the request if we access the pool
 * and there isn't enough.
 */
static size_t account(struct entropy_store *r, size_t nbytes, int min,
		      int reserved)
{
	int entropy_count, orig, have_bytes;
	size_t ibytes, nfrac;

	BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);

	/* Can we pull enough? */
retry:
	entropy_count = orig = READ_ONCE(r->entropy_count);
	ibytes = nbytes;
	/* never pull more than available */
	have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);

	if ((have_bytes -= reserved) < 0)
		have_bytes = 0;
	ibytes = min_t(size_t, ibytes, have_bytes);
	if (ibytes < min)
		ibytes = 0;

	if (WARN_ON(entropy_count < 0)) {
		pr_warn("negative entropy count: pool %s count %d\n",
			r->name, entropy_count);
		entropy_count = 0;
	}
	nfrac = ibytes << (ENTROPY_SHIFT + 3);
	if ((size_t) entropy_count > nfrac)
		entropy_count -= nfrac;
	else
		entropy_count = 0;

	if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
		goto retry;

	trace_debit_entropy(r->name, 8 * ibytes);
	if (ibytes && ENTROPY_BITS(r) < random_write_wakeup_bits) {
		wake_up_interruptible(&random_write_wait);
		kill_fasync(&fasync, SIGIO, POLL_OUT);
	}

	return ibytes;
}

/*
 * This function does the actual extraction for extract_entropy.
 *
 * Note: we assume that .poolwords is a multiple of 16 words.
 */
static void extract_buf(struct entropy_store *r, __u8 *out)
{
	int i;
	union {
		__u32 w[5];
		unsigned long l[LONGS(20)];
	} hash;
	__u32 workspace[SHA1_WORKSPACE_WORDS];
	unsigned long flags;

	/*
	 * If we have an architectural hardware random number
	 * generator, use it for SHA's initial vector
	 */
	sha1_init(hash.w);
	for (i = 0; i < LONGS(20); i++) {
		unsigned long v;
		if (!arch_get_random_long(&v))
			break;
		hash.l[i] = v;
	}

	/* Generate a hash across the pool */
	spin_lock_irqsave(&r->lock, flags);
	for (i = 0; i < r->poolinfo->poolwords; i += 16)
		sha1_transform(hash.w, (__u8 *)(r->pool + i), workspace);

	/*
	 * We mix the hash back into the pool to prevent backtracking
	 * attacks (where the attacker knows the state of the pool
	 * plus the current outputs, and attempts to find previous
	 * outputs), unless the hash function can be inverted. By
	 * mixing at least a SHA1 worth of hash data back, we make
	 * brute-forcing the feedback as hard as brute-forcing the
	 * hash.
	 */
	__mix_pool_bytes(r, hash.w, sizeof(hash.w));
	spin_unlock_irqrestore(&r->lock, flags);

	memzero_explicit(workspace, sizeof(workspace));

	/*
	 * In case the hash function has some recognizable output
	 * pattern, we fold it in half. Thus, we always feed back
	 * twice as much data as we output.
	 */
	hash.w[0] ^= hash.w[3];
	hash.w[1] ^= hash.w[4];
	hash.w[2] ^= rol32(hash.w[2], 16);

	memcpy(out, &hash, EXTRACT_SIZE);
	memzero_explicit(&hash, sizeof(hash));
}
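
/*
 * The fold above means only 80 of the 160 SHA-1 state bits
 * (EXTRACT_SIZE = 10 bytes) ever leave extract_buf() per hash, so an
 * observer of the output learns at most half of the hash state that
 * was mixed back into the pool.
 */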

static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
				size_t nbytes, int fips)
{
	ssize_t ret = 0, i;
	__u8 tmp[EXTRACT_SIZE];
	unsigned long flags;

	while (nbytes) {
		extract_buf(r, tmp);

		if (fips) {
			spin_lock_irqsave(&r->lock, flags);
			if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
				panic("Hardware RNG duplicated output!\n");
			memcpy(r->last_data, tmp, EXTRACT_SIZE);
			spin_unlock_irqrestore(&r->lock, flags);
		}
		i = min_t(int, nbytes, EXTRACT_SIZE);
		memcpy(buf, tmp, i);
		nbytes -= i;
		buf += i;
		ret += i;
	}

	/* Wipe data just returned from memory */
	memzero_explicit(tmp, sizeof(tmp));

	return ret;
}

/*
 * This function extracts randomness from the "entropy pool", and
 * returns it in a buffer.
 *
 * The min parameter specifies the minimum amount we can pull before
 * failing to avoid races that defeat catastrophic reseeding, while
 * the reserved parameter indicates how much entropy we must leave in
 * the pool after each pull to avoid starving other readers.
 */
static ssize_t extract_entropy(struct entropy_store *r, void *buf,
			       size_t nbytes, int min, int reserved)
{
	__u8 tmp[EXTRACT_SIZE];
	unsigned long flags;

	/* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
	if (fips_enabled) {
		spin_lock_irqsave(&r->lock, flags);
		if (!r->last_data_init) {
			r->last_data_init = 1;
			spin_unlock_irqrestore(&r->lock, flags);
			trace_extract_entropy(r->name, EXTRACT_SIZE,
					      ENTROPY_BITS(r), _RET_IP_);
			extract_buf(r, tmp);
			spin_lock_irqsave(&r->lock, flags);
			memcpy(r->last_data, tmp, EXTRACT_SIZE);
		}
		spin_unlock_irqrestore(&r->lock, flags);
	}

	trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
	nbytes = account(r, nbytes, min, reserved);

	return _extract_entropy(r, buf, nbytes, fips_enabled);
}

#define warn_unseeded_randomness(previous) \
	_warn_unseeded_randomness(__func__, (void *) _RET_IP_, (previous))

static void _warn_unseeded_randomness(const char *func_name, void *caller,
				      void **previous)
{
#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
	const bool print_once = false;
#else
	static bool print_once __read_mostly;
#endif

	if (print_once ||
	    crng_ready() ||
	    (previous && (caller == READ_ONCE(*previous))))
		return;
	WRITE_ONCE(*previous, caller);
#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
	print_once = true;
#endif
	if (__ratelimit(&unseeded_warning))
		printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n",
				func_name, caller, crng_init);
}

/*
 * This function is the exported kernel interface.  It returns some
 * number of good random numbers, suitable for key generation, seeding
 * TCP sequence numbers, etc.  It does not rely on the hardware random
 * number generator.  For random bytes direct from the hardware RNG
 * (when available), use get_random_bytes_arch(). In order to ensure
 * that the randomness provided by this function is okay, the function
 * wait_for_random_bytes() should be called and return 0 at least once
 * at any point prior.
 */
static void _get_random_bytes(void *buf, int nbytes)
{
	__u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);

	trace_get_random_bytes(nbytes, _RET_IP_);

	while (nbytes >= CHACHA_BLOCK_SIZE) {
		extract_crng(buf);
		buf += CHACHA_BLOCK_SIZE;
		nbytes -= CHACHA_BLOCK_SIZE;
	}

	if (nbytes > 0) {
		extract_crng(tmp);
		memcpy(buf, tmp, nbytes);
		crng_backtrack_protect(tmp, nbytes);
	} else
		crng_backtrack_protect(tmp, CHACHA_BLOCK_SIZE);
	memzero_explicit(tmp, sizeof(tmp));
}

void get_random_bytes(void *buf, int nbytes)
{
	static void *previous;

	warn_unseeded_randomness(&previous);
	_get_random_bytes(buf, nbytes);
}
EXPORT_SYMBOL(get_random_bytes);
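
/*
 * Typical in-kernel use (a sketch): derive a 256-bit key once the
 * CRNG is known to be seeded.
 *
 *	u8 key[32];
 *
 *	if (!wait_for_random_bytes())
 *		get_random_bytes(key, sizeof(key));
 */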

/*
 * Each time the timer fires, we expect that we got an unpredictable
 * jump in the cycle counter.  Even if the timer is running on another
 * CPU, the timer activity will be touching the stack of the CPU that
 * is generating entropy.
 *
 * Note that we don't re-arm the timer in the timer itself - we are
 * happy to be scheduled away, since that just makes the load more
 * complex, but we do not want the timer to keep ticking unless the
 * entropy loop is running.  So the re-arming always happens in the
 * entropy loop itself.
 */
static void entropy_timer(struct timer_list *t)
{
	credit_entropy_bits(&input_pool, 1);
}

/*
 * If we have an actual cycle counter, see if we can
 * generate enough entropy with timing noise
 */
static void try_to_generate_entropy(void)
{
	struct {
		unsigned long now;
		struct timer_list timer;
	} stack;

	stack.now = random_get_entropy();

	/* Slow counter - or none. Don't even bother */
	if (stack.now == random_get_entropy())
		return;

	timer_setup_on_stack(&stack.timer, entropy_timer, 0);
	while (!crng_ready()) {
		if (!timer_pending(&stack.timer))
			mod_timer(&stack.timer, jiffies+1);
		mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
		schedule();
		stack.now = random_get_entropy();
	}

	del_timer_sync(&stack.timer);
	destroy_timer_on_stack(&stack.timer);
	mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
}

/*
 * Wait for the urandom pool to be seeded and thus guaranteed to supply
 * cryptographically secure random numbers.  This applies to: the
 * /dev/urandom device, the get_random_bytes function, and the
 * get_random_{u32,u64,int,long} family of functions.  Using any of
 * these functions without first calling this function forfeits the
 * guarantee of security.
 *
 * Returns: 0 if the urandom pool has been seeded.
 *          -ERESTARTSYS if the function was interrupted by a signal.
 */
int wait_for_random_bytes(void)
{
	if (likely(crng_ready()))
		return 0;

	do {
		int ret;
		ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
		if (ret)
			return ret > 0 ? 0 : ret;

		try_to_generate_entropy();
	} while (!crng_ready());

	return 0;
}
EXPORT_SYMBOL(wait_for_random_bytes);

/*
 * Returns whether or not the urandom pool has been seeded and thus
 * guaranteed to supply cryptographically secure random numbers.  This
 * applies to: the /dev/urandom device, the get_random_bytes function,
 * and the get_random_{u32,u64,int,long} family of functions.
 *
 * Returns: true if the urandom pool has been seeded.
 *          false if the urandom pool has not been seeded.
 */
bool rng_is_initialized(void)
{
	return crng_ready();
}
EXPORT_SYMBOL(rng_is_initialized);

/*
 * Add a callback function that will be invoked when the nonblocking
 * pool is initialised.
 *
 * returns: 0 if callback is successfully added
 *	    -EALREADY if pool is already initialised (callback not called)
 *	    -ENOENT if module for callback is not alive
 */
int add_random_ready_callback(struct random_ready_callback *rdy)
{
	struct module *owner;
	unsigned long flags;
	int err = -EALREADY;

	if (crng_ready())
		return err;

	owner = rdy->owner;
	if (!try_module_get(owner))
		return -ENOENT;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	if (crng_ready())
		goto out;

	owner = NULL;

	list_add(&rdy->list, &random_ready_list);
	err = 0;

out:
	spin_unlock_irqrestore(&random_ready_list_lock, flags);

	module_put(owner);

	return err;
}
EXPORT_SYMBOL(add_random_ready_callback);

/*
 * Delete a previously registered readiness callback function.
 */
void del_random_ready_callback(struct random_ready_callback *rdy)
{
	unsigned long flags;
	struct module *owner = NULL;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	if (!list_empty(&rdy->list)) {
		list_del_init(&rdy->list);
		owner = rdy->owner;
	}
	spin_unlock_irqrestore(&random_ready_list_lock, flags);

	module_put(owner);
}
EXPORT_SYMBOL(del_random_ready_callback);

/*
 * This function will use the architecture-specific hardware random
 * number generator if it is available.  It is not recommended for
 * general use; use get_random_bytes() instead.  It returns the number
 * of bytes filled in, which may be less than requested if the
 * hardware generator stops delivering.
 */
int __must_check get_random_bytes_arch(void *buf, int nbytes)
{
	int left = nbytes;
	char *p = buf;

	trace_get_random_bytes_arch(left, _RET_IP_);
	while (left) {
		unsigned long v;
		int chunk = min_t(int, left, sizeof(unsigned long));

		if (!arch_get_random_long(&v))
			break;

		memcpy(p, &v, chunk);
		p += chunk;
		left -= chunk;
	}

	return nbytes - left;
}
EXPORT_SYMBOL(get_random_bytes_arch);

/*
 * init_std_data - initialize pool with system data
 *
 * @r: pool to initialize
 *
 * This function mixes some boot-time system data (the current time,
 * architectural random values where available, and the utsname
 * structure) into the pool.  The pool is not cleared, as that could
 * only ever decrease the entropy it holds.
 */
static void __init init_std_data(struct entropy_store *r)
{
	int i;
	ktime_t now = ktime_get_real();
	unsigned long rv;

	mix_pool_bytes(r, &now, sizeof(now));
	for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) {
		if (!arch_get_random_seed_long(&rv) &&
		    !arch_get_random_long(&rv))
			rv = random_get_entropy();
		mix_pool_bytes(r, &rv, sizeof(rv));
	}
	mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
}

/*
 * Note that setup_arch() may call add_device_randomness()
 * long before we get here.  This allows seeding of the pools
 * with some platform dependent data very early in the boot
 * process.  But it limits our options here.  We must use
 * statically allocated structures that already have all
 * initializations complete at compile time.  We should also
 * take care not to overwrite the precious per platform data
 * we were given.
 */
int __init rand_initialize(void)
{
	init_std_data(&input_pool);
	crng_initialize_primary(&primary_crng);
	crng_global_init_time = jiffies;
	if (ratelimit_disable) {
		urandom_warning.interval = 0;
		unseeded_warning.interval = 0;
	}
	return 0;
}

#ifdef CONFIG_BLOCK
void rand_initialize_disk(struct gendisk *disk)
{
	struct timer_rand_state *state;

	/*
	 * If kzalloc returns null, we just won't use that entropy
	 * source.
	 */
	state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
	if (state) {
		state->last_time = INITIAL_JIFFIES;
		disk->random = state;
	}
}
#endif

static ssize_t
urandom_read_nowarn(struct file *file, char __user *buf, size_t nbytes,
		    loff_t *ppos)
{
	int ret;

	nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
	ret = extract_crng_user(buf, nbytes);
	trace_urandom_read(8 * nbytes, 0, ENTROPY_BITS(&input_pool));
	return ret;
}

static ssize_t
urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
	unsigned long flags;
	static int maxwarn = 10;

	if (!crng_ready() && maxwarn > 0) {
		maxwarn--;
		if (__ratelimit(&urandom_warning))
			pr_notice("%s: uninitialized urandom read (%zd bytes read)\n",
				  current->comm, nbytes);
		spin_lock_irqsave(&primary_crng.lock, flags);
		crng_init_cnt = 0;
		spin_unlock_irqrestore(&primary_crng.lock, flags);
	}

	return urandom_read_nowarn(file, buf, nbytes, ppos);
}

static ssize_t
random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
	int ret;

	ret = wait_for_random_bytes();
	if (ret != 0)
		return ret;
	return urandom_read_nowarn(file, buf, nbytes, ppos);
}

static __poll_t
random_poll(struct file *file, poll_table *wait)
{
	__poll_t mask;

	poll_wait(file, &crng_init_wait, wait);
	poll_wait(file, &random_write_wait, wait);
	mask = 0;
	if (crng_ready())
		mask |= EPOLLIN | EPOLLRDNORM;
	if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
		mask |= EPOLLOUT | EPOLLWRNORM;
	return mask;
}

static int
write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
{
	size_t bytes;
	__u32 t, buf[16];
	const char __user *p = buffer;

	while (count > 0) {
		int b, i = 0;

		bytes = min(count, sizeof(buf));
		if (copy_from_user(&buf, p, bytes))
			return -EFAULT;

		for (b = bytes; b > 0; b -= sizeof(__u32), i++) {
			if (!arch_get_random_int(&t))
				break;
			buf[i] ^= t;
		}

		count -= bytes;
		p += bytes;

		mix_pool_bytes(r, buf, bytes);
		cond_resched();
	}

	return 0;
}

static ssize_t random_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	ssize_t ret;

	ret = write_pool(&input_pool, buffer, count);
	if (ret)
		return ret;

	return (ssize_t)count;
}

static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	int size, ent_count;
	int __user *p = (int __user *)arg;
	int retval;

	switch (cmd) {
	case RNDGETENTCNT:
		/* inherently racy, no point locking */
		ent_count = ENTROPY_BITS(&input_pool);
		if (put_user(ent_count, p))
			return -EFAULT;
		return 0;
	case RNDADDTOENTCNT:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p))
			return -EFAULT;
		return credit_entropy_bits_safe(&input_pool, ent_count);
	case RNDADDENTROPY:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p++))
			return -EFAULT;
		if (ent_count < 0)
			return -EINVAL;
		if (get_user(size, p++))
			return -EFAULT;
		retval = write_pool(&input_pool, (const char __user *)p,
				    size);
		if (retval < 0)
			return retval;
		return credit_entropy_bits_safe(&input_pool, ent_count);
	case RNDZAPENTCNT:
	case RNDCLEARPOOL:
		/*
		 * Clear the entropy pool counters.  We no longer clear
		 * the entropy pool, as that's silly.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		input_pool.entropy_count = 0;
		return 0;
	case RNDRESEEDCRNG:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (crng_init < 2)
			return -ENODATA;
		crng_reseed(&primary_crng, NULL);
		crng_global_init_time = jiffies - 1;
		return 0;
	default:
		return -EINVAL;
	}
}

static int random_fasync(int fd, struct file *filp, int on)
{
	return fasync_helper(fd, filp, on, &fasync);
}

const struct file_operations random_fops = {
	.read  = random_read,
	.write = random_write,
	.poll  = random_poll,
	.unlocked_ioctl = random_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
};

const struct file_operations urandom_fops = {
	.read  = urandom_read,
	.write = random_write,
	.unlocked_ioctl = random_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
};

SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
		unsigned int, flags)
{
	int ret;

	if (flags & ~(GRND_NONBLOCK|GRND_RANDOM|GRND_INSECURE))
		return -EINVAL;

	/*
	 * Requesting insecure and blocking randomness at the same time makes
	 * no sense.
	 */
	if ((flags & (GRND_INSECURE|GRND_RANDOM)) == (GRND_INSECURE|GRND_RANDOM))
		return -EINVAL;

	if (count > INT_MAX)
		count = INT_MAX;

	if (!(flags & GRND_INSECURE) && !crng_ready()) {
		if (flags & GRND_NONBLOCK)
			return -EAGAIN;
		ret = wait_for_random_bytes();
		if (unlikely(ret))
			return ret;
	}
	return urandom_read_nowarn(NULL, buf, count, NULL);
}
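
/*
 * Userspace view (a sketch): the common idiom is a blocking request,
 *
 *	unsigned char buf[16];
 *	ssize_t n = getrandom(buf, sizeof(buf), 0);
 *
 * which waits until the CRNG is seeded; GRND_NONBLOCK turns that wait
 * into -EAGAIN, and GRND_INSECURE skips it entirely.
 */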

/********************************************************************
 *
 * Sysctl interface
 *
 ********************************************************************/

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int min_write_thresh;
static int max_write_thresh = INPUT_POOL_WORDS * 32;
static int random_min_urandom_seed = 60;
static char sysctl_bootid[16];

/*
 * This function is used to return both the bootid UUID, and random
 * UUID.  The difference is in whether table->data is NULL; if it is,
 * then a new UUID is generated and returned to the user.
 *
 * Reading "uuid" therefore generates a fresh random UUID every time,
 * while "boot_id" is generated once and cached for the lifetime of
 * the boot.
 */
static int proc_do_uuid(struct ctl_table *table, int write,
			void *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table fake_table;
	unsigned char buf[64], tmp_uuid[16], *uuid;

	uuid = table->data;
	if (!uuid) {
		uuid = tmp_uuid;
		generate_random_uuid(uuid);
	} else {
		static DEFINE_SPINLOCK(bootid_spinlock);

		spin_lock(&bootid_spinlock);
		if (!uuid[8])
			generate_random_uuid(uuid);
		spin_unlock(&bootid_spinlock);
	}

	sprintf(buf, "%pU", uuid);

	fake_table.data = buf;
	fake_table.maxlen = sizeof(buf);

	return proc_dostring(&fake_table, write, buffer, lenp, ppos);
}

/*
 * Return entropy available scaled to integral bits
 */
static int proc_do_entropy(struct ctl_table *table, int write,
			   void *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table fake_table;
	int entropy_count;

	entropy_count = *(int *)table->data >> ENTROPY_SHIFT;

	fake_table.data = &entropy_count;
	fake_table.maxlen = sizeof(entropy_count);

	return proc_dointvec(&fake_table, write, buffer, lenp, ppos);
}

static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
extern struct ctl_table random_table[];
struct ctl_table random_table[] = {
	{
		.procname	= "poolsize",
		.data		= &sysctl_poolsize,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "entropy_avail",
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_do_entropy,
		.data		= &input_pool.entropy_count,
	},
	{
		.procname	= "write_wakeup_threshold",
		.data		= &random_write_wakeup_bits,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_write_thresh,
		.extra2		= &max_write_thresh,
	},
	{
		.procname	= "urandom_min_reseed_secs",
		.data		= &random_min_urandom_seed,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "boot_id",
		.data		= &sysctl_bootid,
		.maxlen		= 16,
		.mode		= 0444,
		.proc_handler	= proc_do_uuid,
	},
	{
		.procname	= "uuid",
		.maxlen		= 16,
		.mode		= 0444,
		.proc_handler	= proc_do_uuid,
	},
#ifdef ADD_INTERRUPT_BENCH
	{
		.procname	= "add_interrupt_avg_cycles",
		.data		= &avg_cycles,
		.maxlen		= sizeof(avg_cycles),
		.mode		= 0444,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "add_interrupt_avg_deviation",
		.data		= &avg_deviation,
		.maxlen		= sizeof(avg_deviation),
		.mode		= 0444,
		.proc_handler	= proc_doulongvec_minmax,
	},
#endif
	{ }
};
#endif	/* CONFIG_SYSCTL */

struct batched_entropy {
	union {
		u64 entropy_u64[CHACHA_BLOCK_SIZE / sizeof(u64)];
		u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)];
	};
	unsigned int position;
	spinlock_t batch_lock;
};

/*
 * Get a random word for internal kernel use only.  The quality of the
 * random number is as good as /dev/urandom, but there is no backtrack
 * protection, with the goal of being quite fast and not depleting
 * entropy.  In order to ensure that the randomness provided by this
 * function is okay, the function wait_for_random_bytes() should be
 * called and return 0 at least once at any point prior.
 */
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
	.batch_lock	= __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
};

u64 get_random_u64(void)
{
	u64 ret;
	unsigned long flags;
	struct batched_entropy *batch;
	static void *previous;

	warn_unseeded_randomness(&previous);

	batch = raw_cpu_ptr(&batched_entropy_u64);
	spin_lock_irqsave(&batch->batch_lock, flags);
	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
		extract_crng((u8 *)batch->entropy_u64);
		batch->position = 0;
	}
	ret = batch->entropy_u64[batch->position++];
	spin_unlock_irqrestore(&batch->batch_lock, flags);
	return ret;
}
EXPORT_SYMBOL(get_random_u64);
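
/*
 * Because the batch is refilled one ChaCha block at a time, a single
 * extract_crng() call above serves CHACHA_BLOCK_SIZE / sizeof(u64) = 8
 * get_random_u64() calls on this CPU before the next refill.
 */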

static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
	.batch_lock	= __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
};

u32 get_random_u32(void)
{
	u32 ret;
	unsigned long flags;
	struct batched_entropy *batch;
	static void *previous;

	warn_unseeded_randomness(&previous);

	batch = raw_cpu_ptr(&batched_entropy_u32);
	spin_lock_irqsave(&batch->batch_lock, flags);
	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
		extract_crng((u8 *)batch->entropy_u32);
		batch->position = 0;
	}
	ret = batch->entropy_u32[batch->position++];
	spin_unlock_irqrestore(&batch->batch_lock, flags);
	return ret;
}
EXPORT_SYMBOL(get_random_u32);

/* It's important to invalidate all potential batched entropy that might
 * be stored before the crng is initialized, which we can do lazily by
 * simply resetting the counter to zero so that it's re-extracted on the
 * next usage. */
static void invalidate_batched_entropy(void)
{
	int cpu;
	unsigned long flags;

	for_each_possible_cpu(cpu) {
		struct batched_entropy *batched_entropy;

		/* interrupts stay disabled across both batch locks */
		batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
		spin_lock_irqsave(&batched_entropy->batch_lock, flags);
		batched_entropy->position = 0;
		spin_unlock(&batched_entropy->batch_lock);

		batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
		spin_lock(&batched_entropy->batch_lock);
		batched_entropy->position = 0;
		spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
	}
}

/**
 * randomize_page - Generate a random, page aligned address
 * @start:	The smallest acceptable address the caller will take.
 * @range:	The size of the area, starting at @start, within which the
 *		random address must fall.
 *
 * If @start + @range would overflow, @range is capped.
 *
 * NOTE: Historical use of randomize_range, which this replaces, presumed
 * that @start was already page aligned.  We now align it regardless.
 *
 * Return: A page aligned address within [start, start + range).  On error,
 * @start is returned.
 */
unsigned long
randomize_page(unsigned long start, unsigned long range)
{
	if (!PAGE_ALIGNED(start)) {
		range -= PAGE_ALIGN(start) - start;
		start = PAGE_ALIGN(start);
	}

	if (start > ULONG_MAX - range)
		range = ULONG_MAX - start;

	range >>= PAGE_SHIFT;

	if (range == 0)
		return start;

	return start + (get_random_long() % range << PAGE_SHIFT);
}
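
/*
 * Example (a sketch, with an assumed 1 GiB randomization window): an
 * architecture might pick a randomized mmap base as
 *
 *	base = randomize_page(mm->mmap_base, SZ_1G);
 *
 * and rely on the result staying page aligned and inside the address
 * space.
 */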

/* Interface for in-kernel drivers of true hardware RNGs.
 * Those devices may produce endless random bits and will be throttled
 * when our pool is full.
 */
void add_hwgenerator_randomness(const char *buffer, size_t count,
				size_t entropy)
{
	struct entropy_store *poolp = &input_pool;

	if (unlikely(crng_init == 0)) {
		crng_fast_load(buffer, count);
		return;
	}

	/* Suspend writing if we're above the trickle threshold.
	 * We'll be woken up again once below random_write_wakeup_bits,
	 * or when the calling thread is about to terminate.
	 */
	wait_event_interruptible(random_write_wait, kthread_should_stop() ||
			ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
	mix_pool_bytes(poolp, buffer, count);
	credit_entropy_bits(poolp, entropy);
}
EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
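
/*
 * A hw_random driver would typically feed this from its kthread (a
 * sketch, assuming a driver-provided read_hw() helper and a
 * conservative 50% entropy credit):
 *
 *	u8 buf[32];
 *	size_t n = read_hw(buf, sizeof(buf));
 *
 *	add_hwgenerator_randomness(buf, n, n * 8 / 2);
 */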

/* Handle random seed passed by bootloader.
 * If the seed is trustworthy, it would be regarded as hardware RNGs. Otherwise
 * it would be regarded as device data.
 * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER.
 */
void add_bootloader_randomness(const void *buf, unsigned int size)
{
	if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER))
		add_hwgenerator_randomness(buf, size, size * 8);
	else
		add_device_randomness(buf, size);
}
EXPORT_SYMBOL_GPL(add_bootloader_randomness);