/*
 * random.c -- A strong random number generator
 */
#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
#include <linux/cryptohash.h>
#include <linux/fips.h>
#include <linux/ptrace.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/uuid.h>
#include <crypto/chacha.h>

#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/random.h>

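/*
 * Configuration information: the size (in bits) of the input pool that
 * collects device and interrupt noise and of the blocking output pool,
 * the per-read transfer limit for /dev/random, and the extraction size.
 */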
#define INPUT_POOL_SHIFT	12
#define INPUT_POOL_WORDS	(1 << (INPUT_POOL_SHIFT-5))
#define OUTPUT_POOL_SHIFT	10
#define OUTPUT_POOL_WORDS	(1 << (OUTPUT_POOL_SHIFT-5))
#define SEC_XFER_SIZE		512
#define EXTRACT_SIZE		10

#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))

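/*
 * To allow fractional bits to be tracked, the entropy_count field is
 * denominated in units of 1/8th bits.
 */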
#define ENTROPY_SHIFT 3
#define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)

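/*
 * The minimum number of bits of entropy before we wake up a read on
 * /dev/random.
 */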
static int random_read_wakeup_bits = 64;

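/*
 * If the entropy count falls under this number of bits, then we
 * should wake up processes which are selecting or polling on write
 * access to /dev/random.
 */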
static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;

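/*
 * Each pool is stirred with a primitive polynomial of the appropriate
 * degree over GF(2), defined by the tap positions below, which is why
 * a table of several pool sizes is kept.
 */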
static struct poolinfo {
	int poolbitshift, poolwords, poolbytes, poolbits, poolfracbits;
#define S(x) ilog2(x)+5, (x), (x)*4, (x)*32, (x) << (ENTROPY_SHIFT+5)
	int tap1, tap2, tap3, tap4, tap5;
} poolinfo_table[] = {
	/* x^128 + x^104 + x^76 + x^51 + x^25 + x + 1 */
	{ S(128),	104,	76,	51,	25,	1 },
	/* x^32 + x^26 + x^19 + x^14 + x^7 + x + 1 */
	{ S(32),	26,	19,	14,	7,	1 },
#if 0
	/* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 */
	{ S(2048),	1638,	1231,	819,	411,	1 },
	/* x^1024 + x^817 + x^615 + x^412 + x^204 + x + 1 */
	{ S(1024),	817,	615,	412,	204,	1 },
	/* x^1024 + x^819 + x^616 + x^410 + x^207 + x^2 + 1 */
	{ S(1024),	819,	616,	410,	207,	2 },
	/* x^512 + x^411 + x^308 + x^208 + x^104 + x + 1 */
	{ S(512),	411,	308,	208,	104,	1 },
	/* x^512 + x^409 + x^307 + x^206 + x^102 + x^2 + 1 */
	{ S(512),	409,	307,	206,	102,	2 },
	/* x^512 + x^409 + x^309 + x^205 + x^103 + x^2 + 1 */
	{ S(512),	409,	309,	205,	103,	2 },
	/* x^256 + x^205 + x^155 + x^101 + x^52 + x + 1 */
	{ S(256),	205,	155,	101,	52,	1 },
	/* x^128 + x^103 + x^78 + x^51 + x^27 + x^2 + 1 */
	{ S(128),	103,	78,	51,	27,	2 },
	/* x^64 + x^52 + x^39 + x^26 + x^14 + x + 1 */
	{ S(64),	52,	39,	26,	14,	1 },
#endif
};

/*
 * Static global variables
 */
static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
static struct fasync_struct *fasync;

static DEFINE_SPINLOCK(random_ready_list_lock);
static LIST_HEAD(random_ready_list);

struct crng_state {
	__u32		state[16];
	unsigned long	init_time;
	spinlock_t	lock;
};

struct crng_state primary_crng = {
	.lock = __SPIN_LOCK_UNLOCKED(primary_crng.lock),
};
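/*
 * crng_init =  0 --> Uninitialized
 *		1 --> Initialized
 *		2 --> Initialized from input_pool
 *
 * crng_init is protected by primary_crng->lock, and is updated in a
 * lockless fashion at read time.
 */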
static int crng_init = 0;
#define crng_ready() (likely(crng_init > 1))
static int crng_init_cnt = 0;
static unsigned long crng_global_init_time = 0;
#define CRNG_INIT_CNT_THRESH (2*CHACHA_KEY_SIZE)
static void _extract_crng(struct crng_state *crng, __u8 out[CHACHA_BLOCK_SIZE]);
static void _crng_backtrack_protect(struct crng_state *crng,
				    __u8 tmp[CHACHA_BLOCK_SIZE], int used);
static void process_random_ready_list(void);
static void _get_random_bytes(void *buf, int nbytes);

static struct ratelimit_state unseeded_warning =
	RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
static struct ratelimit_state urandom_warning =
	RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);

static int ratelimit_disable __read_mostly;

module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
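/**********************************************************************
 *
 * OS independent entropy store.   Here are the functions which handle
 * storing entropy in an entropy pool.
 *
 **********************************************************************/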

struct entropy_store;
struct entropy_store {
	/* read-only data: */
	const struct poolinfo *poolinfo;
	__u32 *pool;
	const char *name;
	struct entropy_store *pull;
	struct work_struct push_work;

	/* read-write data: */
	unsigned long last_pulled;
	spinlock_t lock;
	unsigned short add_ptr;
	unsigned short input_rotate;
	int entropy_count;
	int entropy_total;
	unsigned int initialized:1;
	unsigned int last_data_init:1;
	__u8 last_data[EXTRACT_SIZE];
};

static ssize_t extract_entropy(struct entropy_store *r, void *buf,
			       size_t nbytes, int min, int rsvd);
static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
				size_t nbytes, int fips);

static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
static void push_to_pool(struct work_struct *work);
static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;

static struct entropy_store input_pool = {
	.poolinfo = &poolinfo_table[0],
	.name = "input",
	.lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
	.pool = input_pool_data
};

static struct entropy_store blocking_pool = {
	.poolinfo = &poolinfo_table[1],
	.name = "blocking",
	.pull = &input_pool,
	.lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock),
	.pool = blocking_pool_data,
	.push_work = __WORK_INITIALIZER(blocking_pool.push_work,
					push_to_pool),
};

static __u32 const twist_table[8] = {
	0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
	0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
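/*
 * This function adds bytes into the entropy "pool".  It does not
 * update the entropy estimate.  The caller should call
 * credit_entropy_bits if this is appropriate.
 *
 * The pool is stirred with a primitive polynomial of the appropriate
 * degree, and then twisted.  We twist by three bits at a time because
 * it's cheap to do so and helps slightly in the expected case where
 * the entropy is concentrated in the low-order bits.
 */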
static void _mix_pool_bytes(struct entropy_store *r, const void *in,
			    int nbytes)
{
	unsigned long i, tap1, tap2, tap3, tap4, tap5;
	int input_rotate;
	int wordmask = r->poolinfo->poolwords - 1;
	const char *bytes = in;
	__u32 w;

	tap1 = r->poolinfo->tap1;
	tap2 = r->poolinfo->tap2;
	tap3 = r->poolinfo->tap3;
	tap4 = r->poolinfo->tap4;
	tap5 = r->poolinfo->tap5;

	input_rotate = r->input_rotate;
	i = r->add_ptr;

	/* mix one byte at a time to simplify size handling and churn faster */
	while (nbytes--) {
		w = rol32(*bytes++, input_rotate);
		i = (i - 1) & wordmask;

		/* XOR in the various taps */
		w ^= r->pool[i];
		w ^= r->pool[(i + tap1) & wordmask];
		w ^= r->pool[(i + tap2) & wordmask];
		w ^= r->pool[(i + tap3) & wordmask];
		w ^= r->pool[(i + tap4) & wordmask];
		w ^= r->pool[(i + tap5) & wordmask];

		/* Mix the result back in with a twist */
		r->pool[i] = (w >> 3) ^ twist_table[w & 7];

		/*
		 * Normally, we add 7 bits of rotation to the pool.
		 * At the beginning of the pool, add an extra 7 bits
		 * rotation, so that successive passes spread the
		 * input bits across the pool evenly.
		 */
		input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
	}

	r->input_rotate = input_rotate;
	r->add_ptr = i;
}

static void __mix_pool_bytes(struct entropy_store *r, const void *in,
			     int nbytes)
{
	trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
	_mix_pool_bytes(r, in, nbytes);
}

static void mix_pool_bytes(struct entropy_store *r, const void *in,
			   int nbytes)
{
	unsigned long flags;

	trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
	spin_lock_irqsave(&r->lock, flags);
	_mix_pool_bytes(r, in, nbytes);
	spin_unlock_irqrestore(&r->lock, flags);
}

struct fast_pool {
	__u32		pool[4];
	unsigned long	last;
	unsigned short	reg_idx;
	unsigned char	count;
};
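/*
 * This is a fast mixing routine used by the interrupt randomness
 * collector.  It's hardcoded for an 128 bit pool and assumes that any
 * locks that might be needed are taken by the caller.
 */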
static void fast_mix(struct fast_pool *f)
{
	__u32 a = f->pool[0],	b = f->pool[1];
	__u32 c = f->pool[2],	d = f->pool[3];

	a += b;			c += d;
	b = rol32(b, 6);	d = rol32(d, 27);
	d ^= a;			b ^= c;

	a += b;			c += d;
	b = rol32(b, 16);	d = rol32(d, 14);
	d ^= a;			b ^= c;

	a += b;			c += d;
	b = rol32(b, 6);	d = rol32(d, 27);
	d ^= a;			b ^= c;

	a += b;			c += d;
	b = rol32(b, 16);	d = rol32(d, 14);
	d ^= a;			b ^= c;

	f->pool[0] = a;  f->pool[1] = b;
	f->pool[2] = c;  f->pool[3] = d;
	f->count++;
}

static void process_random_ready_list(void)
{
	unsigned long flags;
	struct random_ready_callback *rdy, *tmp;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) {
		struct module *owner = rdy->owner;

		list_del_init(&rdy->list);
		rdy->func(rdy);
		module_put(owner);
	}
	spin_unlock_irqrestore(&random_ready_list_lock, flags);
}
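/*
 * Credit (or debit) the entropy store with n bits of entropy.
 * Use credit_entropy_bits_safe() if the value comes from userspace
 * or otherwise should be checked for extreme values.
 */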
static void credit_entropy_bits(struct entropy_store *r, int nbits)
{
	int entropy_count, orig;
	const int pool_size = r->poolinfo->poolfracbits;
	int nfrac = nbits << ENTROPY_SHIFT;

	if (!nbits)
		return;

retry:
	entropy_count = orig = READ_ONCE(r->entropy_count);
	if (nfrac < 0) {
		/* Debit */
		entropy_count += nfrac;
	} else {
		/*
		 * Credit: we have to account for the possibility of
		 * overwriting already present entropy.  Even in the
		 * ideal case of pure Shannon entropy, new contributions
		 * approach the full value asymptotically:
		 *
		 * entropy <- entropy + (pool_size - entropy) *
		 *	(1 - exp(-add_entropy/pool_size))
		 *
		 * For add_entropy <= pool_size/2 then
		 * (1 - exp(-add_entropy/pool_size)) >=
		 *    (add_entropy/pool_size)*0.7869...
		 * so we can approximate the exponential with
		 * 3/4*add_entropy/pool_size and still be on the
		 * safe side by adding at most pool_size/2 at a time.
		 */
		int pnfrac = nfrac;
		const int s = r->poolinfo->poolbitshift + ENTROPY_SHIFT + 2;

		/* The +2 corresponds to the /4 in the denominator */
		do {
			unsigned int anfrac = min(pnfrac, pool_size/2);
			unsigned int add =
				((pool_size - entropy_count)*anfrac*3) >> s;

			entropy_count += add;
			pnfrac -= anfrac;
		} while (unlikely(entropy_count < pool_size-2 && pnfrac));
	}

	if (unlikely(entropy_count < 0)) {
		pr_warn("random: negative entropy/overflow: pool %s count %d\n",
			r->name, entropy_count);
		WARN_ON(1);
		entropy_count = 0;
	} else if (entropy_count > pool_size)
		entropy_count = pool_size;
	if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
		goto retry;

	r->entropy_total += nbits;
	if (!r->initialized && r->entropy_total > 128) {
		r->initialized = 1;
		r->entropy_total = 0;
	}

	trace_credit_entropy_bits(r->name, nbits,
				  entropy_count >> ENTROPY_SHIFT,
				  r->entropy_total, _RET_IP_);

	if (r == &input_pool) {
		int entropy_bits = entropy_count >> ENTROPY_SHIFT;

		if (crng_init < 2 && entropy_bits >= 128) {
			crng_reseed(&primary_crng, r);
			entropy_bits = r->entropy_count >> ENTROPY_SHIFT;
		}

		/* should we wake readers? */
		if (entropy_bits >= random_read_wakeup_bits &&
		    wq_has_sleeper(&random_read_wait)) {
			wake_up_interruptible(&random_read_wait);
			kill_fasync(&fasync, SIGIO, POLL_IN);
		}

		/* If the input pool is getting full, send some
		 * entropy to the blocking pool until it is 75% full.
		 */
		if (entropy_bits > random_write_wakeup_bits &&
		    r->initialized &&
		    r->entropy_total >= 2*random_read_wakeup_bits) {
			struct entropy_store *other = &blocking_pool;

			if (other->entropy_count <=
			    3 * other->poolinfo->poolfracbits / 4) {
				schedule_work(&other->push_work);
				r->entropy_total = 0;
			}
		}
	}
}

static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
{
	const int nbits_max = r->poolinfo->poolwords * 32;

	if (nbits < 0)
		return -EINVAL;

	/* Cap the value to avoid overflows */
	nbits = min(nbits, nbits_max);

	credit_entropy_bits(r, nbits);
	return 0;
}
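/*********************************************************************
 *
 * CRNG using CHACHA20
 *
 *********************************************************************/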

#define CRNG_RESEED_INTERVAL (300*HZ)

static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);

#ifdef CONFIG_NUMA
/*
 * Hack to deal with crazy userspace programs when they are all trying
 * to access /dev/urandom in parallel.  The programs are almost
 * certainly doing something terribly wrong, but we'll work around
 * their brain damage.
 */
static struct crng_state **crng_node_pool __read_mostly;
#endif

static void invalidate_batched_entropy(void);

static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
static int __init parse_trust_cpu(char *arg)
{
	return kstrtobool(arg, &trust_cpu);
}
early_param("random.trust_cpu", parse_trust_cpu);

static void crng_initialize(struct crng_state *crng)
{
	int i;
	int arch_init = 1;
	unsigned long rv;

	memcpy(&crng->state[0], "expand 32-byte k", 16);
	if (crng == &primary_crng)
		_extract_entropy(&input_pool, &crng->state[4],
				 sizeof(__u32) * 12, 0);
	else
		_get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
	for (i = 4; i < 16; i++) {
		if (!arch_get_random_seed_long(&rv) &&
		    !arch_get_random_long(&rv)) {
			rv = random_get_entropy();
			arch_init = 0;
		}
		crng->state[i] ^= rv;
	}
	if (trust_cpu && arch_init) {
		crng_init = 2;
		pr_notice("random: crng done (trusting CPU's manufacturer)\n");
	}
	crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}

#ifdef CONFIG_NUMA
static void do_numa_crng_init(struct work_struct *work)
{
	int i;
	struct crng_state *crng;
	struct crng_state **pool;

	pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
	for_each_online_node(i) {
		crng = kmalloc_node(sizeof(struct crng_state),
				    GFP_KERNEL | __GFP_NOFAIL, i);
		spin_lock_init(&crng->lock);
		crng_initialize(crng);
		pool[i] = crng;
	}
	mb();
	if (cmpxchg(&crng_node_pool, NULL, pool)) {
		for_each_node(i)
			kfree(pool[i]);
		kfree(pool);
	}
}

static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init);

static void numa_crng_init(void)
{
	schedule_work(&numa_crng_init_work);
}
#else
static void numa_crng_init(void) {}
#endif
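/*
 * crng_fast_load() can be called by code in the interrupt service
 * path.  So we can't afford to dilly-dally.
 */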
static int crng_fast_load(const char *cp, size_t len)
{
	unsigned long flags;
	unsigned char *p;

	if (!spin_trylock_irqsave(&primary_crng.lock, flags))
		return 0;
	if (crng_init != 0) {
		spin_unlock_irqrestore(&primary_crng.lock, flags);
		return 0;
	}
	p = (unsigned char *) &primary_crng.state[4];
	while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
		p[crng_init_cnt % CHACHA_KEY_SIZE] ^= *cp;
		cp++; crng_init_cnt++; len--;
	}
	spin_unlock_irqrestore(&primary_crng.lock, flags);
	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
		invalidate_batched_entropy();
		crng_init = 1;
		wake_up_interruptible(&crng_init_wait);
		pr_notice("random: fast init done\n");
	}
	return 1;
}
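/*
 * crng_slow_load() is called by add_device_randomness, which has two
 * attributes.  (1) We can't trust the buffer passed to it is
 * guaranteed to be unpredictable (so it might not have any entropy at
 * all), and (2) it doesn't have the performance constraints of
 * crng_fast_load().
 *
 * So we do something more comprehensive which is guaranteed to touch
 * all of the primary_crng's state, and which uses a LFSR with a
 * period of 255 as part of the mixing algorithm.  Finally, we do
 * *not* advance crng_init_cnt since the buffer we may get may be
 * something like a fixed DMI table (for example), which might very
 * well be unique to the machine, but is otherwise unvarying.
 */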
static int crng_slow_load(const char *cp, size_t len)
{
	unsigned long flags;
	static unsigned char lfsr = 1;
	unsigned char tmp;
	unsigned i, max = CHACHA_KEY_SIZE;
	const char *src_buf = cp;
	char *dest_buf = (char *) &primary_crng.state[4];

	if (!spin_trylock_irqsave(&primary_crng.lock, flags))
		return 0;
	if (crng_init != 0) {
		spin_unlock_irqrestore(&primary_crng.lock, flags);
		return 0;
	}
	if (len > max)
		max = len;

	for (i = 0; i < max ; i++) {
		tmp = lfsr;
		lfsr >>= 1;
		if (tmp & 1)
			lfsr ^= 0xE1;
		tmp = dest_buf[i % CHACHA_KEY_SIZE];
		dest_buf[i % CHACHA_KEY_SIZE] ^= src_buf[i % len] ^ lfsr;
		lfsr += (tmp << 3) | (tmp >> 5);
	}
	spin_unlock_irqrestore(&primary_crng.lock, flags);
	return 1;
}

static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
{
	unsigned long flags;
	int i, num;
	union {
		__u8 block[CHACHA_BLOCK_SIZE];
		__u32 key[8];
	} buf;

	if (r) {
		num = extract_entropy(r, &buf, 32, 16, 0);
		if (num == 0)
			return;
	} else {
		_extract_crng(&primary_crng, buf.block);
		_crng_backtrack_protect(&primary_crng, buf.block,
					CHACHA_KEY_SIZE);
	}
	spin_lock_irqsave(&crng->lock, flags);
	for (i = 0; i < 8; i++) {
		unsigned long rv;
		if (!arch_get_random_seed_long(&rv) &&
		    !arch_get_random_long(&rv))
			rv = random_get_entropy();
		crng->state[i+4] ^= buf.key[i] ^ rv;
	}
	memzero_explicit(&buf, sizeof(buf));
	crng->init_time = jiffies;
	spin_unlock_irqrestore(&crng->lock, flags);
	if (crng == &primary_crng && crng_init < 2) {
		invalidate_batched_entropy();
		numa_crng_init();
		crng_init = 2;
		process_random_ready_list();
		wake_up_interruptible(&crng_init_wait);
		pr_notice("random: crng init done\n");
		if (unseeded_warning.missed) {
			pr_notice("random: %d get_random_xx warning(s) missed due to ratelimiting\n",
				  unseeded_warning.missed);
			unseeded_warning.missed = 0;
		}
		if (urandom_warning.missed) {
			pr_notice("random: %d urandom warning(s) missed due to ratelimiting\n",
				  urandom_warning.missed);
			urandom_warning.missed = 0;
		}
	}
}

static void _extract_crng(struct crng_state *crng,
			  __u8 out[CHACHA_BLOCK_SIZE])
{
	unsigned long v, flags;

	if (crng_ready() &&
	    (time_after(crng_global_init_time, crng->init_time) ||
	     time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)))
		crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL);
	spin_lock_irqsave(&crng->lock, flags);
	if (arch_get_random_long(&v))
		crng->state[14] ^= v;
	chacha20_block(&crng->state[0], out);
	if (crng->state[12] == 0)
		crng->state[13]++;
	spin_unlock_irqrestore(&crng->lock, flags);
}

static void extract_crng(__u8 out[CHACHA_BLOCK_SIZE])
{
	struct crng_state *crng = NULL;

#ifdef CONFIG_NUMA
	if (crng_node_pool)
		crng = crng_node_pool[numa_node_id()];
	if (crng == NULL)
#endif
		crng = &primary_crng;
	_extract_crng(crng, out);
}
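/*
 * Use the leftover bytes from the CRNG block output (if there is
 * enough) to mutate the CRNG key to provide backtracking protection.
 */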
static void _crng_backtrack_protect(struct crng_state *crng,
				    __u8 tmp[CHACHA_BLOCK_SIZE], int used)
{
	unsigned long flags;
	__u32 *s, *d;
	int i;

	used = round_up(used, sizeof(__u32));
	if (used + CHACHA_KEY_SIZE > CHACHA_BLOCK_SIZE) {
		extract_crng(tmp);
		used = 0;
	}
	spin_lock_irqsave(&crng->lock, flags);
	s = (__u32 *) &tmp[used];
	d = &crng->state[4];
	for (i = 0; i < 8; i++)
		*d++ ^= *s++;
	spin_unlock_irqrestore(&crng->lock, flags);
}

static void crng_backtrack_protect(__u8 tmp[CHACHA_BLOCK_SIZE], int used)
{
	struct crng_state *crng = NULL;

#ifdef CONFIG_NUMA
	if (crng_node_pool)
		crng = crng_node_pool[numa_node_id()];
	if (crng == NULL)
#endif
		crng = &primary_crng;
	_crng_backtrack_protect(crng, tmp, used);
}

static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
{
	ssize_t ret = 0, i = CHACHA_BLOCK_SIZE;
	__u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
	int large_request = (nbytes > 256);

	while (nbytes) {
		if (large_request && need_resched()) {
			if (signal_pending(current)) {
				if (ret == 0)
					ret = -ERESTARTSYS;
				break;
			}
			schedule();
		}

		extract_crng(tmp);
		i = min_t(int, nbytes, CHACHA_BLOCK_SIZE);
		if (copy_to_user(buf, tmp, i)) {
			ret = -EFAULT;
			break;
		}

		nbytes -= i;
		buf += i;
		ret += i;
	}
	crng_backtrack_protect(tmp, i);

	/* Wipe data just written to memory */
	memzero_explicit(tmp, sizeof(tmp));

	return ret;
}
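/*********************************************************************
 *
 * Entropy input management
 *
 *********************************************************************/

/* There is one of these per entropy source */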
struct timer_rand_state {
	cycles_t last_time;
	long last_delta, last_delta2;
};

#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, }
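/*
 * Add device- or boot-specific data to the input pool to help
 * initialize it.
 *
 * None of this adds any entropy; it is meant to avoid the problem of
 * the entropy pool having similar initial state across largely
 * identical devices.
 */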
void add_device_randomness(const void *buf, unsigned int size)
{
	unsigned long time = random_get_entropy() ^ jiffies;
	unsigned long flags;

	if (!crng_ready() && size)
		crng_slow_load(buf, size);

	trace_add_device_randomness(size, _RET_IP_);
	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(&input_pool, buf, size);
	_mix_pool_bytes(&input_pool, &time, sizeof(time));
	spin_unlock_irqrestore(&input_pool.lock, flags);
}
EXPORT_SYMBOL(add_device_randomness);

static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE;

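/*
 * This function adds entropy to the entropy "pool" by using timing
 * delays.  It uses the timer_rand_state structure to make an estimate
 * of how many bits of entropy this call has added to the pool.
 *
 * The number "num" is also added to the pool - it should somehow describe
 * the type of event which just happened.  This is currently 0-255 for
 * keyboard scan codes, and 256 upwards for interrupts.
 */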
static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
{
	struct entropy_store *r;
	struct {
		long jiffies;
		unsigned cycles;
		unsigned num;
	} sample;
	long delta, delta2, delta3;

	sample.jiffies = jiffies;
	sample.cycles = random_get_entropy();
	sample.num = num;
	r = &input_pool;
	mix_pool_bytes(r, &sample, sizeof(sample));

	/*
	 * Calculate number of bits of randomness we probably added.
	 * We take into account the first, second and third-order deltas
	 * in order to make our estimate.
	 */
	delta = sample.jiffies - state->last_time;
	state->last_time = sample.jiffies;

	delta2 = delta - state->last_delta;
	state->last_delta = delta;

	delta3 = delta2 - state->last_delta2;
	state->last_delta2 = delta2;

	if (delta < 0)
		delta = -delta;
	if (delta2 < 0)
		delta2 = -delta2;
	if (delta3 < 0)
		delta3 = -delta3;
	if (delta > delta2)
		delta = delta2;
	if (delta > delta3)
		delta = delta3;

	/*
	 * delta is now minimum absolute delta.
	 * Round down by 1 bit on general principles,
	 * and limit entropy estimate to 12 bits.
	 */
	credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
}

void add_input_randomness(unsigned int type, unsigned int code,
			  unsigned int value)
{
	static unsigned char last_value;

	/* ignore autorepeat and the like */
	if (value == last_value)
		return;

	last_value = value;
	add_timer_randomness(&input_timer_state,
			     (type << 4) ^ code ^ (code >> 4) ^ value);
	trace_add_input_randomness(ENTROPY_BITS(&input_pool));
}
EXPORT_SYMBOL_GPL(add_input_randomness);

static DEFINE_PER_CPU(struct fast_pool, irq_randomness);

#ifdef ADD_INTERRUPT_BENCH
static unsigned long avg_cycles, avg_deviation;

#define AVG_SHIFT 8	/* Exponential average factor k=1/256 */
#define FIXED_1_2 (1 << (AVG_SHIFT-1))

static void add_interrupt_bench(cycles_t start)
{
	long delta = random_get_entropy() - start;

	/* Use a weighted moving average */
	delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT);
	avg_cycles += delta;
	/* And average deviation */
	delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT);
	avg_deviation += delta;
}
#else
#define add_interrupt_bench(x)
#endif

static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
{
	__u32 *ptr = (__u32 *) regs;
	unsigned int idx;

	if (regs == NULL)
		return 0;
	idx = READ_ONCE(f->reg_idx);
	if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
		idx = 0;
	ptr += idx++;
	WRITE_ONCE(f->reg_idx, idx);
	return *ptr;
}

void add_interrupt_randomness(int irq, int irq_flags)
{
	struct entropy_store *r;
	struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
	struct pt_regs *regs = get_irq_regs();
	unsigned long now = jiffies;
	cycles_t cycles = random_get_entropy();
	__u32 c_high, j_high;
	__u64 ip;
	unsigned long seed;
	int credit = 0;

	if (cycles == 0)
		cycles = get_reg(fast_pool, regs);
	c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
	j_high = (sizeof(now) > 4) ? now >> 32 : 0;
	fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
	fast_pool->pool[1] ^= now ^ c_high;
	ip = regs ? instruction_pointer(regs) : _RET_IP_;
	fast_pool->pool[2] ^= ip;
	fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
		get_reg(fast_pool, regs);

	fast_mix(fast_pool);
	add_interrupt_bench(cycles);

	if (unlikely(crng_init == 0)) {
		if ((fast_pool->count >= 64) &&
		    crng_fast_load((char *) fast_pool->pool,
				   sizeof(fast_pool->pool))) {
			fast_pool->count = 0;
			fast_pool->last = now;
		}
		return;
	}

	if ((fast_pool->count < 64) &&
	    !time_after(now, fast_pool->last + HZ))
		return;

	r = &input_pool;
	if (!spin_trylock(&r->lock))
		return;

	fast_pool->last = now;
	__mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));

	/*
	 * If we have architectural seed generator, produce a seed and
	 * add it to the pool.  For the sake of paranoia don't let the
	 * architectural seed generator dominate the input from the
	 * interrupt noise.
	 */
	if (arch_get_random_seed_long(&seed)) {
		__mix_pool_bytes(r, &seed, sizeof(seed));
		credit = 1;
	}
	spin_unlock(&r->lock);

	fast_pool->count = 0;

	/* award one bit for the contents of the fast pool */
	credit_entropy_bits(r, credit + 1);
}
EXPORT_SYMBOL_GPL(add_interrupt_randomness);

#ifdef CONFIG_BLOCK
void add_disk_randomness(struct gendisk *disk)
{
	if (!disk || !disk->random)
		return;
	/* first major is 1, so we get >= 0x200 here */
	add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
	trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool));
}
EXPORT_SYMBOL_GPL(add_disk_randomness);
#endif
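/*********************************************************************
 *
 * Entropy extraction routines
 *
 *********************************************************************/

/*
 * This utility inline function is responsible for transferring entropy
 * from the primary pool to the secondary extraction pool.  We make
 * sure we pull enough for a 'catastrophic reseed'.
 */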
static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes);
static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
{
	if (!r->pull ||
	    r->entropy_count >= (nbytes << (ENTROPY_SHIFT + 3)) ||
	    r->entropy_count > r->poolinfo->poolfracbits)
		return;

	_xfer_secondary_pool(r, nbytes);
}

static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
{
	__u32 tmp[OUTPUT_POOL_WORDS];

	int bytes = nbytes;

	/* pull at least as much as a wakeup */
	bytes = max_t(int, bytes, random_read_wakeup_bits / 8);
	/* but never more than the buffer size */
	bytes = min_t(int, bytes, sizeof(tmp));

	trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8,
				  ENTROPY_BITS(r), ENTROPY_BITS(r->pull));
	bytes = extract_entropy(r->pull, tmp, bytes,
				random_read_wakeup_bits / 8, 0);
	mix_pool_bytes(r, tmp, bytes);
	credit_entropy_bits(r, bytes*8);
}
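/*
 * Used as a workqueue function so that when the input pool is getting
 * full, we can "spill over" some entropy to the blocking pool.
 */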
static void push_to_pool(struct work_struct *work)
{
	struct entropy_store *r = container_of(work, struct entropy_store,
					       push_work);
	BUG_ON(!r);
	_xfer_secondary_pool(r, random_read_wakeup_bits/8);
	trace_push_to_pool(r->name, r->entropy_count >> ENTROPY_SHIFT,
			   r->pull->entropy_count >> ENTROPY_SHIFT);
}
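/*
 * This function decides how many bytes to actually take from the
 * given pool, and also debits the entropy count accordingly.
 */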
static size_t account(struct entropy_store *r, size_t nbytes, int min,
		      int reserved)
{
	int entropy_count, orig, have_bytes;
	size_t ibytes, nfrac;

	BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);

	/* Can we pull enough? */
retry:
	entropy_count = orig = READ_ONCE(r->entropy_count);
	ibytes = nbytes;
	/* never pull more than available */
	have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);

	if ((have_bytes -= reserved) < 0)
		have_bytes = 0;
	ibytes = min_t(size_t, ibytes, have_bytes);
	if (ibytes < min)
		ibytes = 0;

	if (unlikely(entropy_count < 0)) {
		pr_warn("random: negative entropy count: pool %s count %d\n",
			r->name, entropy_count);
		WARN_ON(1);
		entropy_count = 0;
	}
	nfrac = ibytes << (ENTROPY_SHIFT + 3);
	if ((size_t) entropy_count > nfrac)
		entropy_count -= nfrac;
	else
		entropy_count = 0;

	if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
		goto retry;

	trace_debit_entropy(r->name, 8 * ibytes);
	if (ibytes &&
	    (r->entropy_count >> ENTROPY_SHIFT) < random_write_wakeup_bits) {
		wake_up_interruptible(&random_write_wait);
		kill_fasync(&fasync, SIGIO, POLL_OUT);
	}

	return ibytes;
}
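/*
 * This function does the actual extraction for extract_entropy and
 * extract_entropy_user.
 *
 * Note: we assume that .poolwords is a multiple of 16 words.
 */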
static void extract_buf(struct entropy_store *r, __u8 *out)
{
	int i;
	union {
		__u32 w[5];
		unsigned long l[LONGS(20)];
	} hash;
	__u32 workspace[SHA_WORKSPACE_WORDS];
	unsigned long flags;

	/*
	 * If we have an architectural hardware random number
	 * generator, use it for SHA's initial vector
	 */
	sha_init(hash.w);
	for (i = 0; i < LONGS(20); i++) {
		unsigned long v;
		if (!arch_get_random_long(&v))
			break;
		hash.l[i] = v;
	}

	/* Generate a hash across the pool, 16 words (512 bits) at a time */
	spin_lock_irqsave(&r->lock, flags);
	for (i = 0; i < r->poolinfo->poolwords; i += 16)
		sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);

	/*
	 * We mix the hash back into the pool to prevent backtracking
	 * attacks (where the attacker knows the state of the pool
	 * plus the current outputs, and attempts to find previous
	 * outputs), unless the hash function can be inverted. By
	 * mixing at least a SHA1 worth of hash data back, we make
	 * backtracking attacks much harder.
	 */
	__mix_pool_bytes(r, hash.w, sizeof(hash.w));
	spin_unlock_irqrestore(&r->lock, flags);

	memzero_explicit(workspace, sizeof(workspace));

	/*
	 * In case the hash function has some recognizable output
	 * pattern, we fold it in half. Thus, we always feed back
	 * twice as much data as we output.
	 */
	hash.w[0] ^= hash.w[3];
	hash.w[1] ^= hash.w[4];
	hash.w[2] ^= rol32(hash.w[2], 16);

	memcpy(out, &hash, EXTRACT_SIZE);
	memzero_explicit(&hash, sizeof(hash));
}

static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
				size_t nbytes, int fips)
{
	ssize_t ret = 0, i;
	__u8 tmp[EXTRACT_SIZE];
	unsigned long flags;

	while (nbytes) {
		extract_buf(r, tmp);

		if (fips) {
			spin_lock_irqsave(&r->lock, flags);
			if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
				panic("Hardware RNG duplicated output!\n");
			memcpy(r->last_data, tmp, EXTRACT_SIZE);
			spin_unlock_irqrestore(&r->lock, flags);
		}
		i = min_t(int, nbytes, EXTRACT_SIZE);
		memcpy(buf, tmp, i);
		nbytes -= i;
		buf += i;
		ret += i;
	}

	/* Wipe data just returned from memory */
	memzero_explicit(tmp, sizeof(tmp));

	return ret;
}
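/*
 * This function extracts randomness from the "entropy pool", and
 * returns it in a buffer.
 *
 * The min parameter specifies the minimum amount we can pull before
 * failing to avoid races that defeat catastrophic reseeding while the
 * reserved parameter indicates how much entropy we must leave in the
 * pool after each pull to avoid starving other readers.
 */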
static ssize_t extract_entropy(struct entropy_store *r, void *buf,
			       size_t nbytes, int min, int reserved)
{
	__u8 tmp[EXTRACT_SIZE];
	unsigned long flags;

	/* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
	if (fips_enabled) {
		spin_lock_irqsave(&r->lock, flags);
		if (!r->last_data_init) {
			r->last_data_init = 1;
			spin_unlock_irqrestore(&r->lock, flags);
			trace_extract_entropy(r->name, EXTRACT_SIZE,
					      ENTROPY_BITS(r), _RET_IP_);
			xfer_secondary_pool(r, EXTRACT_SIZE);
			extract_buf(r, tmp);
			spin_lock_irqsave(&r->lock, flags);
			memcpy(r->last_data, tmp, EXTRACT_SIZE);
		}
		spin_unlock_irqrestore(&r->lock, flags);
	}

	trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
	xfer_secondary_pool(r, nbytes);
	nbytes = account(r, nbytes, min, reserved);

	return _extract_entropy(r, buf, nbytes, fips_enabled);
}
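/*
 * This function extracts randomness from the "entropy pool", and
 * returns it in a userspace buffer.
 */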
static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
				    size_t nbytes)
{
	ssize_t ret = 0, i;
	__u8 tmp[EXTRACT_SIZE];
	int large_request = (nbytes > 256);

	trace_extract_entropy_user(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
	xfer_secondary_pool(r, nbytes);
	nbytes = account(r, nbytes, 0, 0);

	while (nbytes) {
		if (large_request && need_resched()) {
			if (signal_pending(current)) {
				if (ret == 0)
					ret = -ERESTARTSYS;
				break;
			}
			schedule();
		}

		extract_buf(r, tmp);
		i = min_t(int, nbytes, EXTRACT_SIZE);
		if (copy_to_user(buf, tmp, i)) {
			ret = -EFAULT;
			break;
		}

		nbytes -= i;
		buf += i;
		ret += i;
	}

	/* Wipe data just returned from memory */
	memzero_explicit(tmp, sizeof(tmp));

	return ret;
}

#define warn_unseeded_randomness(previous) \
	_warn_unseeded_randomness(__func__, (void *) _RET_IP_, (previous))

static void _warn_unseeded_randomness(const char *func_name, void *caller,
				      void **previous)
{
#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
	const bool print_once = false;
#else
	static bool print_once __read_mostly;
#endif

	if (print_once ||
	    crng_ready() ||
	    (previous && (caller == READ_ONCE(*previous))))
		return;
	WRITE_ONCE(*previous, caller);
#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
	print_once = true;
#endif
	if (__ratelimit(&unseeded_warning))
		pr_notice("random: %s called from %pS with crng_init=%d\n",
			  func_name, caller, crng_init);
}
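/*
 * This function is the exported kernel interface.  It returns some
 * number of good random numbers, suitable for key generation, seeding
 * TCP sequence numbers, etc.  It does not rely on the hardware random
 * number generator.  For random bytes direct from the hardware RNG
 * (when available), use get_random_bytes_arch(). In order to ensure
 * that the randomness provided by this function is okay, the function
 * wait_for_random_bytes() should be called and return 0 at least once
 * at any point prior.
 */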
static void _get_random_bytes(void *buf, int nbytes)
{
	__u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);

	trace_get_random_bytes(nbytes, _RET_IP_);

	while (nbytes >= CHACHA_BLOCK_SIZE) {
		extract_crng(buf);
		buf += CHACHA_BLOCK_SIZE;
		nbytes -= CHACHA_BLOCK_SIZE;
	}

	if (nbytes > 0) {
		extract_crng(tmp);
		memcpy(buf, tmp, nbytes);
		crng_backtrack_protect(tmp, nbytes);
	} else
		crng_backtrack_protect(tmp, CHACHA_BLOCK_SIZE);
	memzero_explicit(tmp, sizeof(tmp));
}

void get_random_bytes(void *buf, int nbytes)
{
	static void *previous;

	warn_unseeded_randomness(&previous);
	_get_random_bytes(buf, nbytes);
}
EXPORT_SYMBOL(get_random_bytes);
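/*
 * Wait for the urandom pool to be seeded and thus guaranteed to supply
 * cryptographically secure random numbers. This applies to: the /dev/urandom
 * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
 * family of functions. Using any of these functions without first calling
 * this function forfeits the guarantee of security.
 *
 * Returns: 0 if the urandom pool has been seeded.
 *          -ERESTARTSYS if the function was interrupted by a signal.
 */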
int wait_for_random_bytes(void)
{
	if (likely(crng_ready()))
		return 0;
	return wait_event_interruptible(crng_init_wait, crng_ready());
}
EXPORT_SYMBOL(wait_for_random_bytes);
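/*
 * Returns whether or not the urandom pool has been seeded and thus guaranteed
 * to supply cryptographically secure random numbers. This applies to: the
 * /dev/urandom device, the get_random_bytes function, and the get_random_{u32,
 * u64,int,long} family of functions.
 *
 * Returns: true if the urandom pool has been seeded.
 *          false if the urandom pool has not been seeded.
 */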
bool rng_is_initialized(void)
{
	return crng_ready();
}
EXPORT_SYMBOL(rng_is_initialized);
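/*
 * Add a callback function that will be invoked when the nonblocking
 * pool is initialised.
 *
 * returns: 0 if callback is successfully added
 *	    -EALREADY if pool is already initialised (callback not called)
 *	    -ENOENT if module for callback is not alive
 */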
int add_random_ready_callback(struct random_ready_callback *rdy)
{
	struct module *owner;
	unsigned long flags;
	int err = -EALREADY;

	if (crng_ready())
		return err;

	owner = rdy->owner;
	if (!try_module_get(owner))
		return -ENOENT;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	if (crng_ready())
		goto out;

	owner = NULL;

	list_add(&rdy->list, &random_ready_list);
	err = 0;

out:
	spin_unlock_irqrestore(&random_ready_list_lock, flags);

	module_put(owner);

	return err;
}
EXPORT_SYMBOL(add_random_ready_callback);
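/*
 * Delete a previously registered readiness callback function.
 */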
void del_random_ready_callback(struct random_ready_callback *rdy)
{
	unsigned long flags;
	struct module *owner = NULL;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	if (!list_empty(&rdy->list)) {
		list_del_init(&rdy->list);
		owner = rdy->owner;
	}
	spin_unlock_irqrestore(&random_ready_list_lock, flags);

	module_put(owner);
}
EXPORT_SYMBOL(del_random_ready_callback);
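/*
 * This function will use the architecture-specific hardware random
 * number generator if it is available. It is not recommended for
 * use. Use get_random_bytes() instead. It returns the number of
 * bytes filled in.
 */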
int __must_check get_random_bytes_arch(void *buf, int nbytes)
{
	int left = nbytes;
	char *p = buf;

	trace_get_random_bytes_arch(left, _RET_IP_);
	while (left) {
		unsigned long v;
		int chunk = min_t(int, left, sizeof(unsigned long));

		if (!arch_get_random_long(&v))
			break;

		memcpy(p, &v, chunk);
		p += chunk;
		left -= chunk;
	}

	return nbytes - left;
}
EXPORT_SYMBOL(get_random_bytes_arch);
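/*
 * init_std_data - initialize pool with system data
 *
 * @r: pool to initialize
 *
 * This function clears the pool's entropy count and mixes some system
 * data into the pool to prepare it for use. The pool is not cleared
 * as that can only decrease the entropy in the pool.
 */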
static void init_std_data(struct entropy_store *r)
{
	int i;
	ktime_t now = ktime_get_real();
	unsigned long rv;

	r->last_pulled = jiffies;
	mix_pool_bytes(r, &now, sizeof(now));
	for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) {
		if (!arch_get_random_seed_long(&rv) &&
		    !arch_get_random_long(&rv))
			rv = random_get_entropy();
		mix_pool_bytes(r, &rv, sizeof(rv));
	}
	mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
}
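/*
 * Note that setup_arch() may call add_device_randomness()
 * long before we get here. This allows seeding of the pools
 * with some platform dependent data very early in the boot
 * process. But it limits our options here. We must use
 * statically allocated structures that already have all
 * initializations complete at compile time. We should also
 * take care not to overwrite the precious per platform data
 * we were given.
 */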
static int rand_initialize(void)
{
	init_std_data(&input_pool);
	init_std_data(&blocking_pool);
	crng_initialize(&primary_crng);
	crng_global_init_time = jiffies;
	if (ratelimit_disable) {
		urandom_warning.interval = 0;
		unseeded_warning.interval = 0;
	}
	return 0;
}
early_initcall(rand_initialize);

#ifdef CONFIG_BLOCK
void rand_initialize_disk(struct gendisk *disk)
{
	struct timer_rand_state *state;

	/*
	 * If kzalloc returns null, we just won't use that entropy
	 * source.
	 */
	state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
	if (state) {
		state->last_time = INITIAL_JIFFIES;
		disk->random = state;
	}
}
#endif

static ssize_t
_random_read(int nonblock, char __user *buf, size_t nbytes)
{
	ssize_t n;

	if (nbytes == 0)
		return 0;

	nbytes = min_t(size_t, nbytes, SEC_XFER_SIZE);
	while (1) {
		n = extract_entropy_user(&blocking_pool, buf, nbytes);
		if (n < 0)
			return n;
		trace_random_read(n*8, (nbytes-n)*8,
				  ENTROPY_BITS(&blocking_pool),
				  ENTROPY_BITS(&input_pool));
		if (n > 0)
			return n;

		/* Pool is (near) empty.  Maybe wait and retry. */
		if (nonblock)
			return -EAGAIN;

		wait_event_interruptible(random_read_wait,
			ENTROPY_BITS(&input_pool) >=
			random_read_wakeup_bits);
		if (signal_pending(current))
			return -ERESTARTSYS;
	}
}

static ssize_t
random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
	return _random_read(file->f_flags & O_NONBLOCK, buf, nbytes);
}

static ssize_t
urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
	unsigned long flags;
	static int maxwarn = 10;
	int ret;

	if (!crng_ready() && maxwarn > 0) {
		maxwarn--;
		if (__ratelimit(&urandom_warning))
			printk(KERN_NOTICE "random: %s: uninitialized urandom read (%zd bytes read)\n",
			       current->comm, nbytes);
		spin_lock_irqsave(&primary_crng.lock, flags);
		crng_init_cnt = 0;
		spin_unlock_irqrestore(&primary_crng.lock, flags);
	}
	nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
	ret = extract_crng_user(buf, nbytes);
	trace_urandom_read(8 * nbytes, 0, ENTROPY_BITS(&input_pool));
	return ret;
}

static __poll_t
random_poll(struct file *file, poll_table *wait)
{
	__poll_t mask;

	poll_wait(file, &random_read_wait, wait);
	poll_wait(file, &random_write_wait, wait);
	mask = 0;
	if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits)
		mask |= EPOLLIN | EPOLLRDNORM;
	if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
		mask |= EPOLLOUT | EPOLLWRNORM;
	return mask;
}

static int
write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
{
	size_t bytes;
	__u32 t, buf[16];
	const char __user *p = buffer;

	while (count > 0) {
		int b, i = 0;

		bytes = min(count, sizeof(buf));
		if (copy_from_user(&buf, p, bytes))
			return -EFAULT;

		for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) {
			if (!arch_get_random_int(&t))
				break;
			buf[i] ^= t;
		}

		count -= bytes;
		p += bytes;

		mix_pool_bytes(r, buf, bytes);
		cond_resched();
	}

	return 0;
}

static ssize_t random_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	ssize_t ret;

	ret = write_pool(&input_pool, buffer, count);
	if (ret)
		return ret;

	return (ssize_t)count;
}

static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	int size, ent_count;
	int __user *p = (int __user *)arg;
	int retval;

	switch (cmd) {
	case RNDGETENTCNT:
		/* inherently racy, no point locking */
		ent_count = ENTROPY_BITS(&input_pool);
		if (put_user(ent_count, p))
			return -EFAULT;
		return 0;
	case RNDADDTOENTCNT:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p))
			return -EFAULT;
		return credit_entropy_bits_safe(&input_pool, ent_count);
	case RNDADDENTROPY:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p++))
			return -EFAULT;
		if (ent_count < 0)
			return -EINVAL;
		if (get_user(size, p++))
			return -EFAULT;
		retval = write_pool(&input_pool, (const char __user *)p,
				    size);
		if (retval < 0)
			return retval;
		return credit_entropy_bits_safe(&input_pool, ent_count);
	case RNDZAPENTCNT:
	case RNDCLEARPOOL:
		/*
		 * Clear the entropy pool counters. We no longer clear
		 * the entropy pool, as that's silly.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		input_pool.entropy_count = 0;
		blocking_pool.entropy_count = 0;
		return 0;
	case RNDRESEEDCRNG:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (crng_init < 2)
			return -ENODATA;
		crng_reseed(&primary_crng, NULL);
		crng_global_init_time = jiffies - 1;
		return 0;
	default:
		return -EINVAL;
	}
}

static int random_fasync(int fd, struct file *filp, int on)
{
	return fasync_helper(fd, filp, on, &fasync);
}

const struct file_operations random_fops = {
	.read  = random_read,
	.write = random_write,
	.poll  = random_poll,
	.unlocked_ioctl = random_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
};

const struct file_operations urandom_fops = {
	.read  = urandom_read,
	.write = random_write,
	.unlocked_ioctl = random_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
};

SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
		unsigned int, flags)
{
	int ret;

	if (flags & ~(GRND_NONBLOCK|GRND_RANDOM))
		return -EINVAL;

	if (count > INT_MAX)
		count = INT_MAX;

	if (flags & GRND_RANDOM)
		return _random_read(flags & GRND_NONBLOCK, buf, count);

	if (!crng_ready()) {
		if (flags & GRND_NONBLOCK)
			return -EAGAIN;
		ret = wait_for_random_bytes();
		if (unlikely(ret))
			return ret;
	}
	return urandom_read(NULL, buf, count, NULL);
}
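/********************************************************************
 *
 * Sysctl interface
 *
 ********************************************************************/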

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int min_read_thresh = 8, min_write_thresh;
static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
static int max_write_thresh = INPUT_POOL_WORDS * 32;
static int random_min_urandom_seed = 60;
static char sysctl_bootid[16];
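/*
 * This function is used to return both the bootid UUID, and random
 * UUID.  The difference is in whether table->data is NULL; if it is,
 * then a new UUID is generated and returned to the user.
 *
 * If the user accesses this via the proc interface, the UUID will be
 * returned as an ASCII string in the standard UUID format; if via the
 * sysctl system call, as 16 bytes of binary data.
 */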
static int proc_do_uuid(struct ctl_table *table, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table fake_table;
	unsigned char buf[64], tmp_uuid[16], *uuid;

	uuid = table->data;
	if (!uuid) {
		uuid = tmp_uuid;
		generate_random_uuid(uuid);
	} else {
		static DEFINE_SPINLOCK(bootid_spinlock);

		spin_lock(&bootid_spinlock);
		if (!uuid[8])
			generate_random_uuid(uuid);
		spin_unlock(&bootid_spinlock);
	}

	sprintf(buf, "%pU", uuid);

	fake_table.data = buf;
	fake_table.maxlen = sizeof(buf);

	return proc_dostring(&fake_table, write, buffer, lenp, ppos);
}
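/*
 * Return entropy available scaled to integral bits
 */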
static int proc_do_entropy(struct ctl_table *table, int write,
			   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table fake_table;
	int entropy_count;

	entropy_count = *(int *)table->data >> ENTROPY_SHIFT;

	fake_table.data = &entropy_count;
	fake_table.maxlen = sizeof(entropy_count);

	return proc_dointvec(&fake_table, write, buffer, lenp, ppos);
}

static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
extern struct ctl_table random_table[];
struct ctl_table random_table[] = {
	{
		.procname	= "poolsize",
		.data		= &sysctl_poolsize,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "entropy_avail",
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_do_entropy,
		.data		= &input_pool.entropy_count,
	},
	{
		.procname	= "read_wakeup_threshold",
		.data		= &random_read_wakeup_bits,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_read_thresh,
		.extra2		= &max_read_thresh,
	},
	{
		.procname	= "write_wakeup_threshold",
		.data		= &random_write_wakeup_bits,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_write_thresh,
		.extra2		= &max_write_thresh,
	},
	{
		.procname	= "urandom_min_reseed_secs",
		.data		= &random_min_urandom_seed,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "boot_id",
		.data		= &sysctl_bootid,
		.maxlen		= 16,
		.mode		= 0444,
		.proc_handler	= proc_do_uuid,
	},
	{
		.procname	= "uuid",
		.maxlen		= 16,
		.mode		= 0444,
		.proc_handler	= proc_do_uuid,
	},
#ifdef ADD_INTERRUPT_BENCH
	{
		.procname	= "add_interrupt_avg_cycles",
		.data		= &avg_cycles,
		.maxlen		= sizeof(avg_cycles),
		.mode		= 0444,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "add_interrupt_avg_deviation",
		.data		= &avg_deviation,
		.maxlen		= sizeof(avg_deviation),
		.mode		= 0444,
		.proc_handler	= proc_doulongvec_minmax,
	},
#endif
	{ }
};
#endif	/* CONFIG_SYSCTL */

struct batched_entropy {
	union {
		u64 entropy_u64[CHACHA_BLOCK_SIZE / sizeof(u64)];
		u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)];
	};
	unsigned int position;
};
static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
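/*
 * Get a random word for internal kernel use only. The quality of the random
 * number is either as good as RDRAND or as good as /dev/urandom, with the
 * goal of being quite fast and not depleting entropy. In order to ensure
 * that the randomness provided by this function is okay, the function
 * wait_for_random_bytes() should be called and return 0 at least once
 * at any point prior.
 */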
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
u64 get_random_u64(void)
{
	u64 ret;
	bool use_lock;
	unsigned long flags = 0;
	struct batched_entropy *batch;
	static void *previous;

#if BITS_PER_LONG == 64
	if (arch_get_random_long((unsigned long *)&ret))
		return ret;
#else
	if (arch_get_random_long((unsigned long *)&ret) &&
	    arch_get_random_long((unsigned long *)&ret + 1))
		return ret;
#endif

	warn_unseeded_randomness(&previous);

	use_lock = READ_ONCE(crng_init) < 2;
	batch = &get_cpu_var(batched_entropy_u64);
	if (use_lock)
		read_lock_irqsave(&batched_entropy_reset_lock, flags);
	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
		extract_crng((u8 *)batch->entropy_u64);
		batch->position = 0;
	}
	ret = batch->entropy_u64[batch->position++];
	if (use_lock)
		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
	put_cpu_var(batched_entropy_u64);
	return ret;
}
EXPORT_SYMBOL(get_random_u64);

static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
u32 get_random_u32(void)
{
	u32 ret;
	bool use_lock;
	unsigned long flags = 0;
	struct batched_entropy *batch;
	static void *previous;

	if (arch_get_random_int(&ret))
		return ret;

	warn_unseeded_randomness(&previous);

	use_lock = READ_ONCE(crng_init) < 2;
	batch = &get_cpu_var(batched_entropy_u32);
	if (use_lock)
		read_lock_irqsave(&batched_entropy_reset_lock, flags);
	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
		extract_crng((u8 *)batch->entropy_u32);
		batch->position = 0;
	}
	ret = batch->entropy_u32[batch->position++];
	if (use_lock)
		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
	put_cpu_var(batched_entropy_u32);
	return ret;
}
EXPORT_SYMBOL(get_random_u32);
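/* It's important to invalidate all potential batched entropy that might
 * be stored before the crng is initialized, which we can do lazily by
 * simply resetting the counter to zero so that it's re-extracted on the
 * next usage.
 */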
static void invalidate_batched_entropy(void)
{
	int cpu;
	unsigned long flags;

	write_lock_irqsave(&batched_entropy_reset_lock, flags);
	for_each_possible_cpu (cpu) {
		per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
		per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
	}
	write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
}
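/**
 * randomize_page - Generate a random, page aligned address
 * @start:	The smallest acceptable address the caller will take.
 * @range:	The size of the area, starting at @start, within which the
 *		random address must fall.
 *
 * If @start + @range would overflow, @range is capped.
 *
 * NOTE: Historical use of randomize_range, which this replaces, presumed that
 * @start was already page aligned.  We now align it regardless.
 *
 * Return: A page aligned address within [start, start + range).  On error,
 * @start is returned.
 */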
unsigned long
randomize_page(unsigned long start, unsigned long range)
{
	if (!PAGE_ALIGNED(start)) {
		range -= PAGE_ALIGN(start) - start;
		start = PAGE_ALIGN(start);
	}

	if (start > ULONG_MAX - range)
		range = ULONG_MAX - start;

	range >>= PAGE_SHIFT;

	if (range == 0)
		return start;

	return start + (get_random_long() % range << PAGE_SHIFT);
}
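/* Interface for in-kernel drivers of true hardware RNGs.
 * Those devices may produce endless random bits and will be throttled
 * when our pool is full.
 */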
void add_hwgenerator_randomness(const char *buffer, size_t count,
				size_t entropy)
{
	struct entropy_store *poolp = &input_pool;

	if (unlikely(crng_init == 0)) {
		crng_fast_load(buffer, count);
		return;
	}

	/* Suspend writing if we're above the trickle threshold.
	 * We'll be woken up again once below random_write_wakeup_thresh,
	 * or when the calling thread is about to terminate.
	 */
	wait_event_interruptible(random_write_wait, kthread_should_stop() ||
			ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
	mix_pool_bytes(poolp, buffer, count);
	credit_entropy_bits(poolp, entropy);
}
EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);