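/*
 * random.c -- the kernel random number generator: entropy pools fed by
 * device, input, disk and interrupt timings, a ChaCha20-based CRNG
 * behind /dev/urandom and get_random_bytes(), and a hook that lets an
 * external RNG provider take over /dev/random, /dev/urandom and
 * getrandom(2).
 */
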
#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
#include <linux/cryptohash.h>
#include <linux/fips.h>
#include <linux/ptrace.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/uuid.h>
#include <linux/rcupdate.h>
#include <crypto/chacha20.h>

#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/random.h>
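
/*
 * Sizing of the entropy pools: the input pool is 4096 bits, the
 * blocking output pool 1024 bits.  SEC_XFER_SIZE bounds a single
 * blocking read, and EXTRACT_SIZE is the number of bytes produced by
 * one extraction (80 bits, half of a folded SHA-1 hash).  Entropy
 * accounting below is done in fractional bits: entropy_count holds
 * units of 1/8th of a bit (see ENTROPY_SHIFT).
 */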
#define INPUT_POOL_SHIFT	12
#define INPUT_POOL_WORDS	(1 << (INPUT_POOL_SHIFT-5))
#define OUTPUT_POOL_SHIFT	10
#define OUTPUT_POOL_WORDS	(1 << (OUTPUT_POOL_SHIFT-5))
#define SEC_XFER_SIZE		512
#define EXTRACT_SIZE		10

#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))

#define ENTROPY_SHIFT 3
#define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
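
/*
 * If an external RNG provider has registered itself, reads of
 * /dev/random, /dev/urandom and getrandom(2) are redirected to it.
 * The pointer is RCU-protected; NULL means no provider.
 */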
static const struct random_extrng __rcu *extrng;
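
/*
 * Wakeup thresholds, exported via sysctl: readers of /dev/random are
 * woken once the input pool holds at least random_read_wakeup_bits of
 * entropy; writers feeding the pool are woken once it drops below
 * random_write_wakeup_bits.
 */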
static int random_read_wakeup_bits = 64;
static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;
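
/*
 * Each entropy pool is stirred as a twisted generalized feedback shift
 * register: the five taps below, together with the twist_table further
 * down, define the feedback used by _mix_pool_bytes().  Only the first
 * two entries (128 and 32 words, for the input and blocking pools) are
 * compiled in; the remaining sizes are kept for reference under #if 0.
 */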
static struct poolinfo {
	int poolbitshift, poolwords, poolbytes, poolbits, poolfracbits;
#define S(x) ilog2(x)+5, (x), (x)*4, (x)*32, (x) << (ENTROPY_SHIFT+5)
	int tap1, tap2, tap3, tap4, tap5;
} poolinfo_table[] = {
	{ S(128),	104,	76,	51,	25,	1 },
	{ S(32),	26,	19,	14,	7,	1 },
#if 0
	{ S(2048),	1638,	1231,	819,	411,	1 },
	{ S(1024),	817,	615,	412,	204,	1 },
	{ S(1024),	819,	616,	410,	207,	2 },
	{ S(512),	411,	308,	208,	104,	1 },
	{ S(512),	409,	307,	206,	102,	2 },
	{ S(512),	409,	309,	205,	103,	2 },
	{ S(256),	205,	155,	101,	52,	1 },
	{ S(128),	103,	78,	51,	27,	2 },
	{ S(64),	52,	39,	26,	14,	1 },
#endif
};

static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
static struct fasync_struct *fasync;

static DEFINE_SPINLOCK(random_ready_list_lock);
static LIST_HEAD(random_ready_list);
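
/*
 * One instance of the ChaCha20-based CRNG: the 16-word ChaCha state,
 * the time it was last (re)seeded, and a lock.  crng_init tracks its
 * lifecycle: 0 = unseeded, 1 = "fast init" from early interrupt noise,
 * 2 = fully seeded; crng_ready() tests for the final state.
 */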
struct crng_state {
	__u32		state[16];
	unsigned long	init_time;
	spinlock_t	lock;
};

struct crng_state primary_crng = {
	.lock = __SPIN_LOCK_UNLOCKED(primary_crng.lock),
};

static int crng_init = 0;
#define crng_ready() (likely(crng_init > 1))
static int crng_init_cnt = 0;
static unsigned long crng_global_init_time = 0;
#define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE)
static void _extract_crng(struct crng_state *crng,
			  __u8 out[CHACHA20_BLOCK_SIZE]);
static void _crng_backtrack_protect(struct crng_state *crng,
				    __u8 tmp[CHACHA20_BLOCK_SIZE], int used);
static void process_random_ready_list(void);
static void _get_random_bytes(void *buf, int nbytes);

static struct ratelimit_state unseeded_warning =
	RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
static struct ratelimit_state urandom_warning =
	RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);

static int ratelimit_disable __read_mostly;

module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");

static const struct file_operations extrng_random_fops;
static const struct file_operations extrng_urandom_fops;
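
/*
 * An entropy_store is one input or output pool: its configuration,
 * backing array and accounting state.  A pool with a non-NULL ->pull
 * (the blocking pool) refills itself from the pool it points at.
 */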
struct entropy_store;
struct entropy_store {
	/* read-only data: */
	const struct poolinfo *poolinfo;
	__u32 *pool;
	const char *name;
	struct entropy_store *pull;
	struct work_struct push_work;

	/* read-write data: */
	unsigned long last_pulled;
	spinlock_t lock;
	unsigned short add_ptr;
	unsigned short input_rotate;
	int entropy_count;
	int entropy_total;
	unsigned int initialized:1;
	unsigned int last_data_init:1;
	__u8 last_data[EXTRACT_SIZE];
};

static ssize_t extract_entropy(struct entropy_store *r, void *buf,
			       size_t nbytes, int min, int rsvd);
static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
				size_t nbytes, int fips);

static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
static void push_to_pool(struct work_struct *work);
static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;

static struct entropy_store input_pool = {
	.poolinfo = &poolinfo_table[0],
	.name = "input",
	.lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
	.pool = input_pool_data
};

static struct entropy_store blocking_pool = {
	.poolinfo = &poolinfo_table[1],
	.name = "blocking",
	.pull = &input_pool,
	.lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock),
	.pool = blocking_pool_data,
	.push_work = __WORK_INITIALIZER(blocking_pool.push_work,
					push_to_pool),
};

static __u32 const twist_table[8] = {
	0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
	0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
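
/*
 * This function adds bytes into the entropy pool.  It does not update
 * the entropy estimate; the caller must call credit_entropy_bits() if
 * the input is worth crediting.  The pool is stirred with the tap
 * positions from ->poolinfo plus a twist, so input bits are spread
 * across the whole pool.
 */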
static void _mix_pool_bytes(struct entropy_store *r, const void *in,
			    int nbytes)
{
	unsigned long i, tap1, tap2, tap3, tap4, tap5;
	int input_rotate;
	int wordmask = r->poolinfo->poolwords - 1;
	const char *bytes = in;
	__u32 w;

	tap1 = r->poolinfo->tap1;
	tap2 = r->poolinfo->tap2;
	tap3 = r->poolinfo->tap3;
	tap4 = r->poolinfo->tap4;
	tap5 = r->poolinfo->tap5;

	input_rotate = r->input_rotate;
	i = r->add_ptr;

	while (nbytes--) {
		w = rol32(*bytes++, input_rotate);
		i = (i - 1) & wordmask;

		/* XOR in the various taps */
		w ^= r->pool[i];
		w ^= r->pool[(i + tap1) & wordmask];
		w ^= r->pool[(i + tap2) & wordmask];
		w ^= r->pool[(i + tap3) & wordmask];
		w ^= r->pool[(i + tap4) & wordmask];
		w ^= r->pool[(i + tap5) & wordmask];

		/* Mix the result back in with a twist on the left */
		r->pool[i] = (w >> 3) ^ twist_table[w & 7];

		/*
		 * Normally, we add 7 bits of rotation to the pool.
		 * At the beginning of the pool, add an extra 7 bits
		 * rotation, so that successive passes spread the
		 * input bits across the pool evenly.
		 */
		input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
	}

	r->input_rotate = input_rotate;
	r->add_ptr = i;
}

static void __mix_pool_bytes(struct entropy_store *r, const void *in,
			     int nbytes)
{
	trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
	_mix_pool_bytes(r, in, nbytes);
}

static void mix_pool_bytes(struct entropy_store *r, const void *in,
			   int nbytes)
{
	unsigned long flags;

	trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
	spin_lock_irqsave(&r->lock, flags);
	_mix_pool_bytes(r, in, nbytes);
	spin_unlock_irqrestore(&r->lock, flags);
}

struct fast_pool {
	__u32		pool[4];
	unsigned long	last;
	unsigned short	reg_idx;
	unsigned char	count;
};
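
/*
 * This is a fast mixing routine used by the interrupt randomness
 * collector.  It is hardcoded for a 128-bit pool and assumes that any
 * locks that might be needed are taken by the caller.
 */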
static void fast_mix(struct fast_pool *f)
{
	__u32 a = f->pool[0], b = f->pool[1];
	__u32 c = f->pool[2], d = f->pool[3];

	a += b; c += d;
	b = rol32(b, 6); d = rol32(d, 27);
	d ^= a; b ^= c;

	a += b; c += d;
	b = rol32(b, 16); d = rol32(d, 14);
	d ^= a; b ^= c;

	a += b; c += d;
	b = rol32(b, 6); d = rol32(d, 27);
	d ^= a; b ^= c;

	a += b; c += d;
	b = rol32(b, 16); d = rol32(d, 14);
	d ^= a; b ^= c;

	f->pool[0] = a; f->pool[1] = b;
	f->pool[2] = c; f->pool[3] = d;
	f->count++;
}

static void process_random_ready_list(void)
{
	unsigned long flags;
	struct random_ready_callback *rdy, *tmp;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) {
		struct module *owner = rdy->owner;

		list_del_init(&rdy->list);
		rdy->func(rdy);
		module_put(owner);
	}
	spin_unlock_irqrestore(&random_ready_list_lock, flags);
}
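
/*
 * Credit (or debit, for negative nbits) the entropy store with n bits
 * of entropy.  Use credit_entropy_bits_safe() when the value comes
 * from userspace and might be out of range.
 */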
static void credit_entropy_bits(struct entropy_store *r, int nbits)
{
	int entropy_count, orig;
	const int pool_size = r->poolinfo->poolfracbits;
	int nfrac = nbits << ENTROPY_SHIFT;

	if (!nbits)
		return;

retry:
	entropy_count = orig = READ_ONCE(r->entropy_count);
	if (nfrac < 0) {
		/* Debit */
		entropy_count += nfrac;
	} else {
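		/*
		 * New entropy may land on bits that already carry
		 * entropy, so it is not credited at face value: each
		 * chunk is credited with roughly 3/4 of the fraction
		 * of the pool that is still empty, i.e.
		 *   add = 3/4 * anfrac * (pool_size - entropy_count)
		 *					/ pool_size,
		 * evaluated with shifts.  Chunks are capped at half
		 * the pool so the approximation stays sound.
		 */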
		int pnfrac = nfrac;
		const int s = r->poolinfo->poolbitshift + ENTROPY_SHIFT + 2;
		/* The +2 above corresponds to the /4 in the denominator */

		do {
			unsigned int anfrac = min(pnfrac, pool_size/2);
			unsigned int add =
				((pool_size - entropy_count)*anfrac*3) >> s;

			entropy_count += add;
			pnfrac -= anfrac;
		} while (unlikely(entropy_count < pool_size-2 && pnfrac));
	}

	if (unlikely(entropy_count < 0)) {
		pr_warn("random: negative entropy/overflow: pool %s count %d\n",
			r->name, entropy_count);
		WARN_ON(1);
		entropy_count = 0;
	} else if (entropy_count > pool_size)
		entropy_count = pool_size;
	if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
		goto retry;

	r->entropy_total += nbits;
	if (!r->initialized && r->entropy_total > 128) {
		r->initialized = 1;
		r->entropy_total = 0;
	}

	trace_credit_entropy_bits(r->name, nbits,
				  entropy_count >> ENTROPY_SHIFT,
				  r->entropy_total, _RET_IP_);

	if (r == &input_pool) {
		int entropy_bits = entropy_count >> ENTROPY_SHIFT;

		if (crng_init < 2 && entropy_bits >= 128) {
			crng_reseed(&primary_crng, r);
			entropy_bits = r->entropy_count >> ENTROPY_SHIFT;
		}

		/* should we wake readers? */
		if (entropy_bits >= random_read_wakeup_bits &&
		    wq_has_sleeper(&random_read_wait)) {
			wake_up_interruptible(&random_read_wait);
			kill_fasync(&fasync, SIGIO, POLL_IN);
		}

		/*
		 * If the input pool is getting full, spill some
		 * entropy over to the blocking pool, but only once
		 * the latter has dropped below 75% full.
		 */
		if (entropy_bits > random_write_wakeup_bits &&
		    r->initialized &&
		    r->entropy_total >= 2*random_read_wakeup_bits) {
			struct entropy_store *other = &blocking_pool;

			if (other->entropy_count <=
			    3 * other->poolinfo->poolfracbits / 4) {
				schedule_work(&other->push_work);
				r->entropy_total = 0;
			}
		}
	}
}

static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
{
	const int nbits_max = r->poolinfo->poolwords * 32;

	if (nbits < 0)
		return -EINVAL;

	/* Cap the value to avoid overflows */
	nbits = min(nbits, nbits_max);

	credit_entropy_bits(r, nbits);
	return 0;
}
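
/*********************************************************************
 *
 * CRNG using CHACHA20
 *
 *********************************************************************/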
#define CRNG_RESEED_INTERVAL (300*HZ)

static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);

#ifdef CONFIG_NUMA
/*
 * One crng instance per NUMA node, to avoid cacheline contention when
 * many tasks read /dev/urandom in parallel.
 */
static struct crng_state **crng_node_pool __read_mostly;
#endif

static void invalidate_batched_entropy(void);
static void numa_crng_init(void);

static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
static int __init parse_trust_cpu(char *arg)
{
	return kstrtobool(arg, &trust_cpu);
}
early_param("random.trust_cpu", parse_trust_cpu);

static void crng_initialize(struct crng_state *crng)
{
	int i;
	int arch_init = 1;
	unsigned long rv;

	memcpy(&crng->state[0], "expand 32-byte k", 16);
	if (crng == &primary_crng)
		_extract_entropy(&input_pool, &crng->state[4],
				 sizeof(__u32) * 12, 0);
	else
		_get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
	for (i = 4; i < 16; i++) {
		if (!arch_get_random_seed_long(&rv) &&
		    !arch_get_random_long(&rv)) {
			rv = random_get_entropy();
			arch_init = 0;
		}
		crng->state[i] ^= rv;
	}
	if (trust_cpu && arch_init && crng == &primary_crng) {
		invalidate_batched_entropy();
		numa_crng_init();
		crng_init = 2;
		pr_notice("random: crng done (trusting CPU's manufacturer)\n");
	}
	crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}

#ifdef CONFIG_NUMA
static void do_numa_crng_init(struct work_struct *work)
{
	int i;
	struct crng_state *crng;
	struct crng_state **pool;

	pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
	for_each_online_node(i) {
		crng = kmalloc_node(sizeof(struct crng_state),
				    GFP_KERNEL | __GFP_NOFAIL, i);
		spin_lock_init(&crng->lock);
		crng_initialize(crng);
		pool[i] = crng;
	}
	mb();
	if (cmpxchg(&crng_node_pool, NULL, pool)) {
		for_each_node(i)
			kfree(pool[i]);
		kfree(pool);
	}
}

static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init);

static void numa_crng_init(void)
{
	schedule_work(&numa_crng_init_work);
}
#else
static void numa_crng_init(void) {}
#endif
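
/*
 * crng_fast_load() can be called from the interrupt service path, so
 * it only trylocks and XORs the bytes straight into the primary CRNG
 * key.  Once CRNG_INIT_CNT_THRESH bytes have been loaded this way,
 * crng_init moves to 1 ("fast init done").
 */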
static int crng_fast_load(const char *cp, size_t len)
{
	unsigned long flags;
	unsigned char *p;

	if (!spin_trylock_irqsave(&primary_crng.lock, flags))
		return 0;
	if (crng_init != 0) {
		spin_unlock_irqrestore(&primary_crng.lock, flags);
		return 0;
	}
	p = (unsigned char *) &primary_crng.state[4];
	while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
		p[crng_init_cnt % CHACHA20_KEY_SIZE] ^= *cp;
		cp++; crng_init_cnt++; len--;
	}
	spin_unlock_irqrestore(&primary_crng.lock, flags);
	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
		invalidate_batched_entropy();
		crng_init = 1;
		wake_up_interruptible(&crng_init_wait);
		pr_notice("random: fast init done\n");
	}
	return 1;
}
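
/*
 * crng_slow_load() is called by add_device_randomness(), whose input
 * cannot be assumed unpredictable (it may be something as static as a
 * DMI table).  It therefore touches all of the primary CRNG key via an
 * LFSR-driven walk, but does *not* advance crng_init_cnt.
 */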
static int crng_slow_load(const char *cp, size_t len)
{
	unsigned long flags;
	static unsigned char lfsr = 1;
	unsigned char tmp;
	unsigned i, max = CHACHA20_KEY_SIZE;
	const char *src_buf = cp;
	char *dest_buf = (char *) &primary_crng.state[4];

	if (!spin_trylock_irqsave(&primary_crng.lock, flags))
		return 0;
	if (crng_init != 0) {
		spin_unlock_irqrestore(&primary_crng.lock, flags);
		return 0;
	}
	if (len > max)
		max = len;

	for (i = 0; i < max ; i++) {
		tmp = lfsr;
		lfsr >>= 1;
		if (tmp & 1)
			lfsr ^= 0xE1;
		tmp = dest_buf[i % CHACHA20_KEY_SIZE];
		dest_buf[i % CHACHA20_KEY_SIZE] ^= src_buf[i % len] ^ lfsr;
		lfsr += (tmp << 3) | (tmp >> 5);
	}
	spin_unlock_irqrestore(&primary_crng.lock, flags);
	return 1;
}

static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
{
	unsigned long flags;
	int i, num;
	union {
		__u8 block[CHACHA20_BLOCK_SIZE];
		__u32 key[8];
	} buf;

	if (r) {
		num = extract_entropy(r, &buf, 32, 16, 0);
		if (num == 0)
			return;
	} else {
		_extract_crng(&primary_crng, buf.block);
		_crng_backtrack_protect(&primary_crng, buf.block,
					CHACHA20_KEY_SIZE);
	}
	spin_lock_irqsave(&crng->lock, flags);
	for (i = 0; i < 8; i++) {
		unsigned long rv;
		if (!arch_get_random_seed_long(&rv) &&
		    !arch_get_random_long(&rv))
			rv = random_get_entropy();
		crng->state[i+4] ^= buf.key[i] ^ rv;
	}
	memzero_explicit(&buf, sizeof(buf));
	crng->init_time = jiffies;
	spin_unlock_irqrestore(&crng->lock, flags);
	if (crng == &primary_crng && crng_init < 2) {
		invalidate_batched_entropy();
		numa_crng_init();
		crng_init = 2;
		process_random_ready_list();
		wake_up_interruptible(&crng_init_wait);
		pr_notice("random: crng init done\n");
		if (unseeded_warning.missed) {
			pr_notice("random: %d get_random_xx warning(s) missed "
				  "due to ratelimiting\n",
				  unseeded_warning.missed);
			unseeded_warning.missed = 0;
		}
		if (urandom_warning.missed) {
			pr_notice("random: %d urandom warning(s) missed "
				  "due to ratelimiting\n",
				  urandom_warning.missed);
			urandom_warning.missed = 0;
		}
	}
}

static void _extract_crng(struct crng_state *crng,
			  __u8 out[CHACHA20_BLOCK_SIZE])
{
	unsigned long v, flags;

	if (crng_ready() &&
	    (time_after(crng_global_init_time, crng->init_time) ||
	     time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)))
		crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL);
	spin_lock_irqsave(&crng->lock, flags);
	if (arch_get_random_long(&v))
		crng->state[14] ^= v;
	chacha20_block(&crng->state[0], out);
	if (crng->state[12] == 0)
		crng->state[13]++;
	spin_unlock_irqrestore(&crng->lock, flags);
}

static void extract_crng(__u8 out[CHACHA20_BLOCK_SIZE])
{
	struct crng_state *crng = NULL;

#ifdef CONFIG_NUMA
	if (crng_node_pool)
		crng = crng_node_pool[numa_node_id()];
	if (crng == NULL)
#endif
		crng = &primary_crng;
	_extract_crng(crng, out);
}
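
/*
 * Use the leftover bytes from the CRNG block output (if there is
 * enough) to mutate the CRNG key to provide backtracking protection.
 */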
static void _crng_backtrack_protect(struct crng_state *crng,
				    __u8 tmp[CHACHA20_BLOCK_SIZE], int used)
{
	unsigned long flags;
	__u32 *s, *d;
	int i;

	used = round_up(used, sizeof(__u32));
	if (used + CHACHA20_KEY_SIZE > CHACHA20_BLOCK_SIZE) {
		extract_crng(tmp);
		used = 0;
	}
	spin_lock_irqsave(&crng->lock, flags);
	s = (__u32 *) &tmp[used];
	d = &crng->state[4];
	for (i=0; i < 8; i++)
		*d++ ^= *s++;
	spin_unlock_irqrestore(&crng->lock, flags);
}

static void crng_backtrack_protect(__u8 tmp[CHACHA20_BLOCK_SIZE], int used)
{
	struct crng_state *crng = NULL;

#ifdef CONFIG_NUMA
	if (crng_node_pool)
		crng = crng_node_pool[numa_node_id()];
	if (crng == NULL)
#endif
		crng = &primary_crng;
	_crng_backtrack_protect(crng, tmp, used);
}

static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
{
	ssize_t ret = 0, i = CHACHA20_BLOCK_SIZE;
	__u8 tmp[CHACHA20_BLOCK_SIZE] __aligned(4);
	int large_request = (nbytes > 256);

	while (nbytes) {
		if (large_request && need_resched()) {
			if (signal_pending(current)) {
				if (ret == 0)
					ret = -ERESTARTSYS;
				break;
			}
			schedule();
		}

		extract_crng(tmp);
		i = min_t(int, nbytes, CHACHA20_BLOCK_SIZE);
		if (copy_to_user(buf, tmp, i)) {
			ret = -EFAULT;
			break;
		}

		nbytes -= i;
		buf += i;
		ret += i;
	}
	crng_backtrack_protect(tmp, i);

	/* Wipe data just written to memory */
	memzero_explicit(tmp, sizeof(tmp));

	return ret;
}
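
/*********************************************************************
 *
 * Entropy input management
 *
 *********************************************************************/

/* There is one of these per entropy source */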
struct timer_rand_state {
	cycles_t last_time;
	long last_delta, last_delta2;
};

#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, }
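
/*
 * Add device- or boot-specific data to the input pool to help
 * initialize it.
 *
 * None of this adds any entropy; it is meant to keep the entropy pools
 * from being wholly predictable across identically configured
 * machines.  If the data might carry real entropy, it is also
 * slow-loaded into the not-yet-ready CRNG.
 */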
void add_device_randomness(const void *buf, unsigned int size)
{
	unsigned long time = random_get_entropy() ^ jiffies;
	unsigned long flags;

	if (!crng_ready() && size)
		crng_slow_load(buf, size);

	trace_add_device_randomness(size, _RET_IP_);
	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(&input_pool, buf, size);
	_mix_pool_bytes(&input_pool, &time, sizeof(time));
	spin_unlock_irqrestore(&input_pool.lock, flags);
}
EXPORT_SYMBOL(add_device_randomness);

static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE;
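
/*
 * This function adds entropy to the entropy pool by using timing
 * delays.  It uses the timer_rand_state structure to make an estimate
 * of how many bits of entropy this call has added to the pool.  The
 * value "num" encodes which source (input event, disk) the timing
 * came from.
 */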
static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
{
	struct entropy_store *r;
	struct {
		long jiffies;
		unsigned cycles;
		unsigned num;
	} sample;
	long delta, delta2, delta3;

	sample.jiffies = jiffies;
	sample.cycles = random_get_entropy();
	sample.num = num;
	r = &input_pool;
	mix_pool_bytes(r, &sample, sizeof(sample));

	/*
	 * Calculate the number of bits of randomness we probably added.
	 * We take into account the first, second and third-order deltas
	 * in order to make our estimate.
	 */
	delta = sample.jiffies - state->last_time;
	state->last_time = sample.jiffies;

	delta2 = delta - state->last_delta;
	state->last_delta = delta;

	delta3 = delta2 - state->last_delta2;
	state->last_delta2 = delta2;

	if (delta < 0)
		delta = -delta;
	if (delta2 < 0)
		delta2 = -delta2;
	if (delta3 < 0)
		delta3 = -delta3;
	if (delta > delta2)
		delta = delta2;
	if (delta > delta3)
		delta = delta3;

	/*
	 * delta is now the minimum absolute delta.  Round down by one
	 * bit on general principles, and limit the entropy estimate
	 * to 11 bits.
	 */
	credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
}

void add_input_randomness(unsigned int type, unsigned int code,
			  unsigned int value)
{
	static unsigned char last_value;

	/* ignore autorepeat and the like */
	if (value == last_value)
		return;

	last_value = value;
	add_timer_randomness(&input_timer_state,
			     (type << 4) ^ code ^ (code >> 4) ^ value);
	trace_add_input_randomness(ENTROPY_BITS(&input_pool));
}
EXPORT_SYMBOL_GPL(add_input_randomness);

static DEFINE_PER_CPU(struct fast_pool, irq_randomness);

#ifdef ADD_INTERRUPT_BENCH
static unsigned long avg_cycles, avg_deviation;

#define AVG_SHIFT 8	/* Exponential average factor k=1/256 */
#define FIXED_1_2 (1 << (AVG_SHIFT-1))

static void add_interrupt_bench(cycles_t start)
{
	long delta = random_get_entropy() - start;

	/* Use a weighted moving average */
	delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT);
	avg_cycles += delta;
	/* And average deviation */
	delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT);
	avg_deviation += delta;
}
#else
#define add_interrupt_bench(x)
#endif

static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
{
	__u32 *ptr = (__u32 *) regs;
	unsigned int idx;

	if (regs == NULL)
		return 0;
	idx = READ_ONCE(f->reg_idx);
	if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
		idx = 0;
	ptr += idx++;
	WRITE_ONCE(f->reg_idx, idx);
	return *ptr;
}

void add_interrupt_randomness(int irq, int irq_flags)
{
	struct entropy_store *r;
	struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
	struct pt_regs *regs = get_irq_regs();
	unsigned long now = jiffies;
	cycles_t cycles = random_get_entropy();
	__u32 c_high, j_high;
	__u64 ip;
	unsigned long seed;
	int credit = 0;

	if (cycles == 0)
		cycles = get_reg(fast_pool, regs);
	c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
	j_high = (sizeof(now) > 4) ? now >> 32 : 0;
	fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
	fast_pool->pool[1] ^= now ^ c_high;
	ip = regs ? instruction_pointer(regs) : _RET_IP_;
	fast_pool->pool[2] ^= ip;
	fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
		get_reg(fast_pool, regs);

	fast_mix(fast_pool);
	add_interrupt_bench(cycles);

	if (unlikely(crng_init == 0)) {
		if ((fast_pool->count >= 64) &&
		    crng_fast_load((char *) fast_pool->pool,
				   sizeof(fast_pool->pool))) {
			fast_pool->count = 0;
			fast_pool->last = now;
		}
		return;
	}

	if ((fast_pool->count < 64) &&
	    !time_after(now, fast_pool->last + HZ))
		return;

	r = &input_pool;
	if (!spin_trylock(&r->lock))
		return;

	fast_pool->last = now;
	__mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));

	/*
	 * If we have an architectural seed generator, produce a seed
	 * and add it to the pool.  For the sake of paranoia don't let
	 * the architectural seed generator dominate the input from the
	 * interrupt noise.
	 */
	if (arch_get_random_seed_long(&seed)) {
		__mix_pool_bytes(r, &seed, sizeof(seed));
		credit = 1;
	}
	spin_unlock(&r->lock);

	fast_pool->count = 0;

	/* award one bit for the contents of the fast pool */
	credit_entropy_bits(r, credit + 1);
}
EXPORT_SYMBOL_GPL(add_interrupt_randomness);

#ifdef CONFIG_BLOCK
void add_disk_randomness(struct gendisk *disk)
{
	if (!disk || !disk->random)
		return;
	/* first major is 1, so we get >= 0x200 here */
	add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
	trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool));
}
EXPORT_SYMBOL_GPL(add_disk_randomness);
#endif
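
/*********************************************************************
 *
 * Entropy extraction routines
 *
 *********************************************************************/

/*
 * This function is responsible for transferring entropy from the
 * input pool to the blocking output pool, making sure we pull enough
 * for a "catastrophic reseed".
 */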
static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes);
static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
{
	if (!r->pull ||
	    r->entropy_count >= (nbytes << (ENTROPY_SHIFT + 3)) ||
	    r->entropy_count > r->poolinfo->poolfracbits)
		return;

	_xfer_secondary_pool(r, nbytes);
}

static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
{
	__u32 tmp[OUTPUT_POOL_WORDS];
	int bytes = nbytes;

	/* pull at least as much as a wakeup */
	bytes = max_t(int, bytes, random_read_wakeup_bits / 8);
	/* but never more than the buffer size */
	bytes = min_t(int, bytes, sizeof(tmp));

	trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8,
				  ENTROPY_BITS(r), ENTROPY_BITS(r->pull));
	bytes = extract_entropy(r->pull, tmp, bytes,
				random_read_wakeup_bits / 8, 0);
	mix_pool_bytes(r, tmp, bytes);
	credit_entropy_bits(r, bytes*8);
}
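
/*
 * Used as a workqueue function so that when the input pool is getting
 * full, we can "spill over" some entropy to the blocking pool.
 */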
static void push_to_pool(struct work_struct *work)
{
	struct entropy_store *r = container_of(work, struct entropy_store,
					       push_work);
	BUG_ON(!r);
	_xfer_secondary_pool(r, random_read_wakeup_bits/8);
	trace_push_to_pool(r->name, r->entropy_count >> ENTROPY_SHIFT,
			   r->pull->entropy_count >> ENTROPY_SHIFT);
}
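
/*
 * This function decides how many bytes to actually take from the
 * given pool.  It debits the corresponding entropy, honoring the
 * "min" (all-or-nothing) and "reserved" (kept back for /dev/random)
 * parameters, and wakes pool writers if room opened up.
 */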
static size_t account(struct entropy_store *r, size_t nbytes, int min,
		      int reserved)
{
	int entropy_count, orig, have_bytes;
	size_t ibytes, nfrac;

	BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);

	/* Can we pull enough? */
retry:
	entropy_count = orig = READ_ONCE(r->entropy_count);
	ibytes = nbytes;
	/* never pull more than available */
	have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);

	if ((have_bytes -= reserved) < 0)
		have_bytes = 0;
	ibytes = min_t(size_t, ibytes, have_bytes);
	if (ibytes < min)
		ibytes = 0;

	if (unlikely(entropy_count < 0)) {
		pr_warn("random: negative entropy count: pool %s count %d\n",
			r->name, entropy_count);
		WARN_ON(1);
		entropy_count = 0;
	}
	nfrac = ibytes << (ENTROPY_SHIFT + 3);
	if ((size_t) entropy_count > nfrac)
		entropy_count -= nfrac;
	else
		entropy_count = 0;

	if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
		goto retry;

	trace_debit_entropy(r->name, 8 * ibytes);
	if (ibytes &&
	    (r->entropy_count >> ENTROPY_SHIFT) < random_write_wakeup_bits) {
		wake_up_interruptible(&random_write_wait);
		kill_fasync(&fasync, SIGIO, POLL_OUT);
	}

	return ibytes;
}
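
/*
 * This function does the actual extraction for extract_entropy().
 * It hashes the whole pool with SHA-1, mixes the hash back in to
 * prevent backtracking, then folds the 160-bit hash down to 80 bits
 * of output so that recognizable patterns of the hash never leave
 * the kernel.
 */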
static void extract_buf(struct entropy_store *r, __u8 *out)
{
	int i;
	union {
		__u32 w[5];
		unsigned long l[LONGS(20)];
	} hash;
	__u32 workspace[SHA1_WORKSPACE_WORDS];
	unsigned long flags;

	/*
	 * If we have an architectural hardware random number
	 * generator, use it for SHA's initial vector.
	 */
	sha1_init(hash.w);
	for (i = 0; i < LONGS(20); i++) {
		unsigned long v;
		if (!arch_get_random_long(&v))
			break;
		hash.l[i] = v;
	}

	/* Generate a hash across the pool, 16 words (512 bits) at a time */
	spin_lock_irqsave(&r->lock, flags);
	for (i = 0; i < r->poolinfo->poolwords; i += 16)
		sha1_transform(hash.w, (__u8 *)(r->pool + i), workspace);

	/*
	 * We mix the hash back into the pool to prevent backtracking
	 * attacks (where the attacker knows the state of the pool
	 * plus the current outputs, and attempts to find previous
	 * outputs), unless the hash function can be inverted.  By
	 * mixing at least a SHA1 worth of hash data back, we make
	 * brute-forcing the feedback as hard as brute-forcing the
	 * hash.
	 */
	__mix_pool_bytes(r, hash.w, sizeof(hash.w));
	spin_unlock_irqrestore(&r->lock, flags);

	memzero_explicit(workspace, sizeof(workspace));

	/*
	 * In case the hash function has some recognizable output
	 * pattern, we fold it in half.  Thus, we always feed back
	 * twice as much data as we output.
	 */
	hash.w[0] ^= hash.w[3];
	hash.w[1] ^= hash.w[4];
	hash.w[2] ^= rol32(hash.w[2], 16);

	memcpy(out, &hash, EXTRACT_SIZE);
	memzero_explicit(&hash, sizeof(hash));
}

static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
				size_t nbytes, int fips)
{
	ssize_t ret = 0, i;
	__u8 tmp[EXTRACT_SIZE];
	unsigned long flags;

	while (nbytes) {
		extract_buf(r, tmp);

		if (fips) {
			spin_lock_irqsave(&r->lock, flags);
			if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
				panic("Hardware RNG duplicated output!\n");
			memcpy(r->last_data, tmp, EXTRACT_SIZE);
			spin_unlock_irqrestore(&r->lock, flags);
		}
		i = min_t(int, nbytes, EXTRACT_SIZE);
		memcpy(buf, tmp, i);
		nbytes -= i;
		buf += i;
		ret += i;
	}

	/* Wipe data just returned from memory */
	memzero_explicit(tmp, sizeof(tmp));

	return ret;
}
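
/*
 * This function extracts randomness from the "entropy pool", and
 * returns it in a buffer.
 *
 * The min parameter specifies the minimum amount we can pull
 * (all-or-nothing), and the reserved parameter indicates how much
 * entropy we must leave in the pool after each pull to avoid starving
 * /dev/random readers.
 */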
static ssize_t extract_entropy(struct entropy_store *r, void *buf,
			       size_t nbytes, int min, int reserved)
{
	__u8 tmp[EXTRACT_SIZE];
	unsigned long flags;

	/* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
	if (fips_enabled) {
		spin_lock_irqsave(&r->lock, flags);
		if (!r->last_data_init) {
			r->last_data_init = 1;
			spin_unlock_irqrestore(&r->lock, flags);
			trace_extract_entropy(r->name, EXTRACT_SIZE,
					      ENTROPY_BITS(r), _RET_IP_);
			xfer_secondary_pool(r, EXTRACT_SIZE);
			extract_buf(r, tmp);
			spin_lock_irqsave(&r->lock, flags);
			memcpy(r->last_data, tmp, EXTRACT_SIZE);
		}
		spin_unlock_irqrestore(&r->lock, flags);
	}

	trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
	xfer_secondary_pool(r, nbytes);
	nbytes = account(r, nbytes, min, reserved);

	return _extract_entropy(r, buf, nbytes, fips_enabled);
}

/*
 * This function extracts randomness from the "entropy pool", and
 * returns it in a userspace buffer.
 */
static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
				    size_t nbytes)
{
	ssize_t ret = 0, i;
	__u8 tmp[EXTRACT_SIZE];
	int large_request = (nbytes > 256);

	trace_extract_entropy_user(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
	xfer_secondary_pool(r, nbytes);
	nbytes = account(r, nbytes, 0, 0);

	while (nbytes) {
		if (large_request && need_resched()) {
			if (signal_pending(current)) {
				if (ret == 0)
					ret = -ERESTARTSYS;
				break;
			}
			schedule();
		}

		extract_buf(r, tmp);
		i = min_t(int, nbytes, EXTRACT_SIZE);
		if (copy_to_user(buf, tmp, i)) {
			ret = -EFAULT;
			break;
		}

		nbytes -= i;
		buf += i;
		ret += i;
	}

	/* Wipe data just returned from memory */
	memzero_explicit(tmp, sizeof(tmp));

	return ret;
}

#define warn_unseeded_randomness(previous) \
	_warn_unseeded_randomness(__func__, (void *) _RET_IP_, (previous))

static void _warn_unseeded_randomness(const char *func_name, void *caller,
				      void **previous)
{
#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
	const bool print_once = false;
#else
	static bool print_once __read_mostly;
#endif

	if (print_once ||
	    crng_ready() ||
	    (previous && (caller == READ_ONCE(*previous))))
		return;
	WRITE_ONCE(*previous, caller);
#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
	print_once = true;
#endif
	if (__ratelimit(&unseeded_warning))
		printk_deferred(KERN_NOTICE "random: %s called from %pS "
				"with crng_init=%d\n", func_name, caller,
				crng_init);
}
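
/*
 * This function is the exported kernel interface.  It returns some
 * number of good random numbers, suitable for key generation, seeding
 * TCP sequence numbers, etc.  Callers that need the CRNG to be seeded
 * first should use wait_for_random_bytes().
 */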
static void _get_random_bytes(void *buf, int nbytes)
{
	__u8 tmp[CHACHA20_BLOCK_SIZE] __aligned(4);

	trace_get_random_bytes(nbytes, _RET_IP_);

	while (nbytes >= CHACHA20_BLOCK_SIZE) {
		extract_crng(buf);
		buf += CHACHA20_BLOCK_SIZE;
		nbytes -= CHACHA20_BLOCK_SIZE;
	}

	if (nbytes > 0) {
		extract_crng(tmp);
		memcpy(buf, tmp, nbytes);
		crng_backtrack_protect(tmp, nbytes);
	} else
		crng_backtrack_protect(tmp, CHACHA20_BLOCK_SIZE);
	memzero_explicit(tmp, sizeof(tmp));
}

void get_random_bytes(void *buf, int nbytes)
{
	static void *previous;

	warn_unseeded_randomness(&previous);
	_get_random_bytes(buf, nbytes);
}
EXPORT_SYMBOL(get_random_bytes);
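
/*
 * Entropy bootstrapping for when the CRNG is still unseeded at the
 * time of the first blocking request: a timer races the loop in
 * try_to_generate_entropy() below, and each observed jitter in the
 * cycle counter is credited as one bit of entropy.  The timer is
 * re-armed from the loop, never from the timer callback itself, so
 * it stops ticking once the loop exits.
 */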
static void entropy_timer(struct timer_list *t)
{
	credit_entropy_bits(&input_pool, 1);
}

static void try_to_generate_entropy(void)
{
	struct {
		unsigned long now;
		struct timer_list timer;
	} stack;

	stack.now = random_get_entropy();

	/* Slow counter - or none. Don't even bother */
	if (stack.now == random_get_entropy())
		return;

	timer_setup_on_stack(&stack.timer, entropy_timer, 0);
	while (!crng_ready()) {
		if (!timer_pending(&stack.timer))
			mod_timer(&stack.timer, jiffies+1);
		mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
		schedule();
		stack.now = random_get_entropy();
	}

	del_timer_sync(&stack.timer);
	destroy_timer_on_stack(&stack.timer);
	mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
}
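
/*
 * Wait for the CRNG to be seeded and thus guaranteed to supply
 * cryptographically secure random numbers.  This applies to
 * get_random_bytes() and the batched get_random_{u32,u64} family.
 *
 * Returns: 0 once the CRNG has been seeded, or -ERESTARTSYS if the
 * wait was interrupted by a signal.
 */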
int wait_for_random_bytes(void)
{
	if (likely(crng_ready()))
		return 0;

	do {
		int ret;
		ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
		if (ret)
			return ret > 0 ? 0 : ret;

		try_to_generate_entropy();
	} while (!crng_ready());

	return 0;
}
EXPORT_SYMBOL(wait_for_random_bytes);

/*
 * Returns whether the CRNG has been seeded and is thus guaranteed to
 * supply cryptographically secure random numbers.
 */
bool rng_is_initialized(void)
{
	return crng_ready();
}
EXPORT_SYMBOL(rng_is_initialized);
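
/*
 * Add a callback function that will be invoked when the CRNG is
 * initialised.  Returns 0 on success, or a negative error code on
 * failure (-EALREADY if it is already initialised).
 */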
int add_random_ready_callback(struct random_ready_callback *rdy)
{
	struct module *owner;
	unsigned long flags;
	int err = -EALREADY;

	if (crng_ready())
		return err;

	owner = rdy->owner;
	if (!try_module_get(owner))
		return -ENOENT;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	if (crng_ready())
		goto out;

	owner = NULL;

	list_add(&rdy->list, &random_ready_list);
	err = 0;

out:
	spin_unlock_irqrestore(&random_ready_list_lock, flags);

	module_put(owner);

	return err;
}
EXPORT_SYMBOL(add_random_ready_callback);

/*
 * Delete a previously registered readiness callback function.
 */
void del_random_ready_callback(struct random_ready_callback *rdy)
{
	unsigned long flags;
	struct module *owner = NULL;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	if (!list_empty(&rdy->list)) {
		list_del_init(&rdy->list);
		owner = rdy->owner;
	}
	spin_unlock_irqrestore(&random_ready_list_lock, flags);

	module_put(owner);
}
EXPORT_SYMBOL(del_random_ready_callback);
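
/*
 * This function will use the architecture-specific hardware random
 * number generator if it is available.  It is not recommended for
 * general use; use get_random_bytes() instead.  It returns the number
 * of bytes filled in.
 */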
int __must_check get_random_bytes_arch(void *buf, int nbytes)
{
	int left = nbytes;
	char *p = buf;

	trace_get_random_bytes_arch(left, _RET_IP_);
	while (left) {
		unsigned long v;
		int chunk = min_t(int, left, sizeof(unsigned long));

		if (!arch_get_random_long(&v))
			break;

		memcpy(p, &v, chunk);
		p += chunk;
		left -= chunk;
	}

	return nbytes - left;
}
EXPORT_SYMBOL(get_random_bytes_arch);
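
/*
 * init_std_data - initialize pool with system data
 *
 * Mixes the current time, arch-provided seed material and utsname()
 * into the pool to prepare it for use.  Nothing is credited as
 * entropy, and the pool contents are never cleared, as clearing could
 * only decrease the entropy in the pool.
 */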
static void __init init_std_data(struct entropy_store *r)
{
	int i;
	ktime_t now = ktime_get_real();
	unsigned long rv;

	r->last_pulled = jiffies;
	mix_pool_bytes(r, &now, sizeof(now));
	for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) {
		if (!arch_get_random_seed_long(&rv) &&
		    !arch_get_random_long(&rv))
			rv = random_get_entropy();
		mix_pool_bytes(r, &rv, sizeof(rv));
	}
	mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
}
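
/*
 * rand_initialize() seeds both entropy pools and the primary CRNG at
 * boot, and applies the ratelimit_disable module parameter.
 */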
int __init rand_initialize(void)
{
	init_std_data(&input_pool);
	init_std_data(&blocking_pool);
	crng_initialize(&primary_crng);
	crng_global_init_time = jiffies;
	if (ratelimit_disable) {
		urandom_warning.interval = 0;
		unseeded_warning.interval = 0;
	}
	return 0;
}

#ifdef CONFIG_BLOCK
void rand_initialize_disk(struct gendisk *disk)
{
	struct timer_rand_state *state;

	/*
	 * If kzalloc returns null, we just won't use that entropy
	 * source.
	 */
	state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
	if (state) {
		state->last_time = INITIAL_JIFFIES;
		disk->random = state;
	}
}
#endif

static ssize_t
_random_read(int nonblock, char __user *buf, size_t nbytes)
{
	ssize_t n;

	if (nbytes == 0)
		return 0;

	nbytes = min_t(size_t, nbytes, SEC_XFER_SIZE);
	while (1) {
		n = extract_entropy_user(&blocking_pool, buf, nbytes);
		if (n < 0)
			return n;
		trace_random_read(n*8, (nbytes-n)*8,
				  ENTROPY_BITS(&blocking_pool),
				  ENTROPY_BITS(&input_pool));
		if (n > 0)
			return n;

		/* Pool is (near) empty.  Maybe wait and retry. */
		if (nonblock)
			return -EAGAIN;

		wait_event_interruptible(random_read_wait,
			ENTROPY_BITS(&input_pool) >=
			random_read_wakeup_bits);
		if (signal_pending(current))
			return -ERESTARTSYS;
	}
}

static ssize_t
random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
	return _random_read(file->f_flags & O_NONBLOCK, buf, nbytes);
}

static ssize_t
urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
	unsigned long flags;
	static int maxwarn = 10;
	int ret;

	if (!crng_ready() && maxwarn > 0) {
		maxwarn--;
		if (__ratelimit(&urandom_warning))
			printk(KERN_NOTICE "random: %s: uninitialized "
			       "urandom read (%zd bytes read)\n",
			       current->comm, nbytes);
		spin_lock_irqsave(&primary_crng.lock, flags);
		crng_init_cnt = 0;
		spin_unlock_irqrestore(&primary_crng.lock, flags);
	}
	nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
	ret = extract_crng_user(buf, nbytes);
	trace_urandom_read(8 * nbytes, 0, ENTROPY_BITS(&input_pool));
	return ret;
}

static __poll_t
random_poll(struct file *file, poll_table * wait)
{
	__poll_t mask;

	poll_wait(file, &random_read_wait, wait);
	poll_wait(file, &random_write_wait, wait);
	mask = 0;
	if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits)
		mask |= EPOLLIN | EPOLLRDNORM;
	if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
		mask |= EPOLLOUT | EPOLLWRNORM;
	return mask;
}

static __poll_t
extrng_poll(struct file *file, poll_table * wait)
{
	/* external RNG output is always available */
	return EPOLLIN | EPOLLRDNORM;
}

static int
write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
{
	size_t bytes;
	__u32 t, buf[16];
	const char __user *p = buffer;

	while (count > 0) {
		int b, i = 0;

		bytes = min(count, sizeof(buf));
		if (copy_from_user(&buf, p, bytes))
			return -EFAULT;

		for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) {
			if (!arch_get_random_int(&t))
				break;
			buf[i] ^= t;
		}

		count -= bytes;
		p += bytes;

		mix_pool_bytes(r, buf, bytes);
		cond_resched();
	}

	return 0;
}

static ssize_t random_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	size_t ret;

	ret = write_pool(&input_pool, buffer, count);
	if (ret)
		return ret;

	return (ssize_t)count;
}

static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	int size, ent_count;
	int __user *p = (int __user *)arg;
	int retval;

	switch (cmd) {
	case RNDGETENTCNT:
		/* inherently racy, no point locking */
		ent_count = ENTROPY_BITS(&input_pool);
		if (put_user(ent_count, p))
			return -EFAULT;
		return 0;
	case RNDADDTOENTCNT:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p))
			return -EFAULT;
		return credit_entropy_bits_safe(&input_pool, ent_count);
	case RNDADDENTROPY:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p++))
			return -EFAULT;
		if (ent_count < 0)
			return -EINVAL;
		if (get_user(size, p++))
			return -EFAULT;
		retval = write_pool(&input_pool, (const char __user *)p,
				    size);
		if (retval < 0)
			return retval;
		return credit_entropy_bits_safe(&input_pool, ent_count);
	case RNDZAPENTCNT:
	case RNDCLEARPOOL:
		/*
		 * Clear the entropy pool counters.  We no longer clear
		 * the entropy pool, as that's silly.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		input_pool.entropy_count = 0;
		blocking_pool.entropy_count = 0;
		return 0;
	case RNDRESEEDCRNG:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (crng_init < 2)
			return -ENODATA;
		crng_reseed(&primary_crng, NULL);
		crng_global_init_time = jiffies - 1;
		return 0;
	default:
		return -EINVAL;
	}
}

static int random_fasync(int fd, struct file *filp, int on)
{
	return fasync_helper(fd, filp, on, &fasync);
}

static int random_open(struct inode *inode, struct file *filp)
{
	const struct random_extrng *rng;

	rcu_read_lock();
	rng = rcu_dereference(extrng);
	if (rng && !try_module_get(rng->owner))
		rng = NULL;
	rcu_read_unlock();

	if (!rng)
		return 0;

	filp->f_op = &extrng_random_fops;

	return 0;
}

static int urandom_open(struct inode *inode, struct file *filp)
{
	const struct random_extrng *rng;

	rcu_read_lock();
	rng = rcu_dereference(extrng);
	if (rng && !try_module_get(rng->owner))
		rng = NULL;
	rcu_read_unlock();

	if (!rng)
		return 0;

	filp->f_op = &extrng_urandom_fops;

	return 0;
}

static int extrng_release(struct inode *inode, struct file *filp)
{
	module_put(rcu_dereference_raw(extrng)->owner);
	return 0;
}

static ssize_t
extrng_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
	return rcu_dereference_raw(extrng)->extrng_read(buf, nbytes);
}

const struct file_operations random_fops = {
	.open = random_open,
	.read = random_read,
	.write = random_write,
	.poll = random_poll,
	.unlocked_ioctl = random_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
};

const struct file_operations urandom_fops = {
	.open = urandom_open,
	.read = urandom_read,
	.write = random_write,
	.unlocked_ioctl = random_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
};

static const struct file_operations extrng_random_fops = {
	.open = random_open,
	.read = extrng_read,
	.write = random_write,
	.poll = extrng_poll,
	.unlocked_ioctl = random_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
	.release = extrng_release,
};

static const struct file_operations extrng_urandom_fops = {
	.open = urandom_open,
	.read = extrng_read,
	.write = random_write,
	.unlocked_ioctl = random_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
	.release = extrng_release,
};

SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
		unsigned int, flags)
{
	const struct random_extrng *rng;
	int ret;

	if (flags & ~(GRND_NONBLOCK|GRND_RANDOM))
		return -EINVAL;

	if (count > INT_MAX)
		count = INT_MAX;

	rcu_read_lock();
	rng = rcu_dereference(extrng);
	if (rng && !try_module_get(rng->owner))
		rng = NULL;
	rcu_read_unlock();

	if (rng) {
		ret = rng->extrng_read(buf, count);
		/* put the module we actually got, not a later extrng */
		module_put(rng->owner);
		return ret;
	}

	if (flags & GRND_RANDOM)
		return _random_read(flags & GRND_NONBLOCK, buf, count);

	if (!crng_ready()) {
		if (flags & GRND_NONBLOCK)
			return -EAGAIN;
		ret = wait_for_random_bytes();
		if (unlikely(ret))
			return ret;
	}
	return urandom_read(NULL, buf, count, NULL);
}
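
/********************************************************************
 *
 * Sysctl interface
 *
 ********************************************************************/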
#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int min_read_thresh = 8, min_write_thresh;
static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
static int max_write_thresh = INPUT_POOL_WORDS * 32;
static int random_min_urandom_seed = 60;
static char sysctl_bootid[16];
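
/*
 * This function is used to return both the bootid UUID and a freshly
 * generated random UUID.  The difference is whether table->data is
 * NULL; if it is, a new UUID is generated for every read; otherwise
 * the bootid UUID is generated once and then cached.
 */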
static int proc_do_uuid(struct ctl_table *table, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table fake_table;
	unsigned char buf[64], tmp_uuid[16], *uuid;

	uuid = table->data;
	if (!uuid) {
		uuid = tmp_uuid;
		generate_random_uuid(uuid);
	} else {
		static DEFINE_SPINLOCK(bootid_spinlock);

		spin_lock(&bootid_spinlock);
		if (!uuid[8])
			generate_random_uuid(uuid);
		spin_unlock(&bootid_spinlock);
	}

	sprintf(buf, "%pU", uuid);

	fake_table.data = buf;
	fake_table.maxlen = sizeof(buf);

	return proc_dostring(&fake_table, write, buffer, lenp, ppos);
}

/*
 * Return entropy available scaled to integral bits
 */
static int proc_do_entropy(struct ctl_table *table, int write,
			   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table fake_table;
	int entropy_count;

	entropy_count = *(int *)table->data >> ENTROPY_SHIFT;

	fake_table.data = &entropy_count;
	fake_table.maxlen = sizeof(entropy_count);

	return proc_dointvec(&fake_table, write, buffer, lenp, ppos);
}

static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
extern struct ctl_table random_table[];
struct ctl_table random_table[] = {
	{
		.procname	= "poolsize",
		.data		= &sysctl_poolsize,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "entropy_avail",
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_do_entropy,
		.data		= &input_pool.entropy_count,
	},
	{
		.procname	= "read_wakeup_threshold",
		.data		= &random_read_wakeup_bits,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_read_thresh,
		.extra2		= &max_read_thresh,
	},
	{
		.procname	= "write_wakeup_threshold",
		.data		= &random_write_wakeup_bits,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_write_thresh,
		.extra2		= &max_write_thresh,
	},
	{
		.procname	= "urandom_min_reseed_secs",
		.data		= &random_min_urandom_seed,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "boot_id",
		.data		= &sysctl_bootid,
		.maxlen		= 16,
		.mode		= 0444,
		.proc_handler	= proc_do_uuid,
	},
	{
		.procname	= "uuid",
		.maxlen		= 16,
		.mode		= 0444,
		.proc_handler	= proc_do_uuid,
	},
#ifdef ADD_INTERRUPT_BENCH
	{
		.procname	= "add_interrupt_avg_cycles",
		.data		= &avg_cycles,
		.maxlen		= sizeof(avg_cycles),
		.mode		= 0444,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "add_interrupt_avg_deviation",
		.data		= &avg_deviation,
		.maxlen		= sizeof(avg_deviation),
		.mode		= 0444,
		.proc_handler	= proc_doulongvec_minmax,
	},
#endif
	{ }
};
#endif	/* CONFIG_SYSCTL */

struct batched_entropy {
	union {
		u64 entropy_u64[CHACHA20_BLOCK_SIZE / sizeof(u64)];
		u32 entropy_u32[CHACHA20_BLOCK_SIZE / sizeof(u32)];
	};
	unsigned int position;
	spinlock_t batch_lock;
};
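
/*
 * Batched entropy: get_random_u32() and get_random_u64() hand out
 * words from a per-CPU buffer refilled from the CRNG one ChaCha20
 * block at a time.  They are fast, but the batch lingers in memory,
 * so they give no anti-backtracking protection and are meant for
 * internal kernel consumers, not for exporting raw key material.
 */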
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
	.batch_lock	= __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
};

u64 get_random_u64(void)
{
	u64 ret;
	unsigned long flags;
	struct batched_entropy *batch;
	static void *previous;

#if BITS_PER_LONG == 64
	if (arch_get_random_long((unsigned long *)&ret))
		return ret;
#else
	if (arch_get_random_long((unsigned long *)&ret) &&
	    arch_get_random_long((unsigned long *)&ret + 1))
		return ret;
#endif

	warn_unseeded_randomness(&previous);

	batch = raw_cpu_ptr(&batched_entropy_u64);
	spin_lock_irqsave(&batch->batch_lock, flags);
	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
		extract_crng((u8 *)batch->entropy_u64);
		batch->position = 0;
	}
	ret = batch->entropy_u64[batch->position++];
	spin_unlock_irqrestore(&batch->batch_lock, flags);
	return ret;
}
EXPORT_SYMBOL(get_random_u64);

static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
	.batch_lock	= __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
};

u32 get_random_u32(void)
{
	u32 ret;
	unsigned long flags;
	struct batched_entropy *batch;
	static void *previous;

	if (arch_get_random_int(&ret))
		return ret;

	warn_unseeded_randomness(&previous);

	batch = raw_cpu_ptr(&batched_entropy_u32);
	spin_lock_irqsave(&batch->batch_lock, flags);
	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
		extract_crng((u8 *)batch->entropy_u32);
		batch->position = 0;
	}
	ret = batch->entropy_u32[batch->position++];
	spin_unlock_irqrestore(&batch->batch_lock, flags);
	return ret;
}
EXPORT_SYMBOL(get_random_u32);
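
/*
 * Force re-extraction of the per-CPU batches on next use: called when
 * the CRNG transitions to seeded (or is reseeded) so that stale
 * batched output derived from the old state is never handed out.
 */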
static void invalidate_batched_entropy(void)
{
	int cpu;
	unsigned long flags;

	for_each_possible_cpu (cpu) {
		struct batched_entropy *batched_entropy;

		batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
		spin_lock_irqsave(&batched_entropy->batch_lock, flags);
		batched_entropy->position = 0;
		spin_unlock(&batched_entropy->batch_lock);

		batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
		spin_lock(&batched_entropy->batch_lock);
		batched_entropy->position = 0;
		spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
	}
}
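
/**
 * randomize_page - Generate a random, page aligned address
 * @start:	The smallest acceptable address the caller will take.
 * @range:	The size of the area, starting at @start, within which the
 *		random address must fall.
 *
 * If @start + @range would overflow, @range is capped.
 *
 * NOTE: Historical use of randomize_range, which this replaces, presumed that
 * @start was already page aligned.  We now align it regardless.
 *
 * Return: A page aligned address within [start, start + range).  On error,
 * @start is returned.
 */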
unsigned long
randomize_page(unsigned long start, unsigned long range)
{
	if (!PAGE_ALIGNED(start)) {
		range -= PAGE_ALIGN(start) - start;
		start = PAGE_ALIGN(start);
	}

	if (start > ULONG_MAX - range)
		range = ULONG_MAX - start;

	range >>= PAGE_SHIFT;

	if (range == 0)
		return start;

	return start + (get_random_long() % range << PAGE_SHIFT);
}
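
/*
 * Interface for in-kernel drivers of true hardware RNGs.  Those
 * devices may produce endless random bits, so this function rate
 * limits them by waiting until the input pool actually wants more
 * entropy before mixing in and crediting the contribution.
 */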
void add_hwgenerator_randomness(const char *buffer, size_t count,
				size_t entropy)
{
	struct entropy_store *poolp = &input_pool;

	if (unlikely(crng_init == 0)) {
		crng_fast_load(buffer, count);
		return;
	}

	/*
	 * Suspend writing if we're above the trickle threshold.
	 * We'll be woken up again once below random_write_wakeup_bits,
	 * or when the calling thread is about to terminate.
	 */
	wait_event_interruptible(random_write_wait, kthread_should_stop() ||
			ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
	mix_pool_bytes(poolp, buffer, count);
	credit_entropy_bits(poolp, entropy);
}
EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);

void random_register_extrng(const struct random_extrng *rng)
{
	rcu_assign_pointer(extrng, rng);
}
EXPORT_SYMBOL_GPL(random_register_extrng);

void random_unregister_extrng(void)
{
	RCU_INIT_POINTER(extrng, NULL);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(random_unregister_extrng);