1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238#include <linux/utsname.h>
239#include <linux/module.h>
240#include <linux/kernel.h>
241#include <linux/major.h>
242#include <linux/string.h>
243#include <linux/fcntl.h>
244#include <linux/slab.h>
245#include <linux/random.h>
246#include <linux/poll.h>
247#include <linux/init.h>
248#include <linux/fs.h>
249#include <linux/genhd.h>
250#include <linux/interrupt.h>
251#include <linux/mm.h>
252#include <linux/spinlock.h>
253#include <linux/kthread.h>
254#include <linux/percpu.h>
255#include <linux/cryptohash.h>
256#include <linux/fips.h>
257#include <linux/ptrace.h>
258#include <linux/kmemcheck.h>
259#include <linux/workqueue.h>
260#include <linux/irq.h>
261#include <linux/syscalls.h>
262#include <linux/completion.h>
263
264#include <asm/processor.h>
265#include <asm/uaccess.h>
266#include <asm/irq.h>
267#include <asm/irq_regs.h>
268#include <asm/io.h>
269
270#define CREATE_TRACE_POINTS
271#include <trace/events/random.h>
272
273
274
275
276
277
/* Input (primary) pool: 1 << 12 = 4096 bits = 128 words. */
#define INPUT_POOL_SHIFT 12
#define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5))
/* Each output pool: 1 << 10 = 1024 bits = 32 words. */
#define OUTPUT_POOL_SHIFT 10
#define OUTPUT_POOL_WORDS (1 << (OUTPUT_POOL_SHIFT-5))
/* Largest chunk serviced per /dev/random read (bytes). */
#define SEC_XFER_SIZE 512
/* Bytes produced per hash extraction: 80 bits of folded SHA-1 output. */
#define EXTRACT_SIZE 10

/* Set non-zero to log early get_random_bytes() callers during boot. */
#define DEBUG_RANDOM_BOOT 0

/* Number of unsigned longs needed to hold x bytes. */
#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))

/*
 * Entropy counts are kept in fixed point with ENTROPY_SHIFT fractional
 * bits; ENTROPY_BITS() converts a pool's count back to whole bits.
 */
#define ENTROPY_SHIFT 3
#define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)

/*
 * Minimum bits of input-pool entropy before /dev/random readers are
 * woken (also the poll() readability threshold; sysctl
 * "read_wakeup_threshold").
 */
static int random_read_wakeup_bits = 64;

/*
 * When the input pool drains below this many bits, blocked writers are
 * woken to add more entropy (sysctl "write_wakeup_threshold").
 */
static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;

/*
 * Minimum seconds between reseedings of the non-blocking (urandom)
 * pool from the input pool (sysctl "urandom_min_reseed_secs").
 */
static int random_min_urandom_seed = 60;
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
/*
 * Pool geometry plus feedback tap offsets for each supported pool
 * size.  The five taps are the offsets _mix_pool_bytes() XORs into
 * each incoming word (a twisted feedback shift register layout).
 *
 * S(x) expands to the derived size fields for a pool of x words:
 * log2 of the bit size, word count, byte count, bit count, and the
 * bit count scaled into ENTROPY_SHIFT fixed point.
 */
static struct poolinfo {
	int poolbitshift, poolwords, poolbytes, poolbits, poolfracbits;
#define S(x) ilog2(x)+5, (x), (x)*4, (x)*32, (x) << (ENTROPY_SHIFT+5)
	int tap1, tap2, tap3, tap4, tap5;
} poolinfo_table[] = {
	/* 128 words (4096 bits): geometry of the input pool. */
	{ S(128), 104, 76, 51, 25, 1 },

	/* 32 words (1024 bits): geometry of both output pools. */
	{ S(32), 26, 19, 14, 7, 1 },
#if 0
	/* Alternative sizes, kept for reference but compiled out. */
	{ S(2048), 1638, 1231, 819, 411, 1 },

	{ S(1024), 817, 615, 412, 204, 1 },

	{ S(1024), 819, 616, 410, 207, 2 },

	{ S(512), 411, 308, 208, 104, 1 },

	{ S(512), 409, 307, 206, 102, 2 },

	{ S(512), 409, 309, 205, 103, 2 },

	{ S(256), 205, 155, 101, 52, 1 },

	{ S(128), 103, 78, 51, 27, 2 },

	{ S(64), 52, 39, 26, 14, 1 },
#endif
};
403
404
405
406
/* Readers sleep here until the input pool reaches the read threshold. */
static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
/* Writers sleep here until the input pool drains below the write threshold. */
static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
/* getrandom(2) callers sleep here until the nonblocking pool is seeded. */
static DECLARE_WAIT_QUEUE_HEAD(urandom_init_wait);
static struct fasync_struct *fasync;

/*
 * State for one entropy pool.  Output pools name the input pool via
 * ->pull and are reseeded from it; ->push_work is scheduled by
 * credit_entropy_bits() to push surplus entropy the other way.
 */
struct entropy_store;
struct entropy_store {
	/* mostly-read data: */
	const struct poolinfo *poolinfo;	/* geometry + feedback taps */
	__u32 *pool;				/* the pool words themselves */
	const char *name;
	struct entropy_store *pull;		/* pool we reseed from */
	struct work_struct push_work;

	/* read-write data: */
	unsigned long last_pulled;	/* jiffies of last reseed pull */
	spinlock_t lock;		/* guards pool, add_ptr, input_rotate */
	unsigned short add_ptr;		/* current mixing position */
	unsigned short input_rotate;	/* running rotation for input bytes */
	int entropy_count;		/* fractional bits (ENTROPY_SHIFT) */
	int entropy_total;		/* bits credited since last reset */
	unsigned int initialized:1;	/* set after 128 bits credited */
	unsigned int limit:1;		/* debit entropy on extraction */
	unsigned int last_data_init:1;
	__u8 last_data[EXTRACT_SIZE];	/* FIPS continuous-test sample */
};

static void push_to_pool(struct work_struct *work);
static __u32 input_pool_data[INPUT_POOL_WORDS];
static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
445
/* Primary pool: collects raw entropy from every input source. */
static struct entropy_store input_pool = {
	.poolinfo = &poolinfo_table[0],
	.name = "input",
	.limit = 1,
	.lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
	.pool = input_pool_data
};

/* Output pool backing /dev/random; reseeded from the input pool. */
static struct entropy_store blocking_pool = {
	.poolinfo = &poolinfo_table[1],
	.name = "blocking",
	.limit = 1,
	.pull = &input_pool,
	.lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock),
	.pool = blocking_pool_data,
	.push_work = __WORK_INITIALIZER(blocking_pool.push_work,
					push_to_pool),
};

/* Output pool backing /dev/urandom and get_random_bytes(). */
static struct entropy_store nonblocking_pool = {
	.poolinfo = &poolinfo_table[1],
	.name = "nonblocking",
	.pull = &input_pool,
	.lock = __SPIN_LOCK_UNLOCKED(nonblocking_pool.lock),
	.pool = nonblocking_pool_data,
	.push_work = __WORK_INITIALIZER(nonblocking_pool.push_work,
					push_to_pool),
};

/*
 * Constants indexed by the low 3 bits of the feedback word in
 * _mix_pool_bytes(); XORed back in after the >> 3 shift so no input
 * bits are simply discarded.
 */
static __u32 const twist_table[8] = {
	0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
	0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
478
479
480
481
482
483
484
485
486
487
488
/*
 * Mix @nbytes bytes from @in into pool @r, one byte at a time.
 *
 * Each input byte is rotated by the pool's running input_rotate,
 * XORed with the pool word at the current position and at the five
 * tap offsets, then folded back in via twist_table.  Lockless: the
 * caller must hold r->lock (or otherwise serialize) since add_ptr,
 * input_rotate and the pool words are all read-modify-written here.
 */
static void _mix_pool_bytes(struct entropy_store *r, const void *in,
			    int nbytes)
{
	unsigned long i, tap1, tap2, tap3, tap4, tap5;
	int input_rotate;
	int wordmask = r->poolinfo->poolwords - 1;
	const char *bytes = in;
	__u32 w;

	tap1 = r->poolinfo->tap1;
	tap2 = r->poolinfo->tap2;
	tap3 = r->poolinfo->tap3;
	tap4 = r->poolinfo->tap4;
	tap5 = r->poolinfo->tap5;

	input_rotate = r->input_rotate;
	i = r->add_ptr;

	/* mix one byte at a time to simplify size handling and churn faster */
	while (nbytes--) {
		w = rol32(*bytes++, input_rotate);
		i = (i - 1) & wordmask;	/* walk the pool backwards */

		/* XOR in the current word and the five tap words */
		w ^= r->pool[i];
		w ^= r->pool[(i + tap1) & wordmask];
		w ^= r->pool[(i + tap2) & wordmask];
		w ^= r->pool[(i + tap3) & wordmask];
		w ^= r->pool[(i + tap4) & wordmask];
		w ^= r->pool[(i + tap5) & wordmask];

		/* twist: shift right 3, folding the low 3 bits back in */
		r->pool[i] = (w >> 3) ^ twist_table[w & 7];

		/*
		 * Normally advance the rotation by 7; use 14 on the pass
		 * through word 0 so successive sweeps of the pool do not
		 * line up the same way.
		 */
		input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
	}

	r->input_rotate = input_rotate;
	r->add_ptr = i;
}
535
536static void __mix_pool_bytes(struct entropy_store *r, const void *in,
537 int nbytes)
538{
539 trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
540 _mix_pool_bytes(r, in, nbytes);
541}
542
543static void mix_pool_bytes(struct entropy_store *r, const void *in,
544 int nbytes)
545{
546 unsigned long flags;
547
548 trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
549 spin_lock_irqsave(&r->lock, flags);
550 _mix_pool_bytes(r, in, nbytes);
551 spin_unlock_irqrestore(&r->lock, flags);
552}
553
/*
 * Small per-CPU pool used to accumulate interrupt entropy cheaply
 * before it is spilled into a real pool; see
 * add_interrupt_randomness().
 */
struct fast_pool {
	__u32 pool[4];
	unsigned long last;		/* jiffies when last spilled */
	unsigned short reg_idx;		/* next pt_regs word for get_reg() */
	unsigned char count;		/* events mixed since last spill */
};
560
561
562
563
564
565
/*
 * Quickly stir the 128-bit fast_pool in place.  Four add/rotate/xor
 * rounds (rotation-constant pairs 6/27 and 16/14, each pair applied
 * twice) mix all four words into each other; f->count records how
 * many events have been mixed since the pool was last drained.
 */
static void fast_mix(struct fast_pool *f)
{
	__u32 a = f->pool[0], b = f->pool[1];
	__u32 c = f->pool[2], d = f->pool[3];

	a += b; c += d;
	b = rol32(a, 6); d = rol32(c, 27);
	d ^= a; b ^= c;

	a += b; c += d;
	b = rol32(a, 16); d = rol32(c, 14);
	d ^= a; b ^= c;

	a += b; c += d;
	b = rol32(a, 6); d = rol32(c, 27);
	d ^= a; b ^= c;

	a += b; c += d;
	b = rol32(a, 16); d = rol32(c, 14);
	d ^= a; b ^= c;

	f->pool[0] = a; f->pool[1] = b;
	f->pool[2] = c; f->pool[3] = d;
	f->count++;
}
591
592
593
594
595
596
/*
 * Credit (or, for negative @nbits, debit) pool @r's entropy estimate.
 * The count is kept in ENTROPY_SHIFT fixed point and is updated
 * locklessly via a cmpxchg retry loop, so this is safe from any
 * context.  For the input pool this also wakes waiting readers and
 * may schedule a push of surplus entropy into an output pool.
 */
static void credit_entropy_bits(struct entropy_store *r, int nbits)
{
	int entropy_count, orig;
	const int pool_size = r->poolinfo->poolfracbits;
	int nfrac = nbits << ENTROPY_SHIFT;

	if (!nbits)
		return;

retry:
	entropy_count = orig = ACCESS_ONCE(r->entropy_count);
	if (nfrac < 0) {
		/* Debit is applied linearly. */
		entropy_count += nfrac;
	} else {
		/*
		 * Credit discounts for possibly overwriting entropy
		 * already in the pool: only 3/4 of the nominal amount
		 * is added, further scaled by how empty the pool is,
		 * so the count approaches capacity asymptotically:
		 *
		 *   add = 3/4 * anfrac * (pool_size - entropy_count)
		 *		/ pool_size
		 *
		 * The >> s below folds the divisions by pool_size and
		 * by 4 into one shift (s = log2(pool_size) + 2).
		 * Credits larger than half the pool are applied in
		 * instalments so this linear approximation of the
		 * exponential stays accurate.
		 */
		int pnfrac = nfrac;
		const int s = r->poolinfo->poolbitshift + ENTROPY_SHIFT + 2;

		do {
			unsigned int anfrac = min(pnfrac, pool_size/2);
			unsigned int add =
				((pool_size - entropy_count)*anfrac*3) >> s;

			entropy_count += add;
			pnfrac -= anfrac;
		} while (unlikely(entropy_count < pool_size-2 && pnfrac));
	}

	if (unlikely(entropy_count < 0)) {
		pr_warn("random: negative entropy/overflow: pool %s count %d\n",
			r->name, entropy_count);
		WARN_ON(1);
		entropy_count = 0;
	} else if (entropy_count > pool_size)
		entropy_count = pool_size;
	if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
		goto retry;

	r->entropy_total += nbits;
	/* Declare the pool initialized once 128 bits have been credited. */
	if (!r->initialized && r->entropy_total > 128) {
		r->initialized = 1;
		r->entropy_total = 0;
		if (r == &nonblocking_pool) {
			prandom_reseed_late();
			wake_up_interruptible(&urandom_init_wait);
			pr_notice("random: %s pool is initialized\n", r->name);
		}
	}

	trace_credit_entropy_bits(r->name, nbits,
				  entropy_count >> ENTROPY_SHIFT,
				  r->entropy_total, _RET_IP_);

	if (r == &input_pool) {
		int entropy_bits = entropy_count >> ENTROPY_SHIFT;

		/* Wake /dev/random readers once the read threshold is met. */
		if (entropy_bits >= random_read_wakeup_bits) {
			wake_up_interruptible(&random_read_wait);
			kill_fasync(&fasync, SIGIO, POLL_IN);
		}

		/*
		 * With a genuine surplus, push entropy to one of the
		 * output pools, alternating between them but skipping
		 * any pool that is already at least 3/4 full.
		 */
		if (entropy_bits > random_write_wakeup_bits &&
		    r->initialized &&
		    r->entropy_total >= 2*random_read_wakeup_bits) {
			static struct entropy_store *last = &blocking_pool;
			struct entropy_store *other = &blocking_pool;

			if (last == &blocking_pool)
				other = &nonblocking_pool;
			if (other->entropy_count <=
			    3 * other->poolinfo->poolfracbits / 4)
				last = other;
			if (last->entropy_count <=
			    3 * last->poolinfo->poolfracbits / 4) {
				schedule_work(&last->push_work);
				r->entropy_total = 0;
			}
		}
	}
}
704
705static void credit_entropy_bits_safe(struct entropy_store *r, int nbits)
706{
707 const int nbits_max = (int)(~0U >> (ENTROPY_SHIFT + 1));
708
709
710 nbits = min(nbits, nbits_max);
711 nbits = max(nbits, -nbits_max);
712
713 credit_entropy_bits(r, nbits);
714}
715
716
717
718
719
720
721
722
/*
 * Per-source timing history used by add_timer_randomness() to
 * estimate entropy from first/second/third-order event-time deltas.
 */
struct timer_rand_state {
	cycles_t last_time;
	long last_delta, last_delta2;
	unsigned dont_count_entropy:1;	/* mix but never credit entropy */
};

#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, };
730
731
732
733
734
735
736
737
738
/*
 * add_device_randomness - mix device-specific data into the pools
 * @buf: data (e.g. serial numbers, MAC addresses) to mix in
 * @size: length of @buf in bytes
 *
 * Mixes @buf plus a timestamp into both the input and nonblocking
 * pools.  No entropy is credited: the data is not assumed to be
 * unpredictable, it merely differs between machines and boots.
 */
void add_device_randomness(const void *buf, unsigned int size)
{
	unsigned long time = random_get_entropy() ^ jiffies;
	unsigned long flags;

	trace_add_device_randomness(size, _RET_IP_);
	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(&input_pool, buf, size);
	_mix_pool_bytes(&input_pool, &time, sizeof(time));
	spin_unlock_irqrestore(&input_pool.lock, flags);

	spin_lock_irqsave(&nonblocking_pool.lock, flags);
	_mix_pool_bytes(&nonblocking_pool, buf, size);
	_mix_pool_bytes(&nonblocking_pool, &time, sizeof(time));
	spin_unlock_irqrestore(&nonblocking_pool.lock, flags);
}
EXPORT_SYMBOL(add_device_randomness);

/* Shared timing state for all input-device events. */
static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE;
758
759
760
761
762
763
764
765
766
767
768
/*
 * Mix a timing-based event into a pool and credit entropy estimated
 * from how unpredictable its timing was.
 * @state: per-source delta history
 * @num: event identifier, mixed in alongside the timestamps
 *
 * The credit is derived from the smallest of the first, second and
 * third order deltas of the event time, capped at 11 bits.  Until the
 * nonblocking pool is initialized, events feed it directly; after
 * that they go to the input pool.
 */
static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
{
	struct entropy_store *r;
	struct {
		long jiffies;
		unsigned cycles;
		unsigned num;
	} sample;
	long delta, delta2, delta3;

	preempt_disable();

	sample.jiffies = jiffies;
	sample.cycles = random_get_entropy();
	sample.num = num;
	r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
	mix_pool_bytes(r, &sample, sizeof(sample));

	/*
	 * Estimate entropy from the jiffies deltas, approximating the
	 * event stream as a sequence of independent arrival times.
	 */
	if (!state->dont_count_entropy) {
		delta = sample.jiffies - state->last_time;
		state->last_time = sample.jiffies;

		delta2 = delta - state->last_delta;
		state->last_delta = delta;

		delta3 = delta2 - state->last_delta2;
		state->last_delta2 = delta2;

		if (delta < 0)
			delta = -delta;
		if (delta2 < 0)
			delta2 = -delta2;
		if (delta3 < 0)
			delta3 = -delta3;
		if (delta > delta2)
			delta = delta2;
		if (delta > delta3)
			delta = delta3;

		/*
		 * delta is now the minimum absolute delta; drop one bit
		 * and count its remaining magnitude, at most 11 bits.
		 */
		credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
	}
	preempt_enable();
}
823
/*
 * Feed an input-layer event (keyboard/mouse) into the entropy
 * machinery via add_timer_randomness().
 *
 * Auto-repeat suppression: an event whose value matches the previous
 * one is ignored.  NOTE(review): last_value is an unsigned char, so
 * only the low 8 bits of the previous value are remembered for the
 * comparison -- presumably intentional for key codes, but worth
 * confirming.
 */
void add_input_randomness(unsigned int type, unsigned int code,
			  unsigned int value)
{
	static unsigned char last_value;

	/* ignore autorepeat and the like */
	if (value == last_value)
		return;

	last_value = value;
	add_timer_randomness(&input_timer_state,
			     (type << 4) ^ code ^ (code >> 4) ^ value);
	trace_add_input_randomness(ENTROPY_BITS(&input_pool));
}
EXPORT_SYMBOL_GPL(add_input_randomness);
839
/* One fast_pool per CPU, filled from the interrupt path. */
static DEFINE_PER_CPU(struct fast_pool, irq_randomness);

#ifdef ADD_INTERRUPT_BENCH
/*
 * Optional instrumentation (exposed via sysctl): exponentially
 * weighted moving average of the interrupt-path cycle cost and its
 * mean absolute deviation, with weight 1/2^AVG_SHIFT.
 */
static unsigned long avg_cycles, avg_deviation;

#define AVG_SHIFT 8	/* EWMA weight = 1/256 */
#define FIXED_1_2 (1 << (AVG_SHIFT-1))	/* rounding term (0.5) */

static void add_interrupt_bench(cycles_t start)
{
	long delta = random_get_entropy() - start;

	/* Use a weighted moving average to track the cycle cost. */
	delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT);
	avg_cycles += delta;
	/* And the mean deviation from that average. */
	delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT);
	avg_deviation += delta;
}
#else
#define add_interrupt_bench(x)
#endif
862
863static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
864{
865 __u32 *ptr = (__u32 *) regs;
866
867 if (regs == NULL)
868 return 0;
869 if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32))
870 f->reg_idx = 0;
871 return *(ptr + f->reg_idx++);
872}
873
/*
 * Called from the interrupt path: XOR the cycle counter, jiffies, irq
 * number and interrupted instruction pointer into this CPU's
 * fast_pool and stir it.  The fast_pool is spilled into a real pool
 * at most once per second, or once 64 events have accumulated, and is
 * credited a single bit of entropy (plus one more if the architecture
 * supplied a hardware seed).
 */
void add_interrupt_randomness(int irq, int irq_flags)
{
	struct entropy_store *r;
	struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
	struct pt_regs *regs = get_irq_regs();
	unsigned long now = jiffies;
	cycles_t cycles = random_get_entropy();
	__u32 c_high, j_high;
	__u64 ip;
	unsigned long seed;
	int credit = 0;

	/* No cycle counter?  Fall back to saved register contents. */
	if (cycles == 0)
		cycles = get_reg(fast_pool, regs);
	c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
	j_high = (sizeof(now) > 4) ? now >> 32 : 0;
	fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
	fast_pool->pool[1] ^= now ^ c_high;
	ip = regs ? instruction_pointer(regs) : _RET_IP_;
	fast_pool->pool[2] ^= ip;
	fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
		get_reg(fast_pool, regs);

	fast_mix(fast_pool);
	add_interrupt_bench(cycles);

	/* Spill at most once per second or after 64 mixed events. */
	if ((fast_pool->count < 64) &&
	    !time_after(now, fast_pool->last + HZ))
		return;

	r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
	if (!spin_trylock(&r->lock))
		return;	/* contended: try again on a later interrupt */

	fast_pool->last = now;
	__mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));

	/*
	 * If the architecture has a hardware seed source (e.g.
	 * RDSEED), mix one word of it in as well and take an extra
	 * bit of credit for it.
	 */
	if (arch_get_random_seed_long(&seed)) {
		__mix_pool_bytes(r, &seed, sizeof(seed));
		credit = 1;
	}
	spin_unlock(&r->lock);

	fast_pool->count = 0;

	/* award one bit for the contents of the fast pool */
	credit_entropy_bits(r, credit + 1);
}
928
#ifdef CONFIG_BLOCK
/*
 * Feed block-device completion timing into the entropy machinery,
 * using the per-disk timer_rand_state allocated by
 * rand_initialize_disk().
 */
void add_disk_randomness(struct gendisk *disk)
{
	if (!disk || !disk->random)
		return;
	/* first major is 1, so we get >= 0x200 here */
	add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
	trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool));
}
EXPORT_SYMBOL_GPL(add_disk_randomness);
#endif
940
941
942
943
944
945
946
/* Forward declaration: core extraction routine, defined below. */
static ssize_t extract_entropy(struct entropy_store *r, void *buf,
			       size_t nbytes, int min, int rsvd);
949
950
951
952
953
954
static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes);
/*
 * Reseed output pool @r from its ->pull source if needed before
 * extracting @nbytes bytes.  The transfer is skipped when the pool
 * already holds enough entropy for the request, and reseeds of
 * non-limited pools (urandom) are rate-limited to one per
 * random_min_urandom_seed seconds.
 */
static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
{
	if (!r->pull ||
	    r->entropy_count >= (nbytes << (ENTROPY_SHIFT + 3)) ||
	    r->entropy_count > r->poolinfo->poolfracbits)
		return;

	if (r->limit == 0 && random_min_urandom_seed) {
		unsigned long now = jiffies;

		if (time_before(now,
				r->last_pulled + random_min_urandom_seed * HZ))
			return;
		r->last_pulled = now;
	}

	_xfer_secondary_pool(r, nbytes);
}
974
/*
 * Actually pull entropy from r->pull into @r: extract up to
 * OUTPUT_POOL_WORDS*4 bytes (at least enough to reach the read
 * threshold), mix them in and credit them at 8 bits per byte.
 * Non-limited pools leave a reserve in the source so /dev/random
 * readers are not starved by urandom reseeds.
 */
static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
{
	__u32 tmp[OUTPUT_POOL_WORDS];

	/* For /dev/random's pool, always leave two wakeups' worth */
	int rsvd_bytes = r->limit ? 0 : random_read_wakeup_bits / 4;
	int bytes = nbytes;

	/* pull at least as much as a wakeup */
	bytes = max_t(int, bytes, random_read_wakeup_bits / 8);
	/* but never more than the buffer size */
	bytes = min_t(int, bytes, sizeof(tmp));

	trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8,
				  ENTROPY_BITS(r), ENTROPY_BITS(r->pull));
	bytes = extract_entropy(r->pull, tmp, bytes,
				random_read_wakeup_bits / 8, rsvd_bytes);
	mix_pool_bytes(r, tmp, bytes);
	credit_entropy_bits(r, bytes*8);
}
995
996
997
998
999
1000
1001
1002static void push_to_pool(struct work_struct *work)
1003{
1004 struct entropy_store *r = container_of(work, struct entropy_store,
1005 push_work);
1006 BUG_ON(!r);
1007 _xfer_secondary_pool(r, random_read_wakeup_bits/8);
1008 trace_push_to_pool(r->name, r->entropy_count >> ENTROPY_SHIFT,
1009 r->pull->entropy_count >> ENTROPY_SHIFT);
1010}
1011
1012
1013
1014
1015
/*
 * Decide how many bytes may actually be extracted from @r for a
 * request of @nbytes, debit the corresponding entropy, and wake any
 * writers if the pool has drained below the write threshold.
 * @min: return 0 rather than anything smaller than this many bytes
 * @reserved: bytes of entropy to leave untouched in the pool
 *
 * Lockless: entropy_count is updated with a cmpxchg retry loop.
 */
static size_t account(struct entropy_store *r, size_t nbytes, int min,
		      int reserved)
{
	int entropy_count, orig;
	size_t ibytes, nfrac;

	BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);

	/* Can we pull enough? */
retry:
	entropy_count = orig = ACCESS_ONCE(r->entropy_count);
	ibytes = nbytes;
	/* If limited, never pull more than available (minus the reserve). */
	if (r->limit) {
		int have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);

		if ((have_bytes -= reserved) < 0)
			have_bytes = 0;
		ibytes = min_t(size_t, ibytes, have_bytes);
	}
	if (ibytes < min)
		ibytes = 0;

	if (unlikely(entropy_count < 0)) {
		pr_warn("random: negative entropy count: pool %s count %d\n",
			r->name, entropy_count);
		WARN_ON(1);
		entropy_count = 0;
	}
	nfrac = ibytes << (ENTROPY_SHIFT + 3);
	if ((size_t) entropy_count > nfrac)
		entropy_count -= nfrac;
	else
		entropy_count = 0;

	if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
		goto retry;

	trace_debit_entropy(r->name, 8 * ibytes);
	if (ibytes &&
	    (r->entropy_count >> ENTROPY_SHIFT) < random_write_wakeup_bits) {
		wake_up_interruptible(&random_write_wait);
		kill_fasync(&fasync, SIGIO, POLL_OUT);
	}

	return ibytes;
}
1063
1064
1065
1066
1067
1068
1069
/*
 * Produce EXTRACT_SIZE (10) bytes of output from pool @r: SHA-1 the
 * whole pool (seeding the initial hash state from the arch RNG when
 * available), mix the hash back into the pool so no extracted data is
 * ever left recoverable in it, then fold the 160-bit digest in half
 * to 80 bits so the full hash output is never exposed.
 */
static void extract_buf(struct entropy_store *r, __u8 *out)
{
	int i;
	union {
		__u32 w[5];
		unsigned long l[LONGS(20)];
	} hash;
	__u32 workspace[SHA_WORKSPACE_WORDS];
	unsigned long flags;

	/*
	 * If we have an architectural hardware random number
	 * generator, use it for SHA's initial vector.
	 */
	sha_init(hash.w);
	for (i = 0; i < LONGS(20); i++) {
		unsigned long v;
		if (!arch_get_random_long(&v))
			break;
		hash.l[i] = v;
	}

	/* Generate a hash across the pool, 16 words (512 bits) at a time */
	spin_lock_irqsave(&r->lock, flags);
	for (i = 0; i < r->poolinfo->poolwords; i += 16)
		sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);

	/*
	 * Mix the hash back into the pool: a backtracking attacker who
	 * later learns the pool state still cannot reconstruct this
	 * output without inverting the hash.
	 */
	__mix_pool_bytes(r, hash.w, sizeof(hash.w));
	spin_unlock_irqrestore(&r->lock, flags);

	memzero_explicit(workspace, sizeof(workspace));

	/*
	 * Fold the 20-byte hash down to 10 bytes: the two halves are
	 * XORed together, and the odd middle word is folded onto
	 * itself with a 16-bit rotate.
	 */
	hash.w[0] ^= hash.w[3];
	hash.w[1] ^= hash.w[4];
	hash.w[2] ^= rol32(hash.w[2], 16);

	memcpy(out, &hash, EXTRACT_SIZE);
	memzero_explicit(&hash, sizeof(hash));
}
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
/*
 * Extract up to @nbytes bytes from pool @r into kernel buffer @buf,
 * reseeding from the pull pool first and debiting entropy via
 * account().  Returns the number of bytes actually produced (may be
 * less than @nbytes, or 0, for a limited pool).
 * @min: produce nothing rather than fewer than this many bytes
 * @reserved: entropy (bytes) to leave in the pool for other readers
 *
 * With fips_enabled, consecutive extract_buf() outputs are compared
 * (FIPS 140-2 continuous test) and the kernel panics on a repeat.
 */
static ssize_t extract_entropy(struct entropy_store *r, void *buf,
			       size_t nbytes, int min, int reserved)
{
	ssize_t ret = 0, i;
	__u8 tmp[EXTRACT_SIZE];
	unsigned long flags;

	/* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
	if (fips_enabled) {
		spin_lock_irqsave(&r->lock, flags);
		if (!r->last_data_init) {
			r->last_data_init = 1;
			spin_unlock_irqrestore(&r->lock, flags);
			trace_extract_entropy(r->name, EXTRACT_SIZE,
					      ENTROPY_BITS(r), _RET_IP_);
			xfer_secondary_pool(r, EXTRACT_SIZE);
			extract_buf(r, tmp);
			spin_lock_irqsave(&r->lock, flags);
			memcpy(r->last_data, tmp, EXTRACT_SIZE);
		}
		spin_unlock_irqrestore(&r->lock, flags);
	}

	trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
	xfer_secondary_pool(r, nbytes);
	nbytes = account(r, nbytes, min, reserved);

	while (nbytes) {
		extract_buf(r, tmp);

		if (fips_enabled) {
			spin_lock_irqsave(&r->lock, flags);
			if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
				panic("Hardware RNG duplicated output!\n");
			memcpy(r->last_data, tmp, EXTRACT_SIZE);
			spin_unlock_irqrestore(&r->lock, flags);
		}
		i = min_t(int, nbytes, EXTRACT_SIZE);
		memcpy(buf, tmp, i);
		nbytes -= i;
		buf += i;
		ret += i;
	}

	/* Wipe data just returned from memory */
	memzero_explicit(tmp, sizeof(tmp));

	return ret;
}
1182
1183
1184
1185
1186
/*
 * Like extract_entropy(), but copies to a userspace buffer.  Large
 * (>256 byte) requests yield to the scheduler between chunks and
 * abort with -ERESTARTSYS on a pending signal.  Returns bytes
 * written, 0, or a negative errno.
 */
static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
				    size_t nbytes)
{
	ssize_t ret = 0, i;
	__u8 tmp[EXTRACT_SIZE];
	int large_request = (nbytes > 256);

	trace_extract_entropy_user(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
	xfer_secondary_pool(r, nbytes);
	nbytes = account(r, nbytes, 0, 0);

	while (nbytes) {
		if (large_request && need_resched()) {
			if (signal_pending(current)) {
				if (ret == 0)
					ret = -ERESTARTSYS;
				break;
			}
			schedule();
		}

		extract_buf(r, tmp);
		i = min_t(int, nbytes, EXTRACT_SIZE);
		if (copy_to_user(buf, tmp, i)) {
			ret = -EFAULT;
			break;
		}

		nbytes -= i;
		buf += i;
		ret += i;
	}

	/* Wipe data just returned from memory */
	memzero_explicit(tmp, sizeof(tmp));

	return ret;
}
1225
1226
1227
1228
1229
1230
1231
1232
/*
 * get_random_bytes - fill a kernel buffer with random bytes
 * @buf: destination buffer
 * @nbytes: number of bytes to produce
 *
 * Draws from the nonblocking pool; equivalent in strength to a
 * /dev/urandom read, and never blocks.  With DEBUG_RANDOM_BOOT set,
 * callers that run before the pool is initialized are logged.
 */
void get_random_bytes(void *buf, int nbytes)
{
#if DEBUG_RANDOM_BOOT > 0
	if (unlikely(nonblocking_pool.initialized == 0))
		printk(KERN_NOTICE "random: %pF get_random_bytes called "
		       "with %d bits of entropy available\n",
		       (void *) _RET_IP_,
		       nonblocking_pool.entropy_total);
#endif
	trace_get_random_bytes(nbytes, _RET_IP_);
	extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
}
EXPORT_SYMBOL(get_random_bytes);
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257void get_random_bytes_arch(void *buf, int nbytes)
1258{
1259 char *p = buf;
1260
1261 trace_get_random_bytes_arch(nbytes, _RET_IP_);
1262 while (nbytes) {
1263 unsigned long v;
1264 int chunk = min(nbytes, (int)sizeof(unsigned long));
1265
1266 if (!arch_get_random_long(&v))
1267 break;
1268
1269 memcpy(p, &v, chunk);
1270 p += chunk;
1271 nbytes -= chunk;
1272 }
1273
1274 if (nbytes)
1275 extract_entropy(&nonblocking_pool, p, nbytes, 0, 0);
1276}
1277EXPORT_SYMBOL(get_random_bytes_arch);
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
/*
 * Seed one pool at boot with whatever weak uniqueness is at hand: the
 * real-time clock, one arch-RNG (or cycle-counter) word per pool
 * word, and utsname().  Nothing here is credited as entropy; it
 * merely ensures different machines and boots start from different
 * pool states.
 */
static void init_std_data(struct entropy_store *r)
{
	int i;
	ktime_t now = ktime_get_real();
	unsigned long rv;

	r->last_pulled = jiffies;
	mix_pool_bytes(r, &now, sizeof(now));
	for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) {
		if (!arch_get_random_seed_long(&rv) &&
		    !arch_get_random_long(&rv))
			rv = random_get_entropy();
		mix_pool_bytes(r, &rv, sizeof(rv));
	}
	mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
}
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316static int rand_initialize(void)
1317{
1318 init_std_data(&input_pool);
1319 init_std_data(&blocking_pool);
1320 init_std_data(&nonblocking_pool);
1321 return 0;
1322}
1323early_initcall(rand_initialize);
1324
#ifdef CONFIG_BLOCK
/*
 * Allocate the per-disk timing state used by add_disk_randomness().
 * A failed allocation is tolerated: add_disk_randomness() checks for
 * a NULL disk->random and simply contributes nothing.
 */
void rand_initialize_disk(struct gendisk *disk)
{
	struct timer_rand_state *state;

	state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
	if (state) {
		state->last_time = INITIAL_JIFFIES;
		disk->random = state;
	}
}
#endif
1341
/*
 * Core of the /dev/random read path (also used by getrandom(2) with
 * GRND_RANDOM).  Extracts from the blocking pool, and when it is
 * empty either returns -EAGAIN (@nonblock) or sleeps until the input
 * pool reaches the read threshold, then retries.  Reads are capped at
 * SEC_XFER_SIZE bytes per call.
 */
static ssize_t
_random_read(int nonblock, char __user *buf, size_t nbytes)
{
	ssize_t n;

	if (nbytes == 0)
		return 0;

	nbytes = min_t(size_t, nbytes, SEC_XFER_SIZE);
	while (1) {
		n = extract_entropy_user(&blocking_pool, buf, nbytes);
		if (n < 0)
			return n;
		trace_random_read(n*8, (nbytes-n)*8,
				  ENTROPY_BITS(&blocking_pool),
				  ENTROPY_BITS(&input_pool));
		if (n > 0)
			return n;

		/* Pool is (near) empty.  Maybe wait and retry. */
		if (nonblock)
			return -EAGAIN;

		wait_event_interruptible(random_read_wait,
			ENTROPY_BITS(&input_pool) >=
			random_read_wakeup_bits);
		if (signal_pending(current))
			return -ERESTARTSYS;
	}
}
1372
1373static ssize_t
1374random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
1375{
1376 return _random_read(file->f_flags & O_NONBLOCK, buf, nbytes);
1377}
1378
/*
 * read() handler for /dev/urandom: never blocks; extracts from the
 * nonblocking pool.  Reads before the pool is initialized are allowed
 * but logged once.  The size cap keeps nbytes << (ENTROPY_SHIFT + 3)
 * from overflowing an int inside the accounting code.
 */
static ssize_t
urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
	int ret;

	if (unlikely(nonblocking_pool.initialized == 0))
		printk_once(KERN_NOTICE "random: %s urandom read "
			    "with %d bits of entropy available\n",
			    current->comm, nonblocking_pool.entropy_total);

	nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
	ret = extract_entropy_user(&nonblocking_pool, buf, nbytes);

	trace_urandom_read(8 * nbytes, ENTROPY_BITS(&nonblocking_pool),
			   ENTROPY_BITS(&input_pool));
	return ret;
}
1396
1397static unsigned int
1398random_poll(struct file *file, poll_table * wait)
1399{
1400 unsigned int mask;
1401
1402 poll_wait(file, &random_read_wait, wait);
1403 poll_wait(file, &random_write_wait, wait);
1404 mask = 0;
1405 if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits)
1406 mask |= POLLIN | POLLRDNORM;
1407 if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
1408 mask |= POLLOUT | POLLWRNORM;
1409 return mask;
1410}
1411
1412static int
1413write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
1414{
1415 size_t bytes;
1416 __u32 buf[16];
1417 const char __user *p = buffer;
1418
1419 while (count > 0) {
1420 bytes = min(count, sizeof(buf));
1421 if (copy_from_user(&buf, p, bytes))
1422 return -EFAULT;
1423
1424 count -= bytes;
1425 p += bytes;
1426
1427 mix_pool_bytes(r, buf, bytes);
1428 cond_resched();
1429 }
1430
1431 return 0;
1432}
1433
1434static ssize_t random_write(struct file *file, const char __user *buffer,
1435 size_t count, loff_t *ppos)
1436{
1437 size_t ret;
1438
1439 ret = write_pool(&blocking_pool, buffer, count);
1440 if (ret)
1441 return ret;
1442 ret = write_pool(&nonblocking_pool, buffer, count);
1443 if (ret)
1444 return ret;
1445
1446 return (ssize_t)count;
1447}
1448
/*
 * ioctl() handler shared by /dev/random and /dev/urandom.
 * RNDGETENTCNT is unprivileged; every other command requires
 * CAP_SYS_ADMIN.  RNDADDENTROPY mixes user data into the input pool
 * AND credits the caller-claimed entropy; the clear commands only
 * zero the entropy estimates, not the pool contents.
 */
static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	int size, ent_count;
	int __user *p = (int __user *)arg;
	int retval;

	switch (cmd) {
	case RNDGETENTCNT:
		/* inherently racy, no point locking */
		ent_count = ENTROPY_BITS(&input_pool);
		if (put_user(ent_count, p))
			return -EFAULT;
		return 0;
	case RNDADDTOENTCNT:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p))
			return -EFAULT;
		credit_entropy_bits_safe(&input_pool, ent_count);
		return 0;
	case RNDADDENTROPY:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		/* arg points to a struct rand_pool_info: count, size, data */
		if (get_user(ent_count, p++))
			return -EFAULT;
		if (ent_count < 0)
			return -EINVAL;
		if (get_user(size, p++))
			return -EFAULT;
		retval = write_pool(&input_pool, (const char __user *)p,
				    size);
		if (retval < 0)
			return retval;
		credit_entropy_bits_safe(&input_pool, ent_count);
		return 0;
	case RNDZAPENTCNT:
	case RNDCLEARPOOL:
		/*
		 * Clear the entropy pool counters.  The pool data
		 * itself is deliberately left untouched.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		input_pool.entropy_count = 0;
		nonblocking_pool.entropy_count = 0;
		blocking_pool.entropy_count = 0;
		return 0;
	default:
		return -EINVAL;
	}
}
1500
/* fasync handler: register for SIGIO on pool fill/drain events. */
static int random_fasync(int fd, struct file *filp, int on)
{
	return fasync_helper(fd, filp, on, &fasync);
}
1505
/* File operations for /dev/random (blocking reads, pollable). */
const struct file_operations random_fops = {
	.read = random_read,
	.write = random_write,
	.poll = random_poll,
	.unlocked_ioctl = random_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
};

/* File operations for /dev/urandom (non-blocking reads, no poll). */
const struct file_operations urandom_fops = {
	.read = urandom_read,
	.write = random_write,
	.unlocked_ioctl = random_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
};
1522
/*
 * getrandom(2): with GRND_RANDOM, behave like a /dev/random read;
 * otherwise behave like /dev/urandom, except that the call blocks
 * (or fails with -EAGAIN under GRND_NONBLOCK) until the nonblocking
 * pool has been initialized.
 */
SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
		unsigned int, flags)
{
	if (flags & ~(GRND_NONBLOCK|GRND_RANDOM))
		return -EINVAL;

	if (count > INT_MAX)
		count = INT_MAX;

	if (flags & GRND_RANDOM)
		return _random_read(flags & GRND_NONBLOCK, buf, count);

	if (unlikely(nonblocking_pool.initialized == 0)) {
		if (flags & GRND_NONBLOCK)
			return -EAGAIN;
		wait_event_interruptible(urandom_init_wait,
					 nonblocking_pool.initialized);
		if (signal_pending(current))
			return -ERESTARTSYS;
	}
	return urandom_read(NULL, buf, count, NULL);
}
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
/*
 * generate_random_uuid - fill @uuid_out with a random (v4) UUID
 *
 * 16 random bytes with the RFC 4122 fixed fields stamped in.
 */
void generate_random_uuid(unsigned char uuid_out[16])
{
	get_random_bytes(uuid_out, 16);
	/* Set UUID version to 4 --- truly random generation */
	uuid_out[6] = (uuid_out[6] & 0x0F) | 0x40;
	/* Set the UUID variant to DCE (RFC 4122: top bits 10) */
	uuid_out[8] = (uuid_out[8] & 0x3F) | 0x80;
}
EXPORT_SYMBOL(generate_random_uuid);
1565
1566
1567
1568
1569
1570
1571
1572#ifdef CONFIG_SYSCTL
1573
1574#include <linux/sysctl.h>
1575
/* Clamp ranges for the read/write wakeup threshold sysctls. */
static int min_read_thresh = 8, min_write_thresh;
static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
static int max_write_thresh = INPUT_POOL_WORDS * 32;
/* Lazily generated per-boot UUID backing the "boot_id" sysctl. */
static char sysctl_bootid[16];
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590static int proc_do_uuid(struct ctl_table *table, int write,
1591 void __user *buffer, size_t *lenp, loff_t *ppos)
1592{
1593 struct ctl_table fake_table;
1594 unsigned char buf[64], tmp_uuid[16], *uuid;
1595
1596 uuid = table->data;
1597 if (!uuid) {
1598 uuid = tmp_uuid;
1599 generate_random_uuid(uuid);
1600 } else {
1601 static DEFINE_SPINLOCK(bootid_spinlock);
1602
1603 spin_lock(&bootid_spinlock);
1604 if (!uuid[8])
1605 generate_random_uuid(uuid);
1606 spin_unlock(&bootid_spinlock);
1607 }
1608
1609 sprintf(buf, "%pU", uuid);
1610
1611 fake_table.data = buf;
1612 fake_table.maxlen = sizeof(buf);
1613
1614 return proc_dostring(&fake_table, write, buffer, lenp, ppos);
1615}
1616
1617
1618
1619
1620static int proc_do_entropy(struct ctl_table *table, int write,
1621 void __user *buffer, size_t *lenp, loff_t *ppos)
1622{
1623 struct ctl_table fake_table;
1624 int entropy_count;
1625
1626 entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
1627
1628 fake_table.data = &entropy_count;
1629 fake_table.maxlen = sizeof(entropy_count);
1630
1631 return proc_dointvec(&fake_table, write, buffer, lenp, ppos);
1632}
1633
/* Input pool size in bits, as reported by /proc/sys/kernel/random. */
static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
extern struct ctl_table random_table[];
/* /proc/sys/kernel/random/* entries. */
struct ctl_table random_table[] = {
	{
		.procname = "poolsize",
		.data = &sysctl_poolsize,
		.maxlen = sizeof(int),
		.mode = 0444,
		.proc_handler = proc_dointvec,
	},
	{
		/* input-pool entropy, converted from fixed point */
		.procname = "entropy_avail",
		.maxlen = sizeof(int),
		.mode = 0444,
		.proc_handler = proc_do_entropy,
		.data = &input_pool.entropy_count,
	},
	{
		.procname = "read_wakeup_threshold",
		.data = &random_read_wakeup_bits,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &min_read_thresh,
		.extra2 = &max_read_thresh,
	},
	{
		.procname = "write_wakeup_threshold",
		.data = &random_write_wakeup_bits,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &min_write_thresh,
		.extra2 = &max_write_thresh,
	},
	{
		.procname = "urandom_min_reseed_secs",
		.data = &random_min_urandom_seed,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		/* stable per-boot UUID */
		.procname = "boot_id",
		.data = &sysctl_bootid,
		.maxlen = 16,
		.mode = 0444,
		.proc_handler = proc_do_uuid,
	},
	{
		/* fresh UUID on every read (no .data backing store) */
		.procname = "uuid",
		.maxlen = 16,
		.mode = 0444,
		.proc_handler = proc_do_uuid,
	},
#ifdef ADD_INTERRUPT_BENCH
	{
		.procname = "add_interrupt_avg_cycles",
		.data = &avg_cycles,
		.maxlen = sizeof(avg_cycles),
		.mode = 0444,
		.proc_handler = proc_doulongvec_minmax,
	},
	{
		.procname = "add_interrupt_avg_deviation",
		.data = &avg_deviation,
		.maxlen = sizeof(avg_deviation),
		.mode = 0444,
		.proc_handler = proc_doulongvec_minmax,
	},
#endif
	{ }
};
1707#endif
1708
/* Secret key mixed into every get_random_int() MD5 transform. */
static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;

/* Called once at boot (after the pools are seeded) to key get_random_int(). */
int random_int_secret_init(void)
{
	get_random_bytes(random_int_secret, sizeof(random_int_secret));
	return 0;
}
1716
1717
1718
1719
1720
1721
1722
/*
 * Cheap, non-cryptographic-pool random integers (used e.g. for
 * address-space randomization): a per-CPU MD5 state keyed by
 * random_int_secret and stirred with the pid, jiffies and cycle
 * counter on each call.  Prefers the arch RNG when one exists.
 */
static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
unsigned int get_random_int(void)
{
	__u32 *hash;
	unsigned int ret;

	if (arch_get_random_int(&ret))
		return ret;

	hash = get_cpu_var(get_random_int_hash);

	hash[0] += current->pid + jiffies + random_get_entropy();
	md5_transform(hash, random_int_secret);
	ret = hash[0];
	put_cpu_var(get_random_int_hash);

	return ret;
}
EXPORT_SYMBOL(get_random_int);
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
/*
 * randomize_range - pick a random, page-aligned address in a window
 * @start: lowest acceptable address
 * @end: one past the highest acceptable address
 * @len: size of the object being placed
 *
 * Returns 0 when the window cannot hold an object of @len bytes.
 *
 * NOTE(review): PAGE_ALIGN() rounds the chosen address *up*, so when
 * end - len is not page aligned the result can land up to one page
 * beyond the intended window; this is the documented reason the
 * helper was later replaced upstream (randomize_page).  Confirm all
 * callers tolerate the overshoot before relying on the bound.
 */
unsigned long
randomize_range(unsigned long start, unsigned long end, unsigned long len)
{
	unsigned long range = end - len - start;

	if (end <= start + len)
		return 0;
	return PAGE_ALIGN(get_random_int() % range + start);
}
1761
1762
1763
1764
1765
/*
 * add_hwgenerator_randomness - feed entropy from a hardware RNG driver
 * @buffer: data from the hardware RNG
 * @count: length of @buffer in bytes
 * @entropy: bits of entropy the driver claims for this data
 *
 * Called from the hwrng kthread.  Blocks (interruptibly) until the
 * input pool has drained below the write threshold or the kthread is
 * asked to stop, so hardware RNGs throttle instead of keeping the
 * pool pinned at full.
 */
void add_hwgenerator_randomness(const char *buffer, size_t count,
				size_t entropy)
{
	struct entropy_store *poolp = &input_pool;

	wait_event_interruptible(random_write_wait, kthread_should_stop() ||
			ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
	mix_pool_bytes(poolp, buffer, count);
	credit_entropy_bits(poolp, entropy);
}
EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
1781