/*
 * random.c -- A strong random number generator
 *
 * This driver gathers environmental noise from device drivers and
 * other sources into entropy pools, and makes it available to the
 * rest of the kernel and to user space via /dev/random and
 * /dev/urandom.
 */
#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/cryptohash.h>
#include <linux/fips.h>

#ifdef CONFIG_GENERIC_HARDIRQS
# include <linux/irq.h>
#endif

#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/irq.h>
#include <asm/io.h>

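/*
 * Configuration information: sizes of the input and output pools in
 * 32-bit words, the largest chunk handed to user space per blocking
 * read, and the number of bytes produced by each hash extraction.
 */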
#define INPUT_POOL_WORDS 128
#define OUTPUT_POOL_WORDS 32
#define SEC_XFER_SIZE 512
#define EXTRACT_SIZE 10

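/*
 * The minimum number of bits of entropy before we wake up a read on
 * /dev/random.  Should be enough to do a significant reseed.
 */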
static int random_read_wakeup_thresh = 64;

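/*
 * If the entropy count falls under this number of bits, then we
 * should wake up processes which are selecting or polling on write
 * access to /dev/random.
 */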
static int random_write_wakeup_thresh = 128;

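/*
 * When the input pool goes over trickle_thresh, start dropping most
 * samples to avoid wasting CPU time on a nearly full pool; see
 * add_timer_randomness() below.
 */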
static int trickle_thresh __read_mostly = INPUT_POOL_WORDS * 28;

static DEFINE_PER_CPU(int, trickle_count);

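/*
 * A pool of size .poolwords is stirred with a primitive polynomial of
 * degree .poolwords over GF(2).  The taps for the supported sizes are
 * defined below; they were chosen to be roughly evenly spaced across
 * the pool.
 */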
static struct poolinfo {
        int poolwords;
        int tap1, tap2, tap3, tap4, tap5;
} poolinfo_table[] = {
        /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 */
        { 128,  103,    76,     51,     25,     1 },
        /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 */
        { 32,   26,     20,     14,     7,      1 },
#if 0
        /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 */
        { 2048, 1638,   1231,   819,    411,    1 },

        /* x^1024 + x^817 + x^615 + x^412 + x^204 + x + 1 */
        { 1024, 817,    615,    412,    204,    1 },

        /* x^1024 + x^819 + x^616 + x^410 + x^207 + x^2 + 1 */
        { 1024, 819,    616,    410,    207,    2 },

        /* x^512 + x^411 + x^308 + x^208 + x^104 + x + 1 */
        { 512,  411,    308,    208,    104,    1 },

        /* x^512 + x^409 + x^307 + x^206 + x^102 + x^2 + 1 */
        { 512,  409,    307,    206,    102,    2 },
        /* x^512 + x^409 + x^309 + x^205 + x^103 + x^2 + 1 */
        { 512,  409,    309,    205,    103,    2 },

        /* x^256 + x^205 + x^155 + x^101 + x^52 + x + 1 */
        { 256,  205,    155,    101,    52,     1 },

        /* x^128 + x^103 + x^78 + x^51 + x^27 + x^2 + 1 */
        { 128,  103,    78,     51,     27,     2 },

        /* x^64 + x^52 + x^39 + x^26 + x^14 + x + 1 */
        { 64,   52,     39,     26,     14,     1 },
#endif
};

#define POOLBITS poolwords*32
#define POOLBYTES poolwords*4

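/*
 * Static global variables: wait queues for /dev/random readers and
 * writers, and the fasync list used for SIGIO notification.
 */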
static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
static struct fasync_struct *fasync;

#if 0
static int debug;
module_param(debug, bool, 0644);
#define DEBUG_ENT(fmt, arg...) do { \
        if (debug) \
                printk(KERN_DEBUG "random %04d %04d %04d: " \
                fmt,\
                input_pool.entropy_count,\
                blocking_pool.entropy_count,\
                nonblocking_pool.entropy_count,\
                ## arg); } while (0)
#else
#define DEBUG_ENT(fmt, arg...) do {} while (0)
#endif

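/**********************************************************************
 *
 * OS independent entropy store.  Here are the functions which handle
 * storing entropy in an entropy pool.
 *
 **********************************************************************/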
struct entropy_store;
struct entropy_store {
        /* read-only data: */
        struct poolinfo *poolinfo;
        __u32 *pool;
        const char *name;
        struct entropy_store *pull;
        int limit;

        /* read-write data: */
        spinlock_t lock;
        unsigned add_ptr;
        int entropy_count;
        int input_rotate;
        __u8 last_data[EXTRACT_SIZE];
};

static __u32 input_pool_data[INPUT_POOL_WORDS];
static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];

static struct entropy_store input_pool = {
        .poolinfo = &poolinfo_table[0],
        .name = "input",
        .limit = 1,
        .lock = __SPIN_LOCK_UNLOCKED(&input_pool.lock),
        .pool = input_pool_data
};

static struct entropy_store blocking_pool = {
        .poolinfo = &poolinfo_table[1],
        .name = "blocking",
        .limit = 1,
        .pull = &input_pool,
        .lock = __SPIN_LOCK_UNLOCKED(&blocking_pool.lock),
        .pool = blocking_pool_data
};

static struct entropy_store nonblocking_pool = {
        .poolinfo = &poolinfo_table[1],
        .name = "nonblocking",
        .pull = &input_pool,
        .lock = __SPIN_LOCK_UNLOCKED(&nonblocking_pool.lock),
        .pool = nonblocking_pool_data
};

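/*
 * This function adds bytes into the entropy "pool".  It does not
 * update the entropy estimate.  The caller should call
 * credit_entropy_bits if this is appropriate.
 *
 * The pool is stirred with a primitive polynomial of the appropriate
 * degree, and then twisted.  We twist by three bits at a time because
 * it's cheap to do so and helps slightly in the expected case where
 * the entropy is concentrated in the low-order bits.
 */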
static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
                                   int nbytes, __u8 out[64])
{
        static __u32 const twist_table[8] = {
                0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
                0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
        unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
        int input_rotate;
        int wordmask = r->poolinfo->poolwords - 1;
        const char *bytes = in;
        __u32 w;
        unsigned long flags;

        /* Taps are constant, so we can load them without holding r->lock */
        tap1 = r->poolinfo->tap1;
        tap2 = r->poolinfo->tap2;
        tap3 = r->poolinfo->tap3;
        tap4 = r->poolinfo->tap4;
        tap5 = r->poolinfo->tap5;

        spin_lock_irqsave(&r->lock, flags);
        input_rotate = r->input_rotate;
        i = r->add_ptr;

        /* mix one byte at a time to simplify size handling and churn faster */
        while (nbytes--) {
                w = rol32(*bytes++, input_rotate & 31);
                i = (i - 1) & wordmask;

                /* XOR in the various taps */
                w ^= r->pool[i];
                w ^= r->pool[(i + tap1) & wordmask];
                w ^= r->pool[(i + tap2) & wordmask];
                w ^= r->pool[(i + tap3) & wordmask];
                w ^= r->pool[(i + tap4) & wordmask];
                w ^= r->pool[(i + tap5) & wordmask];

                /* Mix the result back in with a twist */
                r->pool[i] = (w >> 3) ^ twist_table[w & 7];

                /*
                 * Normally, we add 7 bits of rotation to the pool.
                 * At the beginning of the pool, add an extra 7 bits
                 * rotation, so that successive passes spread the
                 * input bits across the pool evenly.
                 */
                input_rotate += i ? 7 : 14;
        }

        r->input_rotate = input_rotate;
        r->add_ptr = i;

        if (out)
                for (j = 0; j < 16; j++)
                        ((__u32 *)out)[j] = r->pool[(i - j) & wordmask];

        spin_unlock_irqrestore(&r->lock, flags);
}

static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
{
        mix_pool_bytes_extract(r, in, bytes, NULL);
}

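/*
 * Credit (or debit) the entropy store with n bits of entropy.
 */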
static void credit_entropy_bits(struct entropy_store *r, int nbits)
{
        unsigned long flags;
        int entropy_count;

        if (!nbits)
                return;

        spin_lock_irqsave(&r->lock, flags);

        DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
        entropy_count = r->entropy_count;
        entropy_count += nbits;
        if (entropy_count < 0) {
                DEBUG_ENT("negative entropy/overflow\n");
                entropy_count = 0;
        } else if (entropy_count > r->poolinfo->POOLBITS)
                entropy_count = r->poolinfo->POOLBITS;
        r->entropy_count = entropy_count;

        if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) {
                wake_up_interruptible(&random_read_wait);
                kill_fasync(&fasync, SIGIO, POLL_IN);
        }
        spin_unlock_irqrestore(&r->lock, flags);
}

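/*********************************************************************
 *
 * Entropy input management
 *
 *********************************************************************/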
/* There is one of these per entropy source */
struct timer_rand_state {
        cycles_t last_time;
        long last_delta, last_delta2;
        unsigned dont_count_entropy:1;
};

#ifndef CONFIG_GENERIC_HARDIRQS

static struct timer_rand_state *irq_timer_state[NR_IRQS];

static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
{
        return irq_timer_state[irq];
}

static void set_timer_rand_state(unsigned int irq,
                                 struct timer_rand_state *state)
{
        irq_timer_state[irq] = state;
}

#else

static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
{
        struct irq_desc *desc;

        desc = irq_to_desc(irq);

        return desc->timer_rand_state;
}

static void set_timer_rand_state(unsigned int irq,
                                 struct timer_rand_state *state)
{
        struct irq_desc *desc;

        desc = irq_to_desc(irq);

        desc->timer_rand_state = state;
}
#endif

static struct timer_rand_state input_timer_state;

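/*
 * This function adds entropy to the entropy "pool" by using timing
 * delays.  It uses the timer_rand_state structure to make an estimate
 * of how many bits of entropy this call has added to the pool.
 *
 * The number "num" is also added to the pool - it should somehow
 * describe the type of event which just happened.
 */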
static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
{
        struct {
                cycles_t cycles;
                long jiffies;
                unsigned num;
        } sample;
        long delta, delta2, delta3;

        preempt_disable();
        /* if over the trickle threshold, use only 1 in 4096 samples */
        if (input_pool.entropy_count > trickle_thresh &&
            ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff))
                goto out;

        sample.jiffies = jiffies;
        sample.cycles = get_cycles();
        sample.num = num;
        mix_pool_bytes(&input_pool, &sample, sizeof(sample));

        /*
         * Calculate the number of bits of randomness we probably added.
         * We take into account the first, second and third-order deltas
         * in order to make our estimate.
         */
        if (!state->dont_count_entropy) {
                delta = sample.jiffies - state->last_time;
                state->last_time = sample.jiffies;

                delta2 = delta - state->last_delta;
                state->last_delta = delta;

                delta3 = delta2 - state->last_delta2;
                state->last_delta2 = delta2;

                if (delta < 0)
                        delta = -delta;
                if (delta2 < 0)
                        delta2 = -delta2;
                if (delta3 < 0)
                        delta3 = -delta3;
                if (delta > delta2)
                        delta = delta2;
                if (delta > delta3)
                        delta = delta3;

                /*
                 * delta is now the minimum absolute delta.  Round down
                 * by one bit on general principles, and limit the
                 * entropy estimate to 11 bits.
                 */
                credit_entropy_bits(&input_pool,
                                    min_t(int, fls(delta>>1), 11));
        }
out:
        preempt_enable();
}

void add_input_randomness(unsigned int type, unsigned int code,
                          unsigned int value)
{
        static unsigned char last_value;

        /* ignore autorepeat and the like */
        if (value == last_value)
                return;

        DEBUG_ENT("input event\n");
        last_value = value;
        add_timer_randomness(&input_timer_state,
                             (type << 4) ^ code ^ (code >> 4) ^ value);
}
EXPORT_SYMBOL_GPL(add_input_randomness);

void add_interrupt_randomness(int irq)
{
        struct timer_rand_state *state;

        state = get_timer_rand_state(irq);

        if (state == NULL)
                return;

        DEBUG_ENT("irq event %d\n", irq);
        add_timer_randomness(state, 0x100 + irq);
}

#ifdef CONFIG_BLOCK
void add_disk_randomness(struct gendisk *disk)
{
        if (!disk || !disk->random)
                return;

        DEBUG_ENT("disk event %d:%d\n",
                  MAJOR(disk_devt(disk)), MINOR(disk_devt(disk)));

        add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
}
#endif

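/*********************************************************************
 *
 * Entropy extraction routines
 *
 *********************************************************************/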
static ssize_t extract_entropy(struct entropy_store *r, void *buf,
                               size_t nbytes, int min, int rsvd);

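/*
 * This utility function is used to transfer entropy from the primary
 * pool to the secondary extraction pool.  We make sure we pull enough
 * for a "catastrophic reseed".
 */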
static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
{
        __u32 tmp[OUTPUT_POOL_WORDS];

        if (r->pull && r->entropy_count < nbytes * 8 &&
            r->entropy_count < r->poolinfo->POOLBITS) {
                /* If we're limited, always leave two wakeup worth's of bits */
                int rsvd = r->limit ? 0 : random_read_wakeup_thresh/4;
                int bytes = nbytes;

                /* pull at least as many bytes as the wakeup threshold in bits */
                bytes = max_t(int, bytes, random_read_wakeup_thresh / 8);
                /* but never more than the buffer size */
                bytes = min_t(int, bytes, sizeof(tmp));

                DEBUG_ENT("going to reseed %s with %d bits "
                          "(%d of %d requested)\n",
                          r->name, bytes * 8, nbytes * 8, r->entropy_count);

                bytes = extract_entropy(r->pull, tmp, bytes,
                                        random_read_wakeup_thresh / 8, rsvd);
                mix_pool_bytes(r, tmp, bytes);
                credit_entropy_bits(r, bytes*8);
        }
}

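/*
 * This function decides how many bytes to actually take from the
 * given pool, and also debits the entropy count accordingly.
 */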
static size_t account(struct entropy_store *r, size_t nbytes, int min,
                      int reserved)
{
        unsigned long flags;

        /* Hold lock while accounting */
        spin_lock_irqsave(&r->lock, flags);

        BUG_ON(r->entropy_count > r->poolinfo->POOLBITS);
        DEBUG_ENT("trying to extract %d bits from %s\n",
                  nbytes * 8, r->name);

        /* Can we pull enough? */
        if (r->entropy_count / 8 < min + reserved) {
                nbytes = 0;
        } else {
                /* If limited, never pull more than available */
                if (r->limit && nbytes + reserved >= r->entropy_count / 8)
                        nbytes = r->entropy_count/8 - reserved;

                if (r->entropy_count / 8 >= nbytes + reserved)
                        r->entropy_count -= nbytes*8;
                else
                        r->entropy_count = reserved;

                if (r->entropy_count < random_write_wakeup_thresh) {
                        wake_up_interruptible(&random_write_wait);
                        kill_fasync(&fasync, SIGIO, POLL_OUT);
                }
        }

        DEBUG_ENT("debiting %d entropy credits from %s%s\n",
                  nbytes * 8, r->name, r->limit ? "" : " (unlimited)");

        spin_unlock_irqrestore(&r->lock, flags);

        return nbytes;
}

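/*
 * Hash the contents of the pool with SHA-1, mix the hash back into
 * the pool so that prior outputs cannot be reconstructed from the
 * pool state, and fold the 20-byte result in half to produce the
 * EXTRACT_SIZE bytes of output.
 */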
static void extract_buf(struct entropy_store *r, __u8 *out)
{
        int i;
        __u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
        __u8 extract[64];

        /* Generate a hash across the pool, 16 words (512 bits) at a time */
        sha_init(hash);
        for (i = 0; i < r->poolinfo->poolwords; i += 16)
                sha_transform(hash, (__u8 *)(r->pool + i), workspace);

        /*
         * We mix the hash back into the pool to prevent backtracking
         * attacks (where the attacker knows the state of the pool
         * plus the current outputs, and attempts to find previous
         * outputs), unless the hash function can be inverted.  By
         * mixing at least a SHA1 worth of hash data back, we make
         * brute-forcing the feedback as hard as brute-forcing the
         * hash.
         */
        mix_pool_bytes_extract(r, hash, sizeof(hash), extract);

        /*
         * To avoid duplicates, we atomically extract a portion of the
         * pool while mixing, and hash one final time.
         */
        sha_transform(hash, extract, workspace);
        memset(extract, 0, sizeof(extract));
        memset(workspace, 0, sizeof(workspace));

        /*
         * In case the hash function has some recognizable output
         * pattern, we fold it in half.  Thus, we always feed back
         * twice as much data as we output.
         */
        hash[0] ^= hash[3];
        hash[1] ^= hash[4];
        hash[2] ^= rol32(hash[2], 16);
        memcpy(out, hash, EXTRACT_SIZE);
        memset(hash, 0, sizeof(hash));
}

static ssize_t extract_entropy(struct entropy_store *r, void *buf,
                               size_t nbytes, int min, int reserved)
{
        ssize_t ret = 0, i;
        __u8 tmp[EXTRACT_SIZE];
        unsigned long flags;

        xfer_secondary_pool(r, nbytes);
        nbytes = account(r, nbytes, min, reserved);

        while (nbytes) {
                extract_buf(r, tmp);

                if (fips_enabled) {
                        spin_lock_irqsave(&r->lock, flags);
                        if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
                                panic("Hardware RNG duplicated output!\n");
                        memcpy(r->last_data, tmp, EXTRACT_SIZE);
                        spin_unlock_irqrestore(&r->lock, flags);
                }
                i = min_t(int, nbytes, EXTRACT_SIZE);
                memcpy(buf, tmp, i);
                nbytes -= i;
                buf += i;
                ret += i;
        }

        memset(tmp, 0, sizeof(tmp));

        return ret;
}

static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
                                    size_t nbytes)
{
        ssize_t ret = 0, i;
        __u8 tmp[EXTRACT_SIZE];

        xfer_secondary_pool(r, nbytes);
        nbytes = account(r, nbytes, 0, 0);

        while (nbytes) {
                if (need_resched()) {
                        if (signal_pending(current)) {
                                if (ret == 0)
                                        ret = -ERESTARTSYS;
                                break;
                        }
                        schedule();
                }

                extract_buf(r, tmp);
                i = min_t(int, nbytes, EXTRACT_SIZE);
                if (copy_to_user(buf, tmp, i)) {
                        ret = -EFAULT;
                        break;
                }

                nbytes -= i;
                buf += i;
                ret += i;
        }

        memset(tmp, 0, sizeof(tmp));

        return ret;
}

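/*
 * This function is the exported kernel interface.  It returns some
 * number of good random numbers, suitable for seeding TCP sequence
 * numbers, etc.
 */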
void get_random_bytes(void *buf, int nbytes)
{
        extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
}
EXPORT_SYMBOL(get_random_bytes);

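/*
 * init_std_data - initialize pool with system data
 *
 * @r: pool to initialize
 *
 * This function clears the pool's entropy count and mixes some system
 * data into the pool to prepare it for use.  The pool is not fully
 * initialized until there is actual entropy mixed in.
 */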
static void init_std_data(struct entropy_store *r)
{
        ktime_t now;
        unsigned long flags;

        spin_lock_irqsave(&r->lock, flags);
        r->entropy_count = 0;
        spin_unlock_irqrestore(&r->lock, flags);

        now = ktime_get_real();
        mix_pool_bytes(r, &now, sizeof(now));
        mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
}

static int rand_initialize(void)
{
        init_std_data(&input_pool);
        init_std_data(&blocking_pool);
        init_std_data(&nonblocking_pool);
        return 0;
}
module_init(rand_initialize);

void rand_initialize_irq(int irq)
{
        struct timer_rand_state *state;

        state = get_timer_rand_state(irq);

        if (state)
                return;

        /*
         * If kzalloc returns null, we just won't use that entropy
         * source.
         */
        state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
        if (state)
                set_timer_rand_state(irq, state);
}

#ifdef CONFIG_BLOCK
void rand_initialize_disk(struct gendisk *disk)
{
        struct timer_rand_state *state;

        /*
         * If kzalloc returns null, we just won't use that entropy
         * source.
         */
        state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
        if (state)
                disk->random = state;
}
#endif

static ssize_t
random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
        ssize_t n, retval = 0, count = 0;

        if (nbytes == 0)
                return 0;

        while (nbytes > 0) {
                n = nbytes;
                if (n > SEC_XFER_SIZE)
                        n = SEC_XFER_SIZE;

                DEBUG_ENT("reading %d bits\n", n*8);

                n = extract_entropy_user(&blocking_pool, buf, n);

                DEBUG_ENT("read got %d bits (%d still needed)\n",
                          n*8, (nbytes-n)*8);

                if (n == 0) {
                        if (file->f_flags & O_NONBLOCK) {
                                retval = -EAGAIN;
                                break;
                        }

                        DEBUG_ENT("sleeping?\n");

                        wait_event_interruptible(random_read_wait,
                                input_pool.entropy_count >=
                                random_read_wakeup_thresh);

                        DEBUG_ENT("awake\n");

                        if (signal_pending(current)) {
                                retval = -ERESTARTSYS;
                                break;
                        }

                        continue;
                }

                if (n < 0) {
                        retval = n;
                        break;
                }
                count += n;
                buf += n;
                nbytes -= n;
                break;          /* This break makes the device work */
                                /* like a named pipe */
        }

        return (count ? count : retval);
}

static ssize_t
urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
        return extract_entropy_user(&nonblocking_pool, buf, nbytes);
}

static unsigned int
random_poll(struct file *file, poll_table *wait)
{
        unsigned int mask;

        poll_wait(file, &random_read_wait, wait);
        poll_wait(file, &random_write_wait, wait);
        mask = 0;
        if (input_pool.entropy_count >= random_read_wakeup_thresh)
                mask |= POLLIN | POLLRDNORM;
        if (input_pool.entropy_count < random_write_wakeup_thresh)
                mask |= POLLOUT | POLLWRNORM;
        return mask;
}

static int
write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
{
        size_t bytes;
        __u32 buf[16];
        const char __user *p = buffer;

        while (count > 0) {
                bytes = min(count, sizeof(buf));
                if (copy_from_user(&buf, p, bytes))
                        return -EFAULT;

                count -= bytes;
                p += bytes;

                mix_pool_bytes(r, buf, bytes);
                cond_resched();
        }

        return 0;
}

static ssize_t random_write(struct file *file, const char __user *buffer,
                            size_t count, loff_t *ppos)
{
        size_t ret;

        ret = write_pool(&blocking_pool, buffer, count);
        if (ret)
                return ret;
        ret = write_pool(&nonblocking_pool, buffer, count);
        if (ret)
                return ret;

        return (ssize_t)count;
}

static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
        int size, ent_count;
        int __user *p = (int __user *)arg;
        int retval;

        switch (cmd) {
        case RNDGETENTCNT:
                /* inherently racy, no point locking */
                if (put_user(input_pool.entropy_count, p))
                        return -EFAULT;
                return 0;
        case RNDADDTOENTCNT:
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
                if (get_user(ent_count, p))
                        return -EFAULT;
                credit_entropy_bits(&input_pool, ent_count);
                return 0;
        case RNDADDENTROPY:
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
                if (get_user(ent_count, p++))
                        return -EFAULT;
                if (ent_count < 0)
                        return -EINVAL;
                if (get_user(size, p++))
                        return -EFAULT;
                retval = write_pool(&input_pool, (const char __user *)p,
                                    size);
                if (retval < 0)
                        return retval;
                credit_entropy_bits(&input_pool, ent_count);
                return 0;
        case RNDZAPENTCNT:
        case RNDCLEARPOOL:
                /* Clear the entropy pool counters. */
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
                rand_initialize();
                return 0;
        default:
                return -EINVAL;
        }
}

static int random_fasync(int fd, struct file *filp, int on)
{
        return fasync_helper(fd, filp, on, &fasync);
}

const struct file_operations random_fops = {
        .read = random_read,
        .write = random_write,
        .poll = random_poll,
        .unlocked_ioctl = random_ioctl,
        .fasync = random_fasync,
        .llseek = noop_llseek,
};

const struct file_operations urandom_fops = {
        .read = urandom_read,
        .write = random_write,
        .unlocked_ioctl = random_ioctl,
        .fasync = random_fasync,
        .llseek = noop_llseek,
};

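/***************************************************************
 * Random UUID interface
 *
 * Used here for a Boot ID, but can be useful for other kernel
 * drivers.
 ***************************************************************/

/*
 * Generate a random UUID (RFC 4122 version 4: random bits with the
 * version and variant fields set appropriately).
 */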
void generate_random_uuid(unsigned char uuid_out[16])
{
        get_random_bytes(uuid_out, 16);
        /* Set UUID version to 4 --- truly random generation */
        uuid_out[6] = (uuid_out[6] & 0x0F) | 0x40;
        /* Set the UUID variant to DCE */
        uuid_out[8] = (uuid_out[8] & 0x3F) | 0x80;
}
EXPORT_SYMBOL(generate_random_uuid);

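/********************************************************************
 *
 * Sysctl interface
 *
 ********************************************************************/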
#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int min_read_thresh = 8, min_write_thresh;
static int max_read_thresh = INPUT_POOL_WORDS * 32;
static int max_write_thresh = INPUT_POOL_WORDS * 32;
static char sysctl_bootid[16];

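/*
 * This function handles both the bootid UUID and a freshly generated
 * random UUID.  The difference is in whether table->data is non-NULL;
 * if it is NULL, then a new UUID is generated and returned to the user.
 *
 * If the user accesses this via the proc interface, the UUID will be
 * returned as an ASCII string in the standard UUID format.
 */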
static int proc_do_uuid(ctl_table *table, int write,
                        void __user *buffer, size_t *lenp, loff_t *ppos)
{
        ctl_table fake_table;
        unsigned char buf[64], tmp_uuid[16], *uuid;

        uuid = table->data;
        if (!uuid) {
                uuid = tmp_uuid;
                uuid[8] = 0;
        }
        if (uuid[8] == 0)
                generate_random_uuid(uuid);

        sprintf(buf, "%pU", uuid);

        fake_table.data = buf;
        fake_table.maxlen = sizeof(buf);

        return proc_dostring(&fake_table, write, buffer, lenp, ppos);
}

static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
ctl_table random_table[] = {
        {
                .procname = "poolsize",
                .data = &sysctl_poolsize,
                .maxlen = sizeof(int),
                .mode = 0444,
                .proc_handler = proc_dointvec,
        },
        {
                .procname = "entropy_avail",
                .maxlen = sizeof(int),
                .mode = 0444,
                .proc_handler = proc_dointvec,
                .data = &input_pool.entropy_count,
        },
        {
                .procname = "read_wakeup_threshold",
                .data = &random_read_wakeup_thresh,
                .maxlen = sizeof(int),
                .mode = 0644,
                .proc_handler = proc_dointvec_minmax,
                .extra1 = &min_read_thresh,
                .extra2 = &max_read_thresh,
        },
        {
                .procname = "write_wakeup_threshold",
                .data = &random_write_wakeup_thresh,
                .maxlen = sizeof(int),
                .mode = 0644,
                .proc_handler = proc_dointvec_minmax,
                .extra1 = &min_write_thresh,
                .extra2 = &max_write_thresh,
        },
        {
                .procname = "boot_id",
                .data = &sysctl_bootid,
                .maxlen = 16,
                .mode = 0444,
                .proc_handler = proc_do_uuid,
        },
        {
                .procname = "uuid",
                .maxlen = 16,
                .mode = 0444,
                .proc_handler = proc_do_uuid,
        },
        { }
};
#endif  /* CONFIG_SYSCTL */

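/********************************************************************
 *
 * Random functions for networking
 *
 ********************************************************************/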
/* Basic MD4-style boolean functions */
#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
#define G(x, y, z) (((x) & (y)) + (((x) ^ (y)) & (z)))
#define H(x, y, z) ((x) ^ (y) ^ (z))

/*
 * The generic round function.  The application is so specific that we
 * don't bother protecting all the arguments with parens, as is
 * generally good macro practice, in favor of extra legibility.
 * Rotation is separate from addition to prevent recomputation.
 */
#define ROUND(f, a, b, c, d, x, s)      \
        (a += f(b, c, d) + x, a = (a << s) | (a >> (32 - s)))
#define K1 0
#define K2 013240474631UL
#define K3 015666365641UL

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)

static __u32 twothirdsMD4Transform(__u32 const buf[4], __u32 const in[12])
{
        __u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3];

        /* Round 1 */
        ROUND(F, a, b, c, d, in[ 0] + K1, 3);
        ROUND(F, d, a, b, c, in[ 1] + K1, 7);
        ROUND(F, c, d, a, b, in[ 2] + K1, 11);
        ROUND(F, b, c, d, a, in[ 3] + K1, 19);
        ROUND(F, a, b, c, d, in[ 4] + K1, 3);
        ROUND(F, d, a, b, c, in[ 5] + K1, 7);
        ROUND(F, c, d, a, b, in[ 6] + K1, 11);
        ROUND(F, b, c, d, a, in[ 7] + K1, 19);
        ROUND(F, a, b, c, d, in[ 8] + K1, 3);
        ROUND(F, d, a, b, c, in[ 9] + K1, 7);
        ROUND(F, c, d, a, b, in[10] + K1, 11);
        ROUND(F, b, c, d, a, in[11] + K1, 19);

        /* Round 2 */
        ROUND(G, a, b, c, d, in[ 1] + K2, 3);
        ROUND(G, d, a, b, c, in[ 3] + K2, 5);
        ROUND(G, c, d, a, b, in[ 5] + K2, 9);
        ROUND(G, b, c, d, a, in[ 7] + K2, 13);
        ROUND(G, a, b, c, d, in[ 9] + K2, 3);
        ROUND(G, d, a, b, c, in[11] + K2, 5);
        ROUND(G, c, d, a, b, in[ 0] + K2, 9);
        ROUND(G, b, c, d, a, in[ 2] + K2, 13);
        ROUND(G, a, b, c, d, in[ 4] + K2, 3);
        ROUND(G, d, a, b, c, in[ 6] + K2, 5);
        ROUND(G, c, d, a, b, in[ 8] + K2, 9);
        ROUND(G, b, c, d, a, in[10] + K2, 13);

        /* Round 3 */
        ROUND(H, a, b, c, d, in[ 3] + K3, 3);
        ROUND(H, d, a, b, c, in[ 7] + K3, 9);
        ROUND(H, c, d, a, b, in[11] + K3, 11);
        ROUND(H, b, c, d, a, in[ 2] + K3, 15);
        ROUND(H, a, b, c, d, in[ 6] + K3, 3);
        ROUND(H, d, a, b, c, in[10] + K3, 9);
        ROUND(H, c, d, a, b, in[ 1] + K3, 11);
        ROUND(H, b, c, d, a, in[ 5] + K3, 15);
        ROUND(H, a, b, c, d, in[ 9] + K3, 3);
        ROUND(H, d, a, b, c, in[ 0] + K3, 9);
        ROUND(H, c, d, a, b, in[ 4] + K3, 11);
        ROUND(H, b, c, d, a, in[ 8] + K3, 15);

        return buf[1] + b;      /* "most hashed" word */
}
#endif

#undef ROUND
#undef F
#undef G
#undef H
#undef K1
#undef K2
#undef K3

#define REKEY_INTERVAL (300 * HZ)
/*
 * Layout of the TCP sequence number hash (before the current time is
 * added in):
 *   bits 24-31: count, increased after every key exchange
 *   bits  0-23: hash(source, dest)
 *
 * Rekeying is done from a workqueue every REKEY_INTERVAL.  Readers use
 * the double-buffered ip_keydata[] with memory barriers instead of a
 * lock (poor man's RCU).
 */

#define COUNT_BITS 8
#define COUNT_MASK ((1 << COUNT_BITS) - 1)
#define HASH_BITS 24
#define HASH_MASK ((1 << HASH_BITS) - 1)

static struct keydata {
        __u32 count; /* already shifted to the final position */
        __u32 secret[12];
} ____cacheline_aligned ip_keydata[2];

static unsigned int ip_cnt;

static void rekey_seq_generator(struct work_struct *work);

static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator);

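/*
 * Lock avoidance: the keys are only read by the sequence number
 * functions; updates happen from a work queue entry every
 * REKEY_INTERVAL.  The inactive copy of ip_keydata is rewritten, then
 * ip_cnt is bumped after a write memory barrier, so get_keyptr()
 * always returns a fully written key.
 */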
static void rekey_seq_generator(struct work_struct *work)
{
        struct keydata *keyptr = &ip_keydata[1 ^ (ip_cnt & 1)];

        get_random_bytes(keyptr->secret, sizeof(keyptr->secret));
        keyptr->count = (ip_cnt & COUNT_MASK) << HASH_BITS;
        smp_wmb();
        ip_cnt++;
        schedule_delayed_work(&rekey_work,
                              round_jiffies_relative(REKEY_INTERVAL));
}

static inline struct keydata *get_keyptr(void)
{
        struct keydata *keyptr = &ip_keydata[ip_cnt & 1];

        smp_rmb();

        return keyptr;
}

static __init int seqgen_init(void)
{
        rekey_seq_generator(NULL);
        return 0;
}
late_initcall(seqgen_init);

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
__u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
                                   __be16 sport, __be16 dport)
{
        __u32 seq;
        __u32 hash[12];
        struct keydata *keyptr = get_keyptr();

        /*
         * The procedure is the same as for IPv4, but the addresses are
         * longer, so we must use twothirdsMD4Transform.
         */
        memcpy(hash, saddr, 16);
        hash[4] = ((__force u16)sport << 16) + (__force u16)dport;
        memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7);

        seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK;
        seq += keyptr->count;

        seq += ktime_to_ns(ktime_get_real());

        return seq;
}
EXPORT_SYMBOL(secure_tcpv6_sequence_number);
#endif

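/*
 * Generate a hard-to-predict IP ID for a destination, keyed by part of
 * the current secret.
 */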
__u32 secure_ip_id(__be32 daddr)
{
        struct keydata *keyptr;
        __u32 hash[4];

        keyptr = get_keyptr();

        /*
         * Pick a unique starting offset for each IP destination.  The
         * destination address is placed in the starting vector, which
         * is then hashed with random data.
         */
        hash[0] = (__force __u32)daddr;
        hash[1] = keyptr->secret[9];
        hash[2] = keyptr->secret[10];
        hash[3] = keyptr->secret[11];

        return half_md4_transform(hash, keyptr->secret);
}

#ifdef CONFIG_INET

__u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
                                 __be16 sport, __be16 dport)
{
        __u32 seq;
        __u32 hash[4];
        struct keydata *keyptr = get_keyptr();

        /*
         * Pick a unique starting offset for each TCP connection
         * endpoint (saddr, daddr, sport, dport).  The words are placed
         * into the starting vector, which is then mixed with a partial
         * MD4 over random data.
         */
        hash[0] = (__force u32)saddr;
        hash[1] = (__force u32)daddr;
        hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
        hash[3] = keyptr->secret[11];

        seq = half_md4_transform(hash, keyptr->secret) & HASH_MASK;
        seq += keyptr->count;

        /*
         * Add the current time at roughly 64 ns resolution
         * (ktime >> 6), so the 32-bit sequence space wraps no faster
         * than about every 274 seconds.
         */
        seq += ktime_to_ns(ktime_get_real()) >> 6;

        return seq;
}

/* Generate a secure starting point for an ephemeral IPv4 transport port search */
u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
{
        struct keydata *keyptr = get_keyptr();
        u32 hash[4];

        /*
         * Pick a unique starting offset for each ephemeral port search,
         * keyed by (saddr, daddr, dport) and the current secret.
         */
        hash[0] = (__force u32)saddr;
        hash[1] = (__force u32)daddr;
        hash[2] = (__force u32)dport ^ keyptr->secret[10];
        hash[3] = keyptr->secret[11];

        return half_md4_transform(hash, keyptr->secret);
}
EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral);

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
                               __be16 dport)
{
        struct keydata *keyptr = get_keyptr();
        u32 hash[12];

        memcpy(hash, saddr, 16);
        hash[4] = (__force u32)dport;
        memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7);

        return twothirdsMD4Transform((const __u32 *)daddr, hash);
}
#endif

#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
/*
 * Similar to secure_tcp_sequence_number, but generates a 48-bit value:
 * the rekey count is folded in above the 32-bit hash, and the current
 * time is added before masking the result down to 48 bits.
 */
u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
                                __be16 sport, __be16 dport)
{
        u64 seq;
        __u32 hash[4];
        struct keydata *keyptr = get_keyptr();

        hash[0] = (__force u32)saddr;
        hash[1] = (__force u32)daddr;
        hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
        hash[3] = keyptr->secret[11];

        seq = half_md4_transform(hash, keyptr->secret);
        seq |= ((u64)keyptr->count) << (32 - HASH_BITS);

        seq += ktime_to_ns(ktime_get_real());
        seq &= (1ull << 48) - 1;

        return seq;
}
EXPORT_SYMBOL(secure_dccp_sequence_number);
#endif

#endif /* CONFIG_INET */

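/*
 * Get a random word for internal kernel use only.  Similar to urandom
 * but with the goal of minimal entropy pool depletion.  As a result,
 * the random value is not cryptographically secure, but for several
 * uses the cost of depleting entropy is too high.
 */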
DEFINE_PER_CPU(__u32 [4], get_random_int_hash);
unsigned int get_random_int(void)
{
        struct keydata *keyptr;
        __u32 *hash = get_cpu_var(get_random_int_hash);
        int ret;

        keyptr = get_keyptr();
        hash[0] += current->pid + jiffies + get_cycles();

        ret = half_md4_transform(hash, keyptr->secret);
        put_cpu_var(get_random_int_hash);

        return ret;
}

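/*
 * randomize_range() returns a start address such that
 *
 *    [...... <range> .....]
 *  start                  end
 *
 * a <range> with size "len" starting at the returned address is inside
 * the area defined by [start, end], but is otherwise randomized.
 */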
unsigned long
randomize_range(unsigned long start, unsigned long end, unsigned long len)
{
        unsigned long range = end - len - start;

        if (end <= start + len)
                return 0;
        return PAGE_ALIGN(get_random_int() % range + start);
}