1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226#include <linux/utsname.h>
227#include <linux/module.h>
228#include <linux/kernel.h>
229#include <linux/major.h>
230#include <linux/string.h>
231#include <linux/fcntl.h>
232#include <linux/slab.h>
233#include <linux/random.h>
234#include <linux/poll.h>
235#include <linux/init.h>
236#include <linux/fs.h>
237#include <linux/genhd.h>
238#include <linux/interrupt.h>
239#include <linux/mm.h>
240#include <linux/spinlock.h>
241#include <linux/percpu.h>
242#include <linux/cryptohash.h>
243#include <linux/fips.h>
244
245#ifdef CONFIG_GENERIC_HARDIRQS
246# include <linux/irq.h>
247#endif
248
249#include <asm/processor.h>
250#include <asm/uaccess.h>
251#include <asm/irq.h>
252#include <asm/io.h>
253
254
255
256
/* Pool sizes in 32-bit words, and the max bytes moved per read chunk. */
#define INPUT_POOL_WORDS 128
#define OUTPUT_POOL_WORDS 32
#define SEC_XFER_SIZE 512

/*
 * Minimum number of bits of entropy in the input pool before readers
 * blocked on /dev/random are woken (see credit_entropy_bits()).
 */
static int random_read_wakeup_thresh = 64;

/*
 * If the input pool's entropy count falls below this many bits,
 * writers polling /dev/random for write access are woken (see
 * account()).
 */
static int random_write_wakeup_thresh = 128;

/*
 * Once the input pool holds more than this many bits, most timer
 * samples are dropped (1 in 4096 kept) in add_timer_randomness() to
 * save CPU.
 */
static int trickle_thresh __read_mostly = INPUT_POOL_WORDS * 28;

/* Per-CPU sample counter used to implement the 1-in-4096 trickle. */
static DEFINE_PER_CPU(int, trickle_count);
283
284
285
286
287
288
289
290
/*
 * Each entropy pool is stirred as a twisted LFSR over 32-bit words.
 * The five tap offsets below (plus position 0) select the pool words
 * XORed together in mix_pool_bytes_extract().  Only the 128- and
 * 32-word geometries are used; the rest are compiled out.
 */
static struct poolinfo {
	int poolwords;
	int tap1, tap2, tap3, tap4, tap5;
} poolinfo_table[] = {
	/* 128 words: used by the input pool */
	{ 128,	103,	76,	51,	25,	1 },
	/* 32 words: used by the blocking and nonblocking output pools */
	{ 32,	26,	20,	14,	7,	1 },
#if 0
	/* Alternative geometries, currently unused: */
	{ 2048,	1638,	1231,	819,	411,	1 },

	{ 1024,	817,	615,	412,	204,	1 },

	{ 1024,	819,	616,	410,	207,	2 },

	{ 512,	411,	308,	208,	104,	1 },

	{ 512,	409,	307,	206,	102,	2 },
	{ 512,	409,	309,	205,	103,	2 },

	{ 256,	205,	155,	101,	52,	1 },

	{ 128,	103,	78,	51,	27,	2 },

	{ 64,	52,	39,	26,	14,	1 },
#endif
};

/*
 * Deliberately unparenthesized: these expand against a struct member,
 * as in r->poolinfo->POOLBITS == r->poolinfo->poolwords*32.  Adding
 * parentheses around the expansion would break that usage.
 */
#define POOLBITS poolwords*32
#define POOLBYTES poolwords*4
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
/* Readers sleep here until the input pool reaches the read threshold. */
static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
/* Pollers waiting for write space sleep here (see account()). */
static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
/* SIGIO delivery list for O_ASYNC users of /dev/random. */
static struct fasync_struct *fasync;

/* Debug tracing of entropy accounting; compiled out by default. */
#if 0
static int debug;
module_param(debug, bool, 0644);
#define DEBUG_ENT(fmt, arg...) do { \
	if (debug) \
		printk(KERN_DEBUG "random %04d %04d %04d: " \
		fmt,\
		input_pool.entropy_count,\
		blocking_pool.entropy_count,\
		nonblocking_pool.entropy_count,\
		## arg); } while (0)
#else
#define DEBUG_ENT(fmt, arg...) do {} while (0)
#endif
395
396
397
398
399
400
401
402
struct entropy_store;
struct entropy_store {
	/* read-only data: */
	struct poolinfo *poolinfo;	/* pool geometry and LFSR taps */
	__u32 *pool;			/* backing word array */
	const char *name;		/* for debug messages */
	int limit;			/* if set, never debit below reserve */
	struct entropy_store *pull;	/* upstream pool to reseed from */

	/* read-write data, protected by lock: */
	spinlock_t lock;
	unsigned add_ptr;		/* next mix position (counts down) */
	int entropy_count;		/* estimated entropy, in bits */
	int input_rotate;		/* current input rotation amount */
	__u8 *last_data;		/* previous output, for the FIPS
					 * continuous-output test; NULL
					 * unless fips_enabled */
};

static __u32 input_pool_data[INPUT_POOL_WORDS];
static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];

/* Primary pool: collects raw event entropy; never pulls from another. */
static struct entropy_store input_pool = {
	.poolinfo = &poolinfo_table[0],
	.name = "input",
	.limit = 1,
	.lock = __SPIN_LOCK_UNLOCKED(&input_pool.lock),
	.pool = input_pool_data
};

/* Backs /dev/random; reseeds from the input pool, output is limited. */
static struct entropy_store blocking_pool = {
	.poolinfo = &poolinfo_table[1],
	.name = "blocking",
	.limit = 1,
	.pull = &input_pool,
	.lock = __SPIN_LOCK_UNLOCKED(&blocking_pool.lock),
	.pool = blocking_pool_data
};

/* Backs /dev/urandom; reseeds from the input pool, output unlimited. */
static struct entropy_store nonblocking_pool = {
	.poolinfo = &poolinfo_table[1],
	.name = "nonblocking",
	.pull = &input_pool,
	.lock = __SPIN_LOCK_UNLOCKED(&nonblocking_pool.lock),
	.pool = nonblocking_pool_data
};
448
449
450
451
452
453
454
455
456
457
458
/*
 * Mix @nbytes of @in into pool @r via a twisted LFSR: each input byte
 * is rotated, XORed with the pool words at the five tap offsets, then
 * twisted and written back.  If @out is non-NULL, the 16 pool words at
 * the final mix position are also copied out under the same lock, so
 * mixing and extraction are atomic with respect to other mixers.
 */
static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
				   int nbytes, __u8 out[64])
{
	static __u32 const twist_table[8] = {
		0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
		0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
	unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
	int input_rotate;
	int wordmask = r->poolinfo->poolwords - 1;	/* poolwords is a power of 2 */
	const char *bytes = in;
	__u32 w;
	unsigned long flags;

	/* Taps are constant, so we can load them without holding r->lock. */
	tap1 = r->poolinfo->tap1;
	tap2 = r->poolinfo->tap2;
	tap3 = r->poolinfo->tap3;
	tap4 = r->poolinfo->tap4;
	tap5 = r->poolinfo->tap5;

	spin_lock_irqsave(&r->lock, flags);
	input_rotate = r->input_rotate;
	i = r->add_ptr;

	/* mix one byte at a time to simplify size handling and churn faster */
	while (nbytes--) {
		w = rol32(*bytes++, input_rotate & 31);
		i = (i - 1) & wordmask;

		/* XOR in the current word and the five tap words */
		w ^= r->pool[i];
		w ^= r->pool[(i + tap1) & wordmask];
		w ^= r->pool[(i + tap2) & wordmask];
		w ^= r->pool[(i + tap3) & wordmask];
		w ^= r->pool[(i + tap4) & wordmask];
		w ^= r->pool[(i + tap5) & wordmask];

		/* Mix the result back in with a twist */
		r->pool[i] = (w >> 3) ^ twist_table[w & 7];

		/*
		 * Normally, we add 7 bits of rotation to the pool.
		 * At the beginning of the pool, add an extra 7 bits
		 * rotation, so that successive passes spread the
		 * input bits across the pool evenly.
		 */
		input_rotate += i ? 7 : 14;
	}

	r->input_rotate = input_rotate;
	r->add_ptr = i;

	if (out)
		for (j = 0; j < 16; j++)
			((__u32 *)out)[j] = r->pool[(i - j) & wordmask];

	spin_unlock_irqrestore(&r->lock, flags);
}
517
/* Mix @bytes of @in into pool @r without extracting anything. */
static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
{
	mix_pool_bytes_extract(r, in, bytes, NULL);
}
522
523
524
525
/*
 * Credit (or, if @nbits is negative, debit) the entropy estimate of
 * pool @r, clamping to [0, pool size in bits].  If the input pool now
 * holds enough entropy, wake blocked /dev/random readers and send
 * SIGIO to async waiters.
 */
static void credit_entropy_bits(struct entropy_store *r, int nbits)
{
	unsigned long flags;
	int entropy_count;

	if (!nbits)
		return;

	spin_lock_irqsave(&r->lock, flags);

	DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
	entropy_count = r->entropy_count;
	entropy_count += nbits;
	if (entropy_count < 0) {
		/* over-debit or integer overflow: clamp at empty */
		DEBUG_ENT("negative entropy/overflow\n");
		entropy_count = 0;
	} else if (entropy_count > r->poolinfo->POOLBITS)
		entropy_count = r->poolinfo->POOLBITS;
	r->entropy_count = entropy_count;

	/* should we wake readers? */
	if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) {
		wake_up_interruptible(&random_read_wait);
		kill_fasync(&fasync, SIGIO, POLL_IN);
	}
	spin_unlock_irqrestore(&r->lock, flags);
}
553
554
555
556
557
558
559
560
/*
 * Per-source timing state used by add_timer_randomness() to estimate
 * how much entropy an event delivered (first/second/third-order
 * deltas of the event timestamps).
 */
struct timer_rand_state {
	cycles_t last_time;
	long last_delta, last_delta2;
	unsigned dont_count_entropy:1;	/* mix but credit no entropy */
};

#ifndef CONFIG_GENERIC_HARDIRQS
/* Without generic hardirqs, keep per-IRQ state in a flat array. */
static struct timer_rand_state *irq_timer_state[NR_IRQS];

static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
{
	return irq_timer_state[irq];
}

static void set_timer_rand_state(unsigned int irq,
				 struct timer_rand_state *state)
{
	irq_timer_state[irq] = state;
}

#else
/* With generic hardirqs, the state hangs off the irq descriptor. */
static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);

	return desc->timer_rand_state;
}

static void set_timer_rand_state(unsigned int irq,
				 struct timer_rand_state *state)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);

	desc->timer_rand_state = state;
}
#endif

/* Shared timing state for all input-device events. */
static struct timer_rand_state input_timer_state;
605
606
607
608
609
610
611
612
613
614
615
/*
 * Mix the timing of an event (jiffies, cycle counter, and event code
 * @num) into the input pool, and credit entropy based on how
 * unpredictable the event's timing was.
 */
static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
{
	struct {
		cycles_t cycles;
		long jiffies;
		unsigned num;
	} sample;
	long delta, delta2, delta3;

	preempt_disable();
	/* if over the trickle threshold, keep only 1 sample in 4096 */
	if (input_pool.entropy_count > trickle_thresh &&
	    (__get_cpu_var(trickle_count)++ & 0xfff))
		goto out;

	sample.jiffies = jiffies;
	sample.cycles = get_cycles();
	sample.num = num;
	mix_pool_bytes(&input_pool, &sample, sizeof(sample));

	/*
	 * Calculate the number of bits of randomness we probably
	 * added.  We take the minimum of the absolute first, second
	 * and third order deltas of the event time, so a regular
	 * (even if accelerating) stream of events earns little
	 * credit.
	 */
	if (!state->dont_count_entropy) {
		delta = sample.jiffies - state->last_time;
		state->last_time = sample.jiffies;

		delta2 = delta - state->last_delta;
		state->last_delta = delta;

		delta3 = delta2 - state->last_delta2;
		state->last_delta2 = delta2;

		if (delta < 0)
			delta = -delta;
		if (delta2 < 0)
			delta2 = -delta2;
		if (delta3 < 0)
			delta3 = -delta3;
		if (delta > delta2)
			delta = delta2;
		if (delta > delta3)
			delta = delta3;

		/*
		 * delta is now the minimum absolute delta; credit
		 * roughly log2(delta/2) bits, capped at 11.
		 */
		credit_entropy_bits(&input_pool,
				    min_t(int, fls(delta>>1), 11));
	}
out:
	preempt_enable();
}
674
/*
 * Feed an input-device event (keyboard, mouse, ...) into the entropy
 * pool.  Auto-repeat is filtered by remembering the previous value;
 * note last_value is an unsigned char, so only the low 8 bits of
 * @value take part in the duplicate check.
 */
void add_input_randomness(unsigned int type, unsigned int code,
				 unsigned int value)
{
	static unsigned char last_value;

	/* ignore autorepeat and the like */
	if (value == last_value)
		return;

	DEBUG_ENT("input event\n");
	last_value = value;
	add_timer_randomness(&input_timer_state,
			     (type << 4) ^ code ^ (code >> 4) ^ value);
}
EXPORT_SYMBOL_GPL(add_input_randomness);
690
/*
 * Feed the timing of an interrupt on @irq into the entropy pool.
 * IRQs that were never registered via rand_initialize_irq() have no
 * timing state and are ignored.
 */
void add_interrupt_randomness(int irq)
{
	struct timer_rand_state *state = get_timer_rand_state(irq);

	if (!state)
		return;

	DEBUG_ENT("irq event %d\n", irq);
	add_timer_randomness(state, 0x100 + irq);
}
703
#ifdef CONFIG_BLOCK
/*
 * Feed the timing of a disk-I/O completion on @disk into the entropy
 * pool.  Disks without attached timing state are ignored.
 */
void add_disk_randomness(struct gendisk *disk)
{
	if (!disk || !disk->random)
		return;

	DEBUG_ENT("disk event %d:%d\n",
		  MAJOR(disk_devt(disk)), MINOR(disk_devt(disk)));

	add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
}
#endif
716
/* Bytes produced per extract_buf() call (80 bits of folded SHA-1). */
#define EXTRACT_SIZE 10

/*
 * Forward declaration: extract_entropy() and xfer_secondary_pool()
 * are mutually recursive (a secondary pool pulls from its upstream
 * pool via extract_entropy()).
 */
static ssize_t extract_entropy(struct entropy_store *r, void *buf,
			       size_t nbytes, int min, int rsvd);
727
728
729
730
731
732
/*
 * If pool @r cannot cover an upcoming @nbytes extraction from its own
 * entropy estimate, pull fresh entropy from its upstream (->pull)
 * pool and mix it in.  Unlimited pools (limit == 0) leave a reserve
 * in the upstream pool for /dev/random readers.
 */
static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
{
	__u32 tmp[OUTPUT_POOL_WORDS];

	if (r->pull && r->entropy_count < nbytes * 8 &&
	    r->entropy_count < r->poolinfo->POOLBITS) {
		/* If we're limited, always leave two wakeup worth's BITS */
		int rsvd = r->limit ? 0 : random_read_wakeup_thresh/4;
		int bytes = nbytes;

		/* pull at least as many as BYTES as wakeup BITS */
		bytes = max_t(int, bytes, random_read_wakeup_thresh / 8);
		/* but never more than the buffer size */
		bytes = min_t(int, bytes, sizeof(tmp));

		DEBUG_ENT("going to reseed %s with %d bits "
			  "(%d of %d requested)\n",
			  r->name, bytes * 8, nbytes * 8, r->entropy_count);

		bytes = extract_entropy(r->pull, tmp, bytes,
					random_read_wakeup_thresh / 8, rsvd);
		mix_pool_bytes(r, tmp, bytes);
		credit_entropy_bits(r, bytes*8);
	}
}
758
759
760
761
762
763
764
765
766
767
768
769
770
/*
 * Debit the entropy estimate of @r for an extraction of up to @nbytes
 * bytes and return the number of bytes actually granted.  Returns 0
 * (and debits nothing) unless at least @min + @reserved bytes worth
 * of entropy are available; limited pools are additionally capped so
 * that @reserved bytes worth always remain.  Wakes /dev/random
 * write-pollers when the pool drops below the write threshold.
 */
static size_t account(struct entropy_store *r, size_t nbytes, int min,
		      int reserved)
{
	unsigned long flags;

	/* Hold lock while accounting */
	spin_lock_irqsave(&r->lock, flags);

	BUG_ON(r->entropy_count > r->poolinfo->POOLBITS);
	DEBUG_ENT("trying to extract %d bits from %s\n",
		  nbytes * 8, r->name);

	/* Can we pull enough? */
	if (r->entropy_count / 8 < min + reserved) {
		nbytes = 0;
	} else {
		/* If limited, never pull more than available */
		if (r->limit && nbytes + reserved >= r->entropy_count / 8)
			nbytes = r->entropy_count/8 - reserved;

		if (r->entropy_count / 8 >= nbytes + reserved)
			r->entropy_count -= nbytes*8;
		else
			r->entropy_count = reserved;

		if (r->entropy_count < random_write_wakeup_thresh) {
			wake_up_interruptible(&random_write_wait);
			kill_fasync(&fasync, SIGIO, POLL_OUT);
		}
	}

	DEBUG_ENT("debiting %d entropy credits from %s%s\n",
		  nbytes * 8, r->name, r->limit ? "" : " (unlimited)");

	spin_unlock_irqrestore(&r->lock, flags);

	return nbytes;
}
809
/*
 * Produce EXTRACT_SIZE bytes of output from pool @r: SHA-1 the whole
 * pool, feed the hash back in (anti-backtracking), hash once more
 * over the words at the mix position, then fold the result in half.
 */
static void extract_buf(struct entropy_store *r, __u8 *out)
{
	int i;
	__u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
	__u8 extract[64];

	/* Generate a hash across the pool, 16 words (512 bits) at a time */
	sha_init(hash);
	for (i = 0; i < r->poolinfo->poolwords; i += 16)
		sha_transform(hash, (__u8 *)(r->pool + i), workspace);

	/*
	 * We mix the hash back into the pool to prevent backtracking
	 * attacks (where the attacker knows the state of the pool
	 * plus the current outputs, and attempts to find previous
	 * outputs), unless the hash function can be inverted.  By
	 * mixing at least a SHA1 worth of hash data back, we make
	 * brute-forcing the feedback as hard as brute-forcing the
	 * hash.  The extraction of the pool words is done under the
	 * same lock as the mixing, so it is atomic with respect to
	 * other writers.
	 */
	mix_pool_bytes_extract(r, hash, sizeof(hash), extract);

	/* Hash the extracted pool words to finish the output. */
	sha_transform(hash, extract, workspace);
	memset(extract, 0, sizeof(extract));
	memset(workspace, 0, sizeof(workspace));

	/*
	 * In case the hash function has some recognizable output
	 * pattern, we fold it in half.  Thus, we always feed back
	 * twice as much data as we output.
	 */
	hash[0] ^= hash[3];
	hash[1] ^= hash[4];
	hash[2] ^= rol32(hash[2], 16);
	memcpy(out, hash, EXTRACT_SIZE);
	memset(hash, 0, sizeof(hash));
}
851
/*
 * Extract up to @nbytes of output from pool @r into kernel buffer
 * @buf, reseeding @r from its upstream pool first and debiting the
 * entropy estimate via account().  Returns the number of bytes
 * written.  When fips_enabled, each EXTRACT_SIZE block is compared to
 * the previous one (FIPS 140-2 continuous test) and a repeat panics.
 */
static ssize_t extract_entropy(struct entropy_store *r, void *buf,
			       size_t nbytes, int min, int reserved)
{
	ssize_t ret = 0, i;
	__u8 tmp[EXTRACT_SIZE];
	unsigned long flags;

	xfer_secondary_pool(r, nbytes);
	nbytes = account(r, nbytes, min, reserved);

	while (nbytes) {
		extract_buf(r, tmp);

		if (r->last_data) {
			spin_lock_irqsave(&r->lock, flags);
			if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
				panic("Hardware RNG duplicated output!\n");
			memcpy(r->last_data, tmp, EXTRACT_SIZE);
			spin_unlock_irqrestore(&r->lock, flags);
		}
		i = min_t(int, nbytes, EXTRACT_SIZE);
		memcpy(buf, tmp, i);
		nbytes -= i;
		buf += i;
		ret += i;
	}

	/* Wipe data just returned from memory */
	memset(tmp, 0, sizeof(tmp));

	return ret;
}
884
/*
 * Like extract_entropy(), but copies to a userspace buffer, yields
 * the CPU between blocks when rescheduling is needed, and aborts with
 * -ERESTARTSYS/-EFAULT on pending signal or bad user memory.
 */
static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
				    size_t nbytes)
{
	ssize_t ret = 0, i;
	__u8 tmp[EXTRACT_SIZE];

	xfer_secondary_pool(r, nbytes);
	nbytes = account(r, nbytes, 0, 0);

	while (nbytes) {
		if (need_resched()) {
			if (signal_pending(current)) {
				if (ret == 0)
					ret = -ERESTARTSYS;
				break;
			}
			schedule();
		}

		extract_buf(r, tmp);
		i = min_t(int, nbytes, EXTRACT_SIZE);
		if (copy_to_user(buf, tmp, i)) {
			ret = -EFAULT;
			break;
		}

		nbytes -= i;
		buf += i;
		ret += i;
	}

	/* Wipe data just returned from memory */
	memset(tmp, 0, sizeof(tmp));

	return ret;
}
921
922
923
924
925
926
/*
 * Fill @buf with @nbytes of random data for in-kernel users.  Drawn
 * from the nonblocking pool, so it never sleeps waiting for entropy.
 */
void get_random_bytes(void *buf, int nbytes)
{
	extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
}
EXPORT_SYMBOL(get_random_bytes);
932
933
934
935
936
937
938
939
940
941
942static void init_std_data(struct entropy_store *r)
943{
944 ktime_t now;
945 unsigned long flags;
946
947 spin_lock_irqsave(&r->lock, flags);
948 r->entropy_count = 0;
949 spin_unlock_irqrestore(&r->lock, flags);
950
951 now = ktime_get_real();
952 mix_pool_bytes(r, &now, sizeof(now));
953 mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
954
955 if (fips_enabled)
956 r->last_data = kmalloc(EXTRACT_SIZE, GFP_KERNEL);
957}
958
/*
 * Seed all three pools.  Runs at module init and again from the
 * RNDCLEARPOOL/RNDZAPENTCNT ioctls.
 */
static int rand_initialize(void)
{
	init_std_data(&input_pool);
	init_std_data(&blocking_pool);
	init_std_data(&nonblocking_pool);
	return 0;
}
module_init(rand_initialize);
967
/*
 * Attach entropy-timing state to @irq so its interrupts can feed
 * add_interrupt_randomness().  No-op if already attached; allocation
 * failure simply leaves the IRQ contributing no entropy.
 */
void rand_initialize_irq(int irq)
{
	struct timer_rand_state *state;

	state = get_timer_rand_state(irq);

	if (state)
		return;

	/*
	 * If kzalloc returns null, we just won't use that entropy
	 * source.
	 */
	state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
	if (state)
		set_timer_rand_state(irq, state);
}
985
#ifdef CONFIG_BLOCK
/*
 * Attach entropy-timing state to @disk so its I/O completions can
 * feed add_disk_randomness().  Allocation failure just means the disk
 * contributes no entropy.
 */
void rand_initialize_disk(struct gendisk *disk)
{
	struct timer_rand_state *state;

	/*
	 * If kzalloc returns null, we just won't use that entropy
	 * source.
	 */
	state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
	if (state)
		disk->random = state;
}
#endif
1000
/*
 * read() handler for /dev/random.  Extracts from the blocking pool in
 * chunks of up to SEC_XFER_SIZE; when no entropy is available it
 * either returns -EAGAIN (O_NONBLOCK) or sleeps until the input pool
 * reaches the read threshold.  Note the unconditional break after a
 * successful chunk: a read returns as soon as it gets any data rather
 * than looping for the full request.
 */
static ssize_t
random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
	ssize_t n, retval = 0, count = 0;

	if (nbytes == 0)
		return 0;

	while (nbytes > 0) {
		n = nbytes;
		if (n > SEC_XFER_SIZE)
			n = SEC_XFER_SIZE;

		DEBUG_ENT("reading %d bits\n", n*8);

		n = extract_entropy_user(&blocking_pool, buf, n);

		DEBUG_ENT("read got %d bits (%d still needed)\n",
			  n*8, (nbytes-n)*8);

		if (n == 0) {
			if (file->f_flags & O_NONBLOCK) {
				retval = -EAGAIN;
				break;
			}

			DEBUG_ENT("sleeping?\n");

			wait_event_interruptible(random_read_wait,
				input_pool.entropy_count >=
						 random_read_wakeup_thresh);

			DEBUG_ENT("awake\n");

			if (signal_pending(current)) {
				retval = -ERESTARTSYS;
				break;
			}

			continue;
		}

		if (n < 0) {
			retval = n;
			break;
		}
		count += n;
		buf += n;
		nbytes -= n;
		break;		/* This break makes the device work */
				/* like a named pipe */
	}

	/* Record atime only when something was actually read. */
	if (count)
		file_accessed(file);

	return (count ? count : retval);
}
1062
/* read() handler for /dev/urandom: nonblocking pool, never sleeps. */
static ssize_t
urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
	return extract_entropy_user(&nonblocking_pool, buf, nbytes);
}
1068
1069static unsigned int
1070random_poll(struct file *file, poll_table * wait)
1071{
1072 unsigned int mask;
1073
1074 poll_wait(file, &random_read_wait, wait);
1075 poll_wait(file, &random_write_wait, wait);
1076 mask = 0;
1077 if (input_pool.entropy_count >= random_read_wakeup_thresh)
1078 mask |= POLLIN | POLLRDNORM;
1079 if (input_pool.entropy_count < random_write_wakeup_thresh)
1080 mask |= POLLOUT | POLLWRNORM;
1081 return mask;
1082}
1083
/*
 * Copy @count bytes from userspace buffer @buffer into pool @r in
 * 64-byte chunks.  Mixes data only; credits no entropy (callers that
 * want credit do it separately).  Returns 0 or -EFAULT.
 */
static int
write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
{
	size_t bytes;
	__u32 buf[16];
	const char __user *p = buffer;

	while (count > 0) {
		bytes = min(count, sizeof(buf));
		if (copy_from_user(&buf, p, bytes))
			return -EFAULT;

		count -= bytes;
		p += bytes;

		mix_pool_bytes(r, buf, bytes);
		cond_resched();	/* long writes shouldn't hog the CPU */
	}

	return 0;
}
1105
1106static ssize_t random_write(struct file *file, const char __user *buffer,
1107 size_t count, loff_t *ppos)
1108{
1109 size_t ret;
1110 struct inode *inode = file->f_path.dentry->d_inode;
1111
1112 ret = write_pool(&blocking_pool, buffer, count);
1113 if (ret)
1114 return ret;
1115 ret = write_pool(&nonblocking_pool, buffer, count);
1116 if (ret)
1117 return ret;
1118
1119 inode->i_mtime = current_fs_time(inode->i_sb);
1120 mark_inode_dirty(inode);
1121 return (ssize_t)count;
1122}
1123
/*
 * ioctl() handler for /dev/random and /dev/urandom.  Entropy-count
 * queries are unprivileged; all entropy-modifying commands require
 * CAP_SYS_ADMIN.
 */
static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	int size, ent_count;
	int __user *p = (int __user *)arg;
	int retval;

	switch (cmd) {
	case RNDGETENTCNT:
		/* report the input pool's current entropy estimate */
		if (put_user(input_pool.entropy_count, p))
			return -EFAULT;
		return 0;
	case RNDADDTOENTCNT:
		/* credit entropy without adding data (root only) */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p))
			return -EFAULT;
		credit_entropy_bits(&input_pool, ent_count);
		return 0;
	case RNDADDENTROPY:
		/* struct rand_pool_info: { ent_count; buf_size; buf[] } */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p++))
			return -EFAULT;
		if (ent_count < 0)
			return -EINVAL;
		if (get_user(size, p++))
			return -EFAULT;
		retval = write_pool(&input_pool, (const char __user *)p,
				    size);
		if (retval < 0)
			return retval;
		credit_entropy_bits(&input_pool, ent_count);
		return 0;
	case RNDZAPENTCNT:
	case RNDCLEARPOOL:
		/* Clear the entropy pool counters (reseeds the pools). */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		rand_initialize();
		return 0;
	default:
		return -EINVAL;
	}
}
1169
/* fasync() handler: register/unregister for SIGIO on entropy events. */
static int random_fasync(int fd, struct file *filp, int on)
{
	return fasync_helper(fd, filp, on, &fasync);
}

/* /dev/random: blocking reads, pollable. */
const struct file_operations random_fops = {
	.read  = random_read,
	.write = random_write,
	.poll  = random_poll,
	.unlocked_ioctl = random_ioctl,
	.fasync = random_fasync,
};

/* /dev/urandom: nonblocking reads, no poll needed. */
const struct file_operations urandom_fops = {
	.read  = urandom_read,
	.write = random_write,
	.unlocked_ioctl = random_ioctl,
	.fasync = random_fasync,
};
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
/*
 * Fill @uuid_out with a random (version 4, variant DCE) UUID.
 */
void generate_random_uuid(unsigned char uuid_out[16])
{
	get_random_bytes(uuid_out, 16);
	/* Set UUID version to 4 --- truly random generation */
	uuid_out[6] = (uuid_out[6] & 0x0F) | 0x40;
	/* Set the UUID variant to DCE */
	uuid_out[8] = (uuid_out[8] & 0x3F) | 0x80;
}
EXPORT_SYMBOL(generate_random_uuid);
1209
1210
1211
1212
1213
1214
1215
1216#ifdef CONFIG_SYSCTL
1217
1218#include <linux/sysctl.h>
1219
/* Bounds enforced by proc_dointvec_minmax on the wakeup thresholds. */
static int min_read_thresh = 8, min_write_thresh;
static int max_read_thresh = INPUT_POOL_WORDS * 32;
static int max_write_thresh = INPUT_POOL_WORDS * 32;
/* Boot-time UUID, generated lazily on first read of /proc/sys/kernel/random/boot_id. */
static char sysctl_bootid[16];
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
/*
 * proc handler for the boot_id and uuid sysctls.  If table->data
 * points at storage (boot_id), the UUID is generated once and cached
 * there; with NULL data (uuid) a fresh UUID is produced per read.
 * uuid[8] == 0 marks "not yet generated" (a generated UUID always has
 * the variant bits set in byte 8).
 */
static int proc_do_uuid(ctl_table *table, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	ctl_table fake_table;
	unsigned char buf[64], tmp_uuid[16], *uuid;

	uuid = table->data;
	if (!uuid) {
		uuid = tmp_uuid;
		uuid[8] = 0;
	}
	if (uuid[8] == 0)
		generate_random_uuid(uuid);

	/* 36-char canonical text form plus NUL fits easily in buf[64] */
	sprintf(buf, "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-"
		"%02x%02x%02x%02x%02x%02x",
		uuid[0],  uuid[1],  uuid[2],  uuid[3],
		uuid[4],  uuid[5],  uuid[6],  uuid[7],
		uuid[8],  uuid[9],  uuid[10], uuid[11],
		uuid[12], uuid[13], uuid[14], uuid[15]);
	fake_table.data = buf;
	fake_table.maxlen = sizeof(buf);

	return proc_dostring(&fake_table, write, buffer, lenp, ppos);
}
1259
/*
 * sysctl(2) binary-interface handler for the UUID entries: copies up
 * to 16 raw UUID bytes to the caller.  Read-only (newval is ignored);
 * returns 1 to tell the sysctl core the request is fully handled.
 */
static int uuid_strategy(ctl_table *table,
			 void __user *oldval, size_t __user *oldlenp,
			 void __user *newval, size_t newlen)
{
	unsigned char tmp_uuid[16], *uuid;
	unsigned int len;

	if (!oldval || !oldlenp)
		return 1;

	uuid = table->data;
	if (!uuid) {
		uuid = tmp_uuid;
		uuid[8] = 0;
	}
	if (uuid[8] == 0)
		generate_random_uuid(uuid);

	if (get_user(len, oldlenp))
		return -EFAULT;
	if (len) {
		if (len > 16)
			len = 16;
		if (copy_to_user(oldval, uuid, len) ||
		    put_user(len, oldlenp))
			return -EFAULT;
	}
	return 1;
}
1289
/* Input pool size in bits, exported read-only via sysctl. */
static int sysctl_poolsize = INPUT_POOL_WORDS * 32;

/* /proc/sys/kernel/random/ table. */
ctl_table random_table[] = {
	{
		.ctl_name 	= RANDOM_POOLSIZE,
		.procname	= "poolsize",
		.data		= &sysctl_poolsize,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= RANDOM_ENTROPY_COUNT,
		.procname	= "entropy_avail",
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= &proc_dointvec,
		.data		= &input_pool.entropy_count,
	},
	{
		.ctl_name	= RANDOM_READ_THRESH,
		.procname	= "read_wakeup_threshold",
		.data		= &random_read_wakeup_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &min_read_thresh,
		.extra2		= &max_read_thresh,
	},
	{
		.ctl_name	= RANDOM_WRITE_THRESH,
		.procname	= "write_wakeup_threshold",
		.data		= &random_write_wakeup_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &min_write_thresh,
		.extra2		= &max_write_thresh,
	},
	{
		.ctl_name	= RANDOM_BOOT_ID,
		.procname	= "boot_id",
		.data		= &sysctl_bootid,
		.maxlen		= 16,
		.mode		= 0444,
		.proc_handler	= &proc_do_uuid,
		.strategy	= &uuid_strategy,
	},
	{
		.ctl_name	= RANDOM_UUID,
		.procname	= "uuid",
		.maxlen		= 16,
		.mode		= 0444,
		.proc_handler	= &proc_do_uuid,
		.strategy	= &uuid_strategy,
	},
	{ .ctl_name = 0 }
};
1349#endif
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
/* MD4 auxiliary functions for rounds 1-3. */
#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
#define G(x, y, z) (((x) & (y)) + (((x) ^ (y)) & (z)))
#define H(x, y, z) ((x) ^ (y) ^ (z))

/*
 * One MD4 step: mix f(b,c,d) and a message word into a, then rotate
 * left by s.  All macros below are #undef'ed after use.
 */
#define ROUND(f, a, b, c, d, x, s)	\
	(a += f(b, c, d) + x, a = (a << s) | (a >> (32 - s)))
/* Round constants (K2 and K3 are the standard MD4 values, in octal). */
#define K1 0
#define K2 013240474631UL
#define K3 015666365641UL
1386
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
/*
 * MD4-based hash over a 12-word message with 4-word IV, running the
 * three MD4 rounds over the first 12 words only ("two thirds" of a
 * full 16-word MD4 block) and returning a single mixed word.  Used
 * for IPv6 sequence numbers and ephemeral ports, where the 16 bytes
 * of address material don't fit half_md4_transform's message size.
 */
static __u32 twothirdsMD4Transform(__u32 const buf[4], __u32 const in[12])
{
	__u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3];

	/* Round 1 */
	ROUND(F, a, b, c, d, in[ 0] + K1,  3);
	ROUND(F, d, a, b, c, in[ 1] + K1,  7);
	ROUND(F, c, d, a, b, in[ 2] + K1, 11);
	ROUND(F, b, c, d, a, in[ 3] + K1, 19);
	ROUND(F, a, b, c, d, in[ 4] + K1,  3);
	ROUND(F, d, a, b, c, in[ 5] + K1,  7);
	ROUND(F, c, d, a, b, in[ 6] + K1, 11);
	ROUND(F, b, c, d, a, in[ 7] + K1, 19);
	ROUND(F, a, b, c, d, in[ 8] + K1,  3);
	ROUND(F, d, a, b, c, in[ 9] + K1,  7);
	ROUND(F, c, d, a, b, in[10] + K1, 11);
	ROUND(F, b, c, d, a, in[11] + K1, 19);

	/* Round 2 */
	ROUND(G, a, b, c, d, in[ 1] + K2,  3);
	ROUND(G, d, a, b, c, in[ 3] + K2,  5);
	ROUND(G, c, d, a, b, in[ 5] + K2,  9);
	ROUND(G, b, c, d, a, in[ 7] + K2, 13);
	ROUND(G, a, b, c, d, in[ 9] + K2,  3);
	ROUND(G, d, a, b, c, in[11] + K2,  5);
	ROUND(G, c, d, a, b, in[ 0] + K2,  9);
	ROUND(G, b, c, d, a, in[ 2] + K2, 13);
	ROUND(G, a, b, c, d, in[ 4] + K2,  3);
	ROUND(G, d, a, b, c, in[ 6] + K2,  5);
	ROUND(G, c, d, a, b, in[ 8] + K2,  9);
	ROUND(G, b, c, d, a, in[10] + K2, 13);

	/* Round 3 */
	ROUND(H, a, b, c, d, in[ 3] + K3,  3);
	ROUND(H, d, a, b, c, in[ 7] + K3,  9);
	ROUND(H, c, d, a, b, in[11] + K3, 11);
	ROUND(H, b, c, d, a, in[ 2] + K3, 15);
	ROUND(H, a, b, c, d, in[ 6] + K3,  3);
	ROUND(H, d, a, b, c, in[10] + K3,  9);
	ROUND(H, c, d, a, b, in[ 1] + K3, 11);
	ROUND(H, b, c, d, a, in[ 5] + K3, 15);
	ROUND(H, a, b, c, d, in[ 9] + K3,  3);
	ROUND(H, d, a, b, c, in[ 0] + K3,  9);
	ROUND(H, c, d, a, b, in[ 4] + K3, 11);
	ROUND(H, b, c, d, a, in[ 8] + K3, 15);

	/* only one output word is returned; the rest stay secret */
	return buf[1] + b;
}
#endif

#undef ROUND
#undef F
#undef G
#undef H
#undef K1
#undef K2
#undef K3
1447
1448
/* The sequence-number secret is regenerated this often. */
#define REKEY_INTERVAL (300 * HZ)

/*
 * Sequence-number layout: the low HASH_BITS come from the keyed hash,
 * the top COUNT_BITS from the rekey generation counter, so sequence
 * spaces of successive keys don't collide.
 */
#define COUNT_BITS 8
#define COUNT_MASK ((1 << COUNT_BITS) - 1)
#define HASH_BITS 24
#define HASH_MASK ((1 << HASH_BITS) - 1)

/*
 * Double-buffered key material: ip_cnt's low bit selects the live
 * entry while rekey_seq_generator() refills the other.
 */
static struct keydata {
	__u32 count;		/* already shifted to the final position */
	__u32 secret[12];
} ____cacheline_aligned ip_keydata[2];

/* Rekey generation counter; low bit indexes ip_keydata. */
static unsigned int ip_cnt;

static void rekey_seq_generator(struct work_struct *work);

static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator);
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
/*
 * Periodic worker: refill the inactive keydata entry with fresh
 * random material, then flip ip_cnt to publish it.  The smp_wmb()
 * orders the key/count stores before the ip_cnt increment so readers
 * via get_keyptr() never see a half-written entry.
 */
static void rekey_seq_generator(struct work_struct *work)
{
	struct keydata *keyptr = &ip_keydata[1 ^ (ip_cnt & 1)];

	get_random_bytes(keyptr->secret, sizeof(keyptr->secret));
	keyptr->count = (ip_cnt & COUNT_MASK) << HASH_BITS;
	smp_wmb();
	ip_cnt++;
	schedule_delayed_work(&rekey_work,
			      round_jiffies_relative(REKEY_INTERVAL));
}
1508
/*
 * Return the currently published keydata entry; the smp_rmb() pairs
 * with rekey_seq_generator()'s smp_wmb().
 */
static inline struct keydata *get_keyptr(void)
{
	struct keydata *keyptr = &ip_keydata[ip_cnt & 1];

	smp_rmb();

	return keyptr;
}
1517
/* Generate the first key and arm the periodic rekey work. */
static __init int seqgen_init(void)
{
	rekey_seq_generator(NULL);
	return 0;
}
late_initcall(seqgen_init);
1524
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
/*
 * TCP initial sequence number for an IPv6 connection: keyed MD4 hash
 * of (saddr, daddr, sport, dport), offset by the rekey generation
 * counter and the wall-clock time so ISNs still advance over time.
 */
__u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
				   __be16 sport, __be16 dport)
{
	__u32 seq;
	__u32 hash[12];
	struct keydata *keyptr = get_keyptr();

	/*
	 * The source address, ports, and 7 secret words form the
	 * 12-word message; the destination address is the IV.
	 */
	memcpy(hash, saddr, 16);
	hash[4] = ((__force u16)sport << 16) + (__force u16)dport;
	memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7);

	seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK;
	seq += keyptr->count;
	/* advance the sequence space with real time */
	seq += ktime_to_ns(ktime_get_real());

	return seq;
}
EXPORT_SYMBOL(secure_tcpv6_sequence_number);
#endif
1550
1551
1552
1553
/*
 * Per-destination IP ID: keyed half-MD4 hash of the destination
 * address, so IDs are unpredictable to other hosts but stable per
 * peer within a rekey interval.
 */
__u32 secure_ip_id(__be32 daddr)
{
	struct keydata *keyptr;
	__u32 hash[4];

	keyptr = get_keyptr();

	/*
	 * Pick a unique starting offset for each IP destination.
	 * The dest ip address is placed in the starting vector,
	 * which is then hashed with random data.
	 */
	hash[0] = (__force __u32)daddr;
	hash[1] = keyptr->secret[9];
	hash[2] = keyptr->secret[10];
	hash[3] = keyptr->secret[11];

	return half_md4_transform(hash, keyptr->secret);
}
1573
1574#ifdef CONFIG_INET
1575
/*
 * TCP initial sequence number for an IPv4 connection (RFC 1948
 * style): keyed half-MD4 hash of the 4-tuple, offset by the rekey
 * counter and a clock term.
 */
__u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
				 __be16 sport, __be16 dport)
{
	__u32 seq;
	__u32 hash[4];
	struct keydata *keyptr = get_keyptr();

	/*
	 * Pick a unique starting offset for each TCP connection endpoints
	 * (saddr, daddr, sport, dport).
	 * Note that the words are placed into the starting vector, which is
	 * then mixed with a partial MD4 over random data.
	 */
	hash[0] = (__force u32)saddr;
	hash[1] = (__force u32)daddr;
	hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
	hash[3] = keyptr->secret[11];

	seq = half_md4_transform(hash, keyptr->secret) & HASH_MASK;
	seq += keyptr->count;

	/*
	 * As close as possible to RFC 793, which suggests using a
	 * 250 kHz clock.  Further reading shows this assumes 2 Mb/s
	 * networks.  For 10 Mb/s Ethernet, a 1 MHz clock is
	 * appropriate.  For 10 Gb/s Ethernet, a 1 GHz clock should be
	 * enough; here the nanosecond clock is shifted down by 6
	 * (i.e. 64 ns granularity, roughly 16 MHz).
	 */
	seq += ktime_to_ns(ktime_get_real()) >> 6;

	return seq;
}
1610
1611
/*
 * Starting offset for ephemeral-port selection on an IPv4
 * (saddr, daddr, dport) triple: full 32-bit keyed half-MD4 hash.
 */
u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
{
	struct keydata *keyptr = get_keyptr();
	u32 hash[4];

	/*
	 * Pick a unique starting offset for each ephemeral port search
	 * (saddr, daddr, dport) and 48 bits of random data.
	 */
	hash[0] = (__force u32)saddr;
	hash[1] = (__force u32)daddr;
	hash[2] = (__force u32)dport ^ keyptr->secret[10];
	hash[3] = keyptr->secret[11];

	return half_md4_transform(hash, keyptr->secret);
}
EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral);
1629
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
/*
 * IPv6 counterpart of secure_ipv4_port_ephemeral(): the 16-byte
 * source address needs the 12-word twothirdsMD4Transform message.
 */
u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
			       __be16 dport)
{
	struct keydata *keyptr = get_keyptr();
	u32 hash[12];

	memcpy(hash, saddr, 16);
	hash[4] = (__force u32)dport;
	memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7);

	return twothirdsMD4Transform((const __u32 *)daddr, hash);
}
#endif
1644
#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
/*
 * 48-bit DCCP initial sequence number: like the TCP ISN, a keyed
 * half-MD4 hash over the 4-tuple with the rekey counter in the high
 * bits and a wall-clock offset, masked to DCCP's 48-bit space.
 */
u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
				__be16 sport, __be16 dport)
{
	u64 seq;
	__u32 hash[4];
	struct keydata *keyptr = get_keyptr();

	hash[0] = (__force u32)saddr;
	hash[1] = (__force u32)daddr;
	hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
	hash[3] = keyptr->secret[11];

	seq = half_md4_transform(hash, keyptr->secret);
	seq |= ((u64)keyptr->count) << (32 - HASH_BITS);
	/* advance with real time, then clamp to 48 bits */
	seq += ktime_to_ns(ktime_get_real());
	seq &= (1ull << 48) - 1;

	return seq;
}
EXPORT_SYMBOL(secure_dccp_sequence_number);
#endif
1672
1673#endif
1674
1675
1676
1677
1678
1679
1680
1681
/*
 * Cheap, non-cryptographic random int for in-kernel use (e.g. address
 * randomization): per-CPU hash state perturbed by pid/jiffies/cycles
 * and mixed with the rekeyed secret via half-MD4.  Much weaker than
 * get_random_bytes() but far cheaper.
 */
DEFINE_PER_CPU(__u32 [4], get_random_int_hash);
unsigned int get_random_int(void)
{
	struct keydata *keyptr;
	__u32 *hash = get_cpu_var(get_random_int_hash);
	int ret;

	keyptr = get_keyptr();
	hash[0] += current->pid + jiffies + get_cycles();

	ret = half_md4_transform(hash, keyptr->secret);
	put_cpu_var(get_random_int_hash);

	return ret;
}
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
/*
 * Return a page-aligned random address in [start, end - len], or 0 if
 * the window is too small to randomize.
 *
 * NOTE(review): PAGE_ALIGN rounds the candidate *up*, so an address
 * drawn near end - len can be pushed a partial page past it, letting
 * [addr, addr + len) overrun end by up to PAGE_SIZE - 1 bytes --
 * callers appear to tolerate this; confirm before relying on a strict
 * upper bound.
 */
unsigned long
randomize_range(unsigned long start, unsigned long end, unsigned long len)
{
	unsigned long range = end - len - start;

	if (end <= start + len)
		return 0;
	return PAGE_ALIGN(get_random_int() % range + start);
}
1716