linux/lib/random32.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * This is a maximally equidistributed combined Tausworthe generator
   4 * based on code from GNU Scientific Library 1.5 (30 Jun 2004)
   5 *
   6 * lfsr113 version:
   7 *
   8 * x_n = (s1_n ^ s2_n ^ s3_n ^ s4_n)
   9 *
  10 * s1_{n+1} = (((s1_n & 4294967294) << 18) ^ (((s1_n <<  6) ^ s1_n) >> 13))
  11 * s2_{n+1} = (((s2_n & 4294967288) <<  2) ^ (((s2_n <<  2) ^ s2_n) >> 27))
  12 * s3_{n+1} = (((s3_n & 4294967280) <<  7) ^ (((s3_n << 13) ^ s3_n) >> 21))
  13 * s4_{n+1} = (((s4_n & 4294967168) << 13) ^ (((s4_n <<  3) ^ s4_n) >> 12))
  14 *
  15 * The period of this generator is about 2^113 (see erratum paper).
  16 *
  17 * From: P. L'Ecuyer, "Maximally Equidistributed Combined Tausworthe
  18 * Generators", Mathematics of Computation, 65, 213 (1996), 203--213:
  19 * http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme.ps
  20 * ftp://ftp.iro.umontreal.ca/pub/simulation/lecuyer/papers/tausme.ps
  21 *
  22 * There is an erratum in the paper "Tables of Maximally Equidistributed
  23 * Combined LFSR Generators", Mathematics of Computation, 68, 225 (1999),
  24 * 261--269: http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme2.ps
  25 *
  26 *      ... the k_j most significant bits of z_j must be non-zero,
  27 *      for each j. (Note: this restriction also applies to the
  28 *      computer code given in [4], but was mistakenly not mentioned
  29 *      in that paper.)
  30 *
  31 * This affects the seeding procedure by imposing the requirement
  32 * s1 > 1, s2 > 7, s3 > 15, s4 > 127.
  33 */
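
/*
 * How that restriction is enforced in practice: the __seed() helper used
 * by the seeding code below comes from <linux/prandom.h> and, as a rough
 * sketch (an approximation, not a verbatim copy of the header), behaves
 * like:
 *
 *	static inline u32 __seed(u32 x, u32 m)
 *	{
 *		return (x < m) ? x + m : x;
 *	}
 *
 * so seeding with the thresholds 2, 8, 16 and 128 guarantees
 * s1 >= 2 > 1, s2 >= 8 > 7, s3 >= 16 > 15 and s4 >= 128 > 127.
 */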
  34
  35#include <linux/types.h>
  36#include <linux/percpu.h>
  37#include <linux/export.h>
  38#include <linux/jiffies.h>
  39#include <linux/random.h>
  40#include <linux/sched.h>
  41#include <linux/bitops.h>
  42#include <asm/unaligned.h>
  43#include <trace/events/random.h>
  44
  45/**
  46 *      prandom_u32_state - seeded pseudo-random number generator.
  47 *      @state: pointer to state structure holding seeded state.
  48 *
  49 *      This is used for pseudo-randomness with no outside seeding.
  50 *      For more random results, use prandom_u32().
  51 */
  52u32 prandom_u32_state(struct rnd_state *state)
  53{
  54#define TAUSWORTHE(s, a, b, c, d) ((s & c) << d) ^ (((s << a) ^ s) >> b)
  55        state->s1 = TAUSWORTHE(state->s1,  6U, 13U, 4294967294U, 18U);
  56        state->s2 = TAUSWORTHE(state->s2,  2U, 27U, 4294967288U,  2U);
  57        state->s3 = TAUSWORTHE(state->s3, 13U, 21U, 4294967280U,  7U);
  58        state->s4 = TAUSWORTHE(state->s4,  3U, 12U, 4294967168U, 13U);
  59
  60        return (state->s1 ^ state->s2 ^ state->s3 ^ state->s4);
  61}
  62EXPORT_SYMBOL(prandom_u32_state);
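
/*
 * Minimal usage sketch (the caller below is hypothetical, not part of
 * this file): a reproducible private stream is obtained by seeding a
 * local struct rnd_state and pulling words from it.  prandom_seed_state()
 * is declared in <linux/prandom.h>; how it expands the 64-bit seed into
 * the four state words is assumed here, not shown.
 *
 *	struct rnd_state st;
 *	u32 first, second;
 *
 *	prandom_seed_state(&st, 42);
 *	first  = prandom_u32_state(&st);
 *	second = prandom_u32_state(&st);
 *
 * Reseeding with the same value replays the same sequence, which is the
 * point of the _state variants versus prandom_u32().
 */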
  63
  64/**
  65 *      prandom_bytes_state - get the requested number of pseudo-random bytes
  66 *
  67 *      @state: pointer to state structure holding seeded state.
  68 *      @buf: where to copy the pseudo-random bytes to
  69 *      @bytes: the requested number of bytes
  70 *
  71 *      This is used for pseudo-randomness with no outside seeding.
  72 *      For more random results, use prandom_bytes().
  73 */
  74void prandom_bytes_state(struct rnd_state *state, void *buf, size_t bytes)
  75{
  76        u8 *ptr = buf;
  77
  78        while (bytes >= sizeof(u32)) {
  79                put_unaligned(prandom_u32_state(state), (u32 *) ptr);
  80                ptr += sizeof(u32);
  81                bytes -= sizeof(u32);
  82        }
  83
  84        if (bytes > 0) {
  85                u32 rem = prandom_u32_state(state);
  86                do {
  87                        *ptr++ = (u8) rem;
  88                        bytes--;
  89                        rem >>= BITS_PER_BYTE;
  90                } while (bytes > 0);
  91        }
  92}
  93EXPORT_SYMBOL(prandom_bytes_state);
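
/*
 * Worked example of the tail handling above (the buffer name is made up):
 * given some seeded struct rnd_state st,
 *
 *	u8 cookie[6];
 *
 *	prandom_bytes_state(&st, cookie, sizeof(cookie));
 *
 * consumes two PRNG words.  The first fills cookie[0..3] via
 * put_unaligned(); the second contributes only its two least significant
 * bytes, emitted low byte first as "rem" is shifted by BITS_PER_BYTE.
 */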
  94
  95static void prandom_warmup(struct rnd_state *state)
  96{
   97        /* Call the RNG ten times to satisfy the recurrence condition */
  98        prandom_u32_state(state);
  99        prandom_u32_state(state);
 100        prandom_u32_state(state);
 101        prandom_u32_state(state);
 102        prandom_u32_state(state);
 103        prandom_u32_state(state);
 104        prandom_u32_state(state);
 105        prandom_u32_state(state);
 106        prandom_u32_state(state);
 107        prandom_u32_state(state);
 108}
 109
 110void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state)
 111{
 112        int i;
 113
 114        for_each_possible_cpu(i) {
 115                struct rnd_state *state = per_cpu_ptr(pcpu_state, i);
 116                u32 seeds[4];
 117
 118                get_random_bytes(&seeds, sizeof(seeds));
 119                state->s1 = __seed(seeds[0],   2U);
 120                state->s2 = __seed(seeds[1],   8U);
 121                state->s3 = __seed(seeds[2],  16U);
 122                state->s4 = __seed(seeds[3], 128U);
 123
 124                prandom_warmup(state);
 125        }
 126}
 127EXPORT_SYMBOL(prandom_seed_full_state);
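
/*
 * Typical caller pattern (sketch; the "foo" identifiers are invented for
 * illustration): a subsystem keeps its own per-cpu states and reseeds
 * them all from the CRNG once strong randomness is available.
 *
 *	static DEFINE_PER_CPU(struct rnd_state, foo_rnd_state);
 *
 *	static void foo_reseed(void)
 *	{
 *		prandom_seed_full_state(&foo_rnd_state);
 *	}
 *
 * Note the warmup after seeding: it runs the fresh seed words through the
 * recurrence ten times, as the comment on prandom_warmup() notes, to
 * satisfy the recurrence condition.
 */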
 128
 129#ifdef CONFIG_RANDOM32_SELFTEST
 130static struct prandom_test1 {
 131        u32 seed;
 132        u32 result;
 133} test1[] = {
 134        { 1U, 3484351685U },
 135        { 2U, 2623130059U },
 136        { 3U, 3125133893U },
 137        { 4U,  984847254U },
 138};
 139
 140static struct prandom_test2 {
 141        u32 seed;
 142        u32 iteration;
 143        u32 result;
 144} test2[] = {
 145        /* Test cases against taus113 from GSL library. */
 146        {  931557656U, 959U, 2975593782U },
 147        { 1339693295U, 876U, 3887776532U },
 148        { 1545556285U, 961U, 1615538833U },
 149        {  601730776U, 723U, 1776162651U },
 150        { 1027516047U, 687U,  511983079U },
 151        {  416526298U, 700U,  916156552U },
 152        { 1395522032U, 652U, 2222063676U },
 153        {  366221443U, 617U, 2992857763U },
 154        { 1539836965U, 714U, 3783265725U },
 155        {  556206671U, 994U,  799626459U },
 156        {  684907218U, 799U,  367789491U },
 157        { 2121230701U, 931U, 2115467001U },
 158        { 1668516451U, 644U, 3620590685U },
 159        {  768046066U, 883U, 2034077390U },
 160        { 1989159136U, 833U, 1195767305U },
 161        {  536585145U, 996U, 3577259204U },
 162        { 1008129373U, 642U, 1478080776U },
 163        { 1740775604U, 939U, 1264980372U },
 164        { 1967883163U, 508U,   10734624U },
 165        { 1923019697U, 730U, 3821419629U },
 166        {  442079932U, 560U, 3440032343U },
 167        { 1961302714U, 845U,  841962572U },
 168        { 2030205964U, 962U, 1325144227U },
 169        { 1160407529U, 507U,  240940858U },
 170        {  635482502U, 779U, 4200489746U },
 171        { 1252788931U, 699U,  867195434U },
 172        { 1961817131U, 719U,  668237657U },
 173        { 1071468216U, 983U,  917876630U },
 174        { 1281848367U, 932U, 1003100039U },
 175        {  582537119U, 780U, 1127273778U },
 176        { 1973672777U, 853U, 1071368872U },
 177        { 1896756996U, 762U, 1127851055U },
 178        {  847917054U, 500U, 1717499075U },
 179        { 1240520510U, 951U, 2849576657U },
 180        { 1685071682U, 567U, 1961810396U },
 181        { 1516232129U, 557U,    3173877U },
 182        { 1208118903U, 612U, 1613145022U },
 183        { 1817269927U, 693U, 4279122573U },
 184        { 1510091701U, 717U,  638191229U },
 185        {  365916850U, 807U,  600424314U },
 186        {  399324359U, 702U, 1803598116U },
 187        { 1318480274U, 779U, 2074237022U },
 188        {  697758115U, 840U, 1483639402U },
 189        { 1696507773U, 840U,  577415447U },
 190        { 2081979121U, 981U, 3041486449U },
 191        {  955646687U, 742U, 3846494357U },
 192        { 1250683506U, 749U,  836419859U },
 193        {  595003102U, 534U,  366794109U },
 194        {   47485338U, 558U, 3521120834U },
 195        {  619433479U, 610U, 3991783875U },
 196        {  704096520U, 518U, 4139493852U },
 197        { 1712224984U, 606U, 2393312003U },
 198        { 1318233152U, 922U, 3880361134U },
 199        {  855572992U, 761U, 1472974787U },
 200        {   64721421U, 703U,  683860550U },
 201        {  678931758U, 840U,  380616043U },
 202        {  692711973U, 778U, 1382361947U },
 203        {  677703619U, 530U, 2826914161U },
 204        {   92393223U, 586U, 1522128471U },
 205        { 1222592920U, 743U, 3466726667U },
 206        {  358288986U, 695U, 1091956998U },
 207        { 1935056945U, 958U,  514864477U },
 208        {  735675993U, 990U, 1294239989U },
 209        { 1560089402U, 897U, 2238551287U },
 210        {   70616361U, 829U,   22483098U },
 211        {  368234700U, 731U, 2913875084U },
 212        {   20221190U, 879U, 1564152970U },
 213        {  539444654U, 682U, 1835141259U },
 214        { 1314987297U, 840U, 1801114136U },
 215        { 2019295544U, 645U, 3286438930U },
 216        {  469023838U, 716U, 1637918202U },
 217        { 1843754496U, 653U, 2562092152U },
 218        {  400672036U, 809U, 4264212785U },
 219        {  404722249U, 965U, 2704116999U },
 220        {  600702209U, 758U,  584979986U },
 221        {  519953954U, 667U, 2574436237U },
 222        { 1658071126U, 694U, 2214569490U },
 223        {  420480037U, 749U, 3430010866U },
 224        {  690103647U, 969U, 3700758083U },
 225        { 1029424799U, 937U, 3787746841U },
 226        { 2012608669U, 506U, 3362628973U },
 227        { 1535432887U, 998U,   42610943U },
 228        { 1330635533U, 857U, 3040806504U },
 229        { 1223800550U, 539U, 3954229517U },
 230        { 1322411537U, 680U, 3223250324U },
 231        { 1877847898U, 945U, 2915147143U },
 232        { 1646356099U, 874U,  965988280U },
 233        {  805687536U, 744U, 4032277920U },
 234        { 1948093210U, 633U, 1346597684U },
 235        {  392609744U, 783U, 1636083295U },
 236        {  690241304U, 770U, 1201031298U },
 237        { 1360302965U, 696U, 1665394461U },
 238        { 1220090946U, 780U, 1316922812U },
 239        {  447092251U, 500U, 3438743375U },
 240        { 1613868791U, 592U,  828546883U },
 241        {  523430951U, 548U, 2552392304U },
 242        {  726692899U, 810U, 1656872867U },
 243        { 1364340021U, 836U, 3710513486U },
 244        { 1986257729U, 931U,  935013962U },
 245        {  407983964U, 921U,  728767059U },
 246};
 247
 248static u32 __extract_hwseed(void)
 249{
 250        unsigned int val = 0;
 251
 252        (void)(arch_get_random_seed_int(&val) ||
 253               arch_get_random_int(&val));
 254
 255        return val;
 256}
 257
 258static void prandom_seed_early(struct rnd_state *state, u32 seed,
 259                               bool mix_with_hwseed)
 260{
 261#define LCG(x)   ((x) * 69069U) /* super-duper LCG */
 262#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0)
 263        state->s1 = __seed(HWSEED() ^ LCG(seed),        2U);
 264        state->s2 = __seed(HWSEED() ^ LCG(state->s1),   8U);
 265        state->s3 = __seed(HWSEED() ^ LCG(state->s2),  16U);
 266        state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U);
 267}
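
/*
 * The 69069 multiplier is Marsaglia's classic "super-duper" LCG constant;
 * the chain above only has to spread one 32-bit seed across four state
 * words before prandom_warmup() mixes them further.  Worked first step
 * for seed = 1 with hardware mixing disabled (HWSEED() == 0, and assuming
 * __seed() merely bumps values below the threshold): LCG(1) = 69069,
 * which is already >= 2, so s1 = 69069.
 */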
 268
 269static int __init prandom_state_selftest(void)
 270{
 271        int i, j, errors = 0, runs = 0;
 272        bool error = false;
 273
 274        for (i = 0; i < ARRAY_SIZE(test1); i++) {
 275                struct rnd_state state;
 276
 277                prandom_seed_early(&state, test1[i].seed, false);
 278                prandom_warmup(&state);
 279
 280                if (test1[i].result != prandom_u32_state(&state))
 281                        error = true;
 282        }
 283
 284        if (error)
 285                pr_warn("prandom: seed boundary self test failed\n");
 286        else
 287                pr_info("prandom: seed boundary self test passed\n");
 288
 289        for (i = 0; i < ARRAY_SIZE(test2); i++) {
 290                struct rnd_state state;
 291
 292                prandom_seed_early(&state, test2[i].seed, false);
 293                prandom_warmup(&state);
 294
 295                for (j = 0; j < test2[i].iteration - 1; j++)
 296                        prandom_u32_state(&state);
 297
 298                if (test2[i].result != prandom_u32_state(&state))
 299                        errors++;
 300
 301                runs++;
 302                cond_resched();
 303        }
 304
 305        if (errors)
 306                pr_warn("prandom: %d/%d self tests failed\n", errors, runs);
 307        else
 308                pr_info("prandom: %d self tests passed\n", runs);
 309        return 0;
 310}
 311core_initcall(prandom_state_selftest);
 312#endif
 313
 314/*
 315 * The prandom_u32() implementation is now completely separate from the
 316 * prandom_state() functions, which are retained (for now) for compatibility.
 317 *
 318 * Because of (ab)use in the networking code for choosing random TCP/UDP port
 319 * numbers, which open DoS possibilities if guessable, we want something
 320 * stronger than a standard PRNG.  But the performance requirements of
 321 * the network code do not allow robust crypto for this application.
 322 *
 323 * So this is a homebrew Junior Spaceman implementation, based on the
 324 * lowest-latency trustworthy crypto primitive available, SipHash.
 325 * (The authors of SipHash have not been consulted about this abuse of
 326 * their work.)
 327 *
 328 * Standard SipHash-2-4 uses 2n+4 rounds to hash n words of input to
 329 * one word of output.  This abbreviated version uses 2 rounds per word
 330 * of output.
 331 */
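
/*
 * PRND_SIPROUND and the PRND_K0/PRND_K1 key constants are provided by
 * <linux/prandom.h>.  On 64-bit kernels the round is expected to be the
 * standard SipHash permutation, i.e. roughly (a sketch, not a verbatim
 * copy of the macro):
 *
 *	v0 += v1; v1 = rol64(v1, 13); v1 ^= v0; v0 = rol64(v0, 32);
 *	v2 += v3; v3 = rol64(v3, 16); v3 ^= v2;
 *	v0 += v3; v3 = rol64(v3, 21); v3 ^= v0;
 *	v2 += v1; v1 = rol64(v1, 17); v1 ^= v2; v2 = rol64(v2, 32);
 *
 * 32-bit kernels are expected to use the HalfSipHash-style round on
 * 32-bit words instead.
 */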
 332
 333struct siprand_state {
 334        unsigned long v0;
 335        unsigned long v1;
 336        unsigned long v2;
 337        unsigned long v3;
 338};
 339
 340static DEFINE_PER_CPU(struct siprand_state, net_rand_state) __latent_entropy;
 341DEFINE_PER_CPU(unsigned long, net_rand_noise);
 342EXPORT_PER_CPU_SYMBOL(net_rand_noise);
 343
 344/*
 345 * This is the core CPRNG function.  As "pseudorandom", this is not used
 346 * for truly valuable things, just intended to be a PITA to guess.
 347 * For maximum speed, we do just two SipHash rounds per word.  This is
 348 * the same rate as 4 rounds per 64 bits that SipHash normally uses,
 349 * so hopefully it's reasonably secure.
 350 *
 351 * There are two changes from the official SipHash finalization:
 352 * - We omit some constants XORed with v2 in the SipHash spec as irrelevant;
 353 *   they are there only to make the output rounds distinct from the input
 354 *   rounds, and this application has no input rounds.
 355 * - Rather than returning v0^v1^v2^v3, return v1+v3.
 356 *   If you look at the SipHash round, the last operation on v3 is
 357 *   "v3 ^= v0", so "v0 ^ v3" just undoes that, a waste of time.
 358 *   Likewise "v1 ^= v2".  (The rotate of v2 makes a difference, but
 359 *   it still cancels out half of the bits in v2 for no benefit.)
 360 *   Second, since the last combining operation was xor, continue the
 361 *   pattern of alternating xor/add for a tiny bit of extra non-linearity.
 362 */
 363static inline u32 siprand_u32(struct siprand_state *s)
 364{
 365        unsigned long v0 = s->v0, v1 = s->v1, v2 = s->v2, v3 = s->v3;
 366        unsigned long n = raw_cpu_read(net_rand_noise);
 367
 368        v3 ^= n;
 369        PRND_SIPROUND(v0, v1, v2, v3);
 370        PRND_SIPROUND(v0, v1, v2, v3);
 371        v0 ^= n;
 372        s->v0 = v0;  s->v1 = v1;  s->v2 = v2;  s->v3 = v3;
 373        return v1 + v3;
 374}
 375
 376
 377/**
 378 *      prandom_u32 - pseudo random number generator
 379 *
  380 *      A 32-bit pseudo-random number is generated using a fast
 381 *      algorithm suitable for simulation. This algorithm is NOT
 382 *      considered safe for cryptographic use.
 383 */
 384u32 prandom_u32(void)
 385{
 386        struct siprand_state *state = get_cpu_ptr(&net_rand_state);
 387        u32 res = siprand_u32(state);
 388
 389        trace_prandom_u32(res);
 390        put_cpu_ptr(&net_rand_state);
 391        return res;
 392}
 393EXPORT_SYMBOL(prandom_u32);
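
/*
 * For a bounded draw, <linux/prandom.h> provides prandom_u32_max(), used
 * by the reseed timer below; assuming its usual definition, it maps a
 * full 32-bit word into [0, ep_ro) with a multiply and shift instead of
 * a modulo:
 *
 *	static inline u32 prandom_u32_max(u32 ep_ro)
 *	{
 *		return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
 *	}
 *
 * The tiny bias for ranges that do not divide 2^32 is acceptable for the
 * non-cryptographic uses targeted here.
 */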
 394
 395/**
 396 *      prandom_bytes - get the requested number of pseudo-random bytes
 397 *      @buf: where to copy the pseudo-random bytes to
 398 *      @bytes: the requested number of bytes
 399 */
 400void prandom_bytes(void *buf, size_t bytes)
 401{
 402        struct siprand_state *state = get_cpu_ptr(&net_rand_state);
 403        u8 *ptr = buf;
 404
 405        while (bytes >= sizeof(u32)) {
 406                put_unaligned(siprand_u32(state), (u32 *)ptr);
 407                ptr += sizeof(u32);
 408                bytes -= sizeof(u32);
 409        }
 410
 411        if (bytes > 0) {
 412                u32 rem = siprand_u32(state);
 413
 414                do {
 415                        *ptr++ = (u8)rem;
 416                        rem >>= BITS_PER_BYTE;
 417                } while (--bytes > 0);
 418        }
 419        put_cpu_ptr(&net_rand_state);
 420}
 421EXPORT_SYMBOL(prandom_bytes);
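
/*
 * Both prandom_u32() and prandom_bytes() wrap the state access in
 * get_cpu_ptr()/put_cpu_ptr(), which disables preemption for the
 * duration, so each CPU advances its own siprand_state without racing
 * against other tasks on that CPU.  Interrupt-context callers can still
 * interleave; as the reseeding comment below notes, occasional
 * scrambling of a non-cryptographic state is tolerated.
 */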
 422
 423/**
 424 *      prandom_seed - add entropy to pseudo random number generator
 425 *      @entropy: entropy value
 426 *
 427 *      Add some additional seed material to the prandom pool.
 428 *      The "entropy" is actually our IP address (the only caller is
 429 *      the network code), not for unpredictability, but to ensure that
 430 *      different machines are initialized differently.
 431 */
 432void prandom_seed(u32 entropy)
 433{
 434        int i;
 435
 436        add_device_randomness(&entropy, sizeof(entropy));
 437
 438        for_each_possible_cpu(i) {
 439                struct siprand_state *state = per_cpu_ptr(&net_rand_state, i);
 440                unsigned long v0 = state->v0, v1 = state->v1;
 441                unsigned long v2 = state->v2, v3 = state->v3;
 442
 443                do {
 444                        v3 ^= entropy;
 445                        PRND_SIPROUND(v0, v1, v2, v3);
 446                        PRND_SIPROUND(v0, v1, v2, v3);
 447                        v0 ^= entropy;
 448                } while (unlikely(!v0 || !v1 || !v2 || !v3));
 449
 450                WRITE_ONCE(state->v0, v0);
 451                WRITE_ONCE(state->v1, v1);
 452                WRITE_ONCE(state->v2, v2);
 453                WRITE_ONCE(state->v3, v3);
 454        }
 455}
 456EXPORT_SYMBOL(prandom_seed);
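
/*
 * The re-mix loop above guards against zero words because an all-zero
 * (v0, v1, v2, v3) is a fixed point of the SipHash round itself: adds,
 * rotates and xors of zero words all yield zero, so without fresh noise
 * the output v1 + v3 would be stuck at zero forever.  Rejecting any
 * state containing a zero word keeps the generator away from that trap.
 */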
 457
 458/*
 459 *      Generate some initially weak seeding values to allow
 460 *      the prandom_u32() engine to be started.
 461 */
 462static int __init prandom_init_early(void)
 463{
 464        int i;
 465        unsigned long v0, v1, v2, v3;
 466
 467        if (!arch_get_random_long(&v0))
 468                v0 = jiffies;
 469        if (!arch_get_random_long(&v1))
 470                v1 = random_get_entropy();
 471        v2 = v0 ^ PRND_K0;
 472        v3 = v1 ^ PRND_K1;
 473
 474        for_each_possible_cpu(i) {
 475                struct siprand_state *state;
 476
 477                v3 ^= i;
 478                PRND_SIPROUND(v0, v1, v2, v3);
 479                PRND_SIPROUND(v0, v1, v2, v3);
 480                v0 ^= i;
 481
 482                state = per_cpu_ptr(&net_rand_state, i);
 483                state->v0 = v0;  state->v1 = v1;
 484                state->v2 = v2;  state->v3 = v3;
 485        }
 486
 487        return 0;
 488}
 489core_initcall(prandom_init_early);
 490
 491
  492/* Stronger reseeding once strong random numbers are available, and periodically thereafter. */
 493static void prandom_reseed(struct timer_list *unused);
 494
 495static DEFINE_TIMER(seed_timer, prandom_reseed);
 496
 497static void prandom_reseed(struct timer_list *unused)
 498{
 499        unsigned long expires;
 500        int i;
 501
 502        /*
 503         * Reinitialize each CPU's PRNG with 128 bits of key.
 504         * No locking on the CPUs, but then somewhat random results are,
 505         * well, expected.
 506         */
 507        for_each_possible_cpu(i) {
 508                struct siprand_state *state;
 509                unsigned long v0 = get_random_long(), v2 = v0 ^ PRND_K0;
 510                unsigned long v1 = get_random_long(), v3 = v1 ^ PRND_K1;
 511#if BITS_PER_LONG == 32
 512                int j;
 513
 514                /*
 515                 * On 32-bit machines, hash in two extra words to
 516                 * approximate 128-bit key length.  Not that the hash
 517                 * has that much security, but this prevents a trivial
 518                 * 64-bit brute force.
 519                 */
 520                for (j = 0; j < 2; j++) {
 521                        unsigned long m = get_random_long();
 522
 523                        v3 ^= m;
 524                        PRND_SIPROUND(v0, v1, v2, v3);
 525                        PRND_SIPROUND(v0, v1, v2, v3);
 526                        v0 ^= m;
 527                }
 528#endif
 529                /*
 530                 * Probably impossible in practice, but there is a
 531                 * theoretical risk that a race between this reseeding
 532                 * and the target CPU writing its state back could
 533                 * create the all-zero SipHash fixed point.
 534                 *
 535                 * To ensure that never happens, ensure the state
 536                 * we write contains no zero words.
 537                 */
 538                state = per_cpu_ptr(&net_rand_state, i);
 539                WRITE_ONCE(state->v0, v0 ? v0 : -1ul);
 540                WRITE_ONCE(state->v1, v1 ? v1 : -1ul);
 541                WRITE_ONCE(state->v2, v2 ? v2 : -1ul);
 542                WRITE_ONCE(state->v3, v3 ? v3 : -1ul);
 543        }
 544
 545        /* reseed every ~60 seconds, in [40 .. 80) interval with slack */
 546        expires = round_jiffies(jiffies + 40 * HZ + prandom_u32_max(40 * HZ));
 547        mod_timer(&seed_timer, expires);
 548}
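
/*
 * Interval arithmetic for the timer above: 40 * HZ is 40 seconds worth
 * of jiffies, prandom_u32_max(40 * HZ) adds anywhere from 0 to just
 * under 40 more seconds, and round_jiffies() only rounds the result to
 * a whole-second boundary so unrelated timers can batch.  Reseeds thus
 * land every 40-80 seconds, ~60 seconds apart on average.
 */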
 549
 550/*
 551 * The random ready callback can be called from almost any interrupt.
 552 * To avoid worrying about whether it's safe to delay that interrupt
 553 * long enough to seed all CPUs, just schedule an immediate timer event.
 554 */
 555static void prandom_timer_start(struct random_ready_callback *unused)
 556{
 557        mod_timer(&seed_timer, jiffies);
 558}
 559
 560#ifdef CONFIG_RANDOM32_SELFTEST
  561/* Principle: Independent 32-bit random numbers differ in 16 bits on average.
  562 * For each 32-bit number there are 601M others differing from it by exactly
  563 * 16 bits, and ~89% of pairs differ by 12 to 20 bits. Note that more than 16
  564 * differing bits also implies a correlation with inverted bits. Thus we take
  565 * 1024 random numbers, compare each to the others, and measure how far each
  566 * pairwise bit difference deviates from 16. Constants report 32, counters
  567 * about 32-log2(TEST_SIZE), and truly random data around 6 or lower. With
  568 * the u32 total, TEST_SIZE may be as large as 4096 samples.
  569 */
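
/*
 * The numbers behind the principle above: the Hamming distance between
 * two independent 32-bit words follows a binomial(32, 1/2) distribution,
 * so C(32, 16) = 601,080,390 (~601M) values sit exactly 16 bits away
 * from any given word, and roughly 89% of pairs land within 4 bits of
 * the expected 16, i.e. differ by 12 to 20 bits.
 */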
 570#define TEST_SIZE 1024
 571static int __init prandom32_state_selftest(void)
 572{
 573        unsigned int x, y, bits, samples;
 574        u32 xor, flip;
 575        u32 total;
 576        u32 *data;
 577
 578        data = kmalloc(sizeof(*data) * TEST_SIZE, GFP_KERNEL);
 579        if (!data)
 580                return 0;
 581
 582        for (samples = 0; samples < TEST_SIZE; samples++)
 583                data[samples] = prandom_u32();
 584
 585        flip = total = 0;
 586        for (x = 0; x < samples; x++) {
 587                for (y = 0; y < samples; y++) {
 588                        if (x == y)
 589                                continue;
 590                        xor = data[x] ^ data[y];
 591                        flip |= xor;
 592                        bits = hweight32(xor);
 593                        total += (bits - 16) * (bits - 16);
 594                }
 595        }
 596
  597        /* Report the average deviation as 2*sqrt(total/pairs), computed as
  598         * int_sqrt(4*total/pairs) to keep an extra bit of resolution.
  599         */
 600        bits = int_sqrt(total / (samples * (samples - 1)) * 4);
 601        if (bits > 6)
 602                pr_warn("prandom32: self test failed (at least %u bits"
  603                        " correlated), fixed_mask=%#x fixed_value=%#x\n",
 604                        bits, ~flip, data[0] & ~flip);
 605        else
 606                pr_info("prandom32: self test passed (less than %u bits"
 607                        " correlated)\n",
 608                        bits+1);
 609        kfree(data);
 610        return 0;
 611}
 612core_initcall(prandom32_state_selftest);
 613#endif /*  CONFIG_RANDOM32_SELFTEST */
 614
 615/*
 616 * Start periodic full reseeding as soon as strong
 617 * random numbers are available.
 618 */
 619static int __init prandom_init_late(void)
 620{
 621        static struct random_ready_callback random_ready = {
 622                .func = prandom_timer_start
 623        };
 624        int ret = add_random_ready_callback(&random_ready);
 625
 626        if (ret == -EALREADY) {
 627                prandom_timer_start(&random_ready);
 628                ret = 0;
 629        }
 630        return ret;
 631}
 632late_initcall(prandom_init_late);
 633