linux/drivers/char/random.c
/*
 * random.c -- A strong random number generator
 *
 * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All
 * Rights Reserved.
 *
 * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
 *
 * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999.  All
 * rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, and the entire permission notice in its entirety,
 *    including the disclaimer of warranties.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior
 *    written permission.
 *
 * ALTERNATIVELY, this product may be distributed under the terms of
 * the GNU General Public License, in which case the provisions of the GPL are
 * required INSTEAD OF the above restrictions.  (This clause is
 * necessary due to a potential bad interaction between the GPL and
 * the restrictions contained in a BSD-style copyright.)
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
 * WHICH ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

/*
 * (now, with legal B.S. out of the way.....)
 *
 * This routine gathers environmental noise from device drivers, etc.,
 * and returns good random numbers, suitable for cryptographic use.
 * Besides the obvious cryptographic uses, these numbers are also good
 * for seeding TCP sequence numbers, and other places where it is
 * desirable to have numbers which are not only random, but hard to
 * predict by an attacker.
 *
 * Theory of operation
 * ===================
 *
 * Computers are very predictable devices.  Hence it is extremely hard
 * to produce truly random numbers on a computer --- as opposed to
 * pseudo-random numbers, which can easily be generated by an
 * algorithm.  Unfortunately, it is very easy for attackers to guess
 * the sequence produced by a pseudo-random number generator, and for some
 * applications this is not acceptable.  So instead, we must try to
 * gather "environmental noise" from the computer's environment, which
 * must be hard for outside attackers to observe, and use that to
 * generate random numbers.  In a Unix environment, this is best done
 * from inside the kernel.
 *
 * Sources of randomness from the environment include inter-keyboard
 * timings, inter-interrupt timings from some interrupts, and other
 * events which are both (a) non-deterministic and (b) hard for an
 * outside observer to measure.  Randomness from these sources is
 * added to an "entropy pool", which is mixed using a CRC-like function.
 * This is not cryptographically strong, but it is adequate assuming
 * the randomness is not chosen maliciously, and it is fast enough that
 * the overhead of doing it on every interrupt is very reasonable.
 * As random bytes are mixed into the entropy pool, the routines keep
 * an *estimate* of how many bits of randomness have been stored into
 * the random number generator's internal state.
 *
 * When random bytes are desired, they are obtained by taking the SHA
 * hash of the contents of the "entropy pool".  The SHA hash avoids
 * exposing the internal state of the entropy pool.  It is believed to
 * be computationally infeasible to derive any useful information
 * about the input of SHA from its output.  Even if it is possible to
 * analyze SHA in some clever way, as long as the amount of data
 * returned from the generator is less than the inherent entropy in
 * the pool, the output data is totally unpredictable.  For this
 * reason, the routine decreases its internal estimate of how many
 * bits of "true randomness" are contained in the entropy pool as it
 * outputs random numbers.
 *
 * If this estimate goes to zero, the routine can still generate
 * random numbers; however, an attacker may (at least in theory) be
 * able to infer the future output of the generator from prior
 * outputs.  This requires successful cryptanalysis of SHA, which is
 * not believed to be feasible, but there is a remote possibility.
 * Nonetheless, these numbers should be useful for the vast majority
 * of purposes.
 *
 * Exported interfaces ---- output
 * ===============================
 *
 * There are three exported interfaces; the first is one designed to
 * be used from within the kernel:
 *
 *      void get_random_bytes(void *buf, int nbytes);
 *
 * This interface will return the requested number of random bytes
 * and place them in the requested buffer.
 *
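 * For example, a kernel-internal consumer might fill a session key
 * like this (an illustrative sketch only):
 *
 *      u8 key[32];
 *
 *      get_random_bytes(key, sizeof(key));
 *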
 * The two other interfaces are two character devices /dev/random and
 * /dev/urandom.  /dev/random is suitable for use when very high
 * quality randomness is desired (for example, for key generation or
 * one-time pads), as it will only return a maximum of the number of
 * bits of randomness (as estimated by the random number generator)
 * contained in the entropy pool.
 *
 * The /dev/urandom device does not have this limit, and will return
 * as many bytes as are requested.  As more and more random bytes are
 * requested without giving time for the entropy pool to recharge,
 * this will result in random numbers that are merely cryptographically
 * strong.  For many applications, however, this is acceptable.
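 *
 * For example, a userspace consumer might read /dev/urandom like this
 * (a minimal sketch; a careful reader would also handle short reads
 * and EINTR, and use_bytes() is a hypothetical placeholder):
 *
 *      int fd = open("/dev/urandom", O_RDONLY);
 *      unsigned char buf[16];
 *
 *      if (fd >= 0 && read(fd, buf, sizeof(buf)) == sizeof(buf))
 *              use_bytes(buf, sizeof(buf));
 *      close(fd);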
 *
 * Exported interfaces ---- input
 * ==============================
 *
 * The current exported interfaces for gathering environmental noise
 * from the devices are:
 *
 *      void add_device_randomness(const void *buf, unsigned int size);
 *      void add_input_randomness(unsigned int type, unsigned int code,
 *                                unsigned int value);
 *      void add_interrupt_randomness(int irq, int irq_flags);
 *      void add_disk_randomness(struct gendisk *disk);
 *
 * add_device_randomness() is for adding data to the random pool that
 * is likely to differ between two devices (or possibly even per boot).
 * This would be things like MAC addresses or serial numbers, or the
 * read-out of the RTC. This does *not* add any actual entropy to the
 * pool, but it initializes the pool to different values for devices
 * that might otherwise be identical and have very little entropy
 * available to them (particularly common in the embedded world).
 *
 * add_input_randomness() uses the input layer interrupt timing, as well as
 * the event type information from the hardware.
 *
 * add_interrupt_randomness() uses the interrupt timing as random
 * inputs to the entropy pool. Using the cycle counters and the irq source
 * as inputs, it feeds the randomness roughly once a second.
 *
 * add_disk_randomness() uses what amounts to the seek time of block
 * layer request events, on a per-disk_devt basis, as input to the
 * entropy pool. Note that high-speed solid state drives with very low
 * seek times do not make for good sources of entropy, as their seek
 * times are usually fairly consistent.
 *
 * All of these routines try to estimate how many bits of randomness
 * each particular randomness source contributes.  They do this by
 * keeping track of the first and second order deltas of the event
 * timings.
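 *
 * For example, a network driver might mix its MAC address into the
 * pool at probe time (an illustrative sketch; "dev" stands for a
 * hypothetical struct net_device pointer):
 *
 *      add_device_randomness(dev->dev_addr, ETH_ALEN);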
 *
 * Ensuring unpredictability at system startup
 * ============================================
 *
 * When any operating system starts up, it will go through a sequence
 * of actions that are fairly predictable by an adversary, especially
 * if the start-up does not involve interaction with a human operator.
 * This reduces the actual number of bits of unpredictability in the
 * entropy pool below the value in entropy_count.  In order to
 * counteract this effect, it helps to carry information in the
 * entropy pool across shut-downs and start-ups.  To do this, put the
 * following lines in an appropriate script which is run during the
 * boot sequence:
 *
 *      echo "Initializing random number generator..."
 *      random_seed=/var/run/random-seed
 *      # Carry a random seed from start-up to start-up
 *      # Load and then save the whole entropy pool
 *      if [ -f $random_seed ]; then
 *              cat $random_seed >/dev/urandom
 *      else
 *              touch $random_seed
 *      fi
 *      chmod 600 $random_seed
 *      dd if=/dev/urandom of=$random_seed count=1 bs=512
 *
 * and the following lines in an appropriate script which is run as
 * the system is shut down:
 *
 *      # Carry a random seed from shut-down to start-up
 *      # Save the whole entropy pool
 *      echo "Saving random seed..."
 *      random_seed=/var/run/random-seed
 *      touch $random_seed
 *      chmod 600 $random_seed
 *      dd if=/dev/urandom of=$random_seed count=1 bs=512
 *
 * For example, on most modern systems using the System V init
 * scripts, such code fragments would be found in
 * /etc/rc.d/init.d/random.  On older Linux systems, the correct script
 * location might be in /etc/rc.d/rc.local or /etc/rc.d/rc.0.
 *
 * Effectively, these commands cause the contents of the entropy pool
 * to be saved at shut-down time and reloaded into the entropy pool at
 * start-up.  (The 'dd' in the addition to the bootup script is to
 * make sure that /etc/random-seed is different for every start-up,
 * even if the system crashes without executing rc.0.)  Even with
 * complete knowledge of the start-up activities, predicting the state
 * of the entropy pool requires knowledge of the previous history of
 * the system.
 *
 * Configuring the /dev/random driver under Linux
 * ==============================================
 *
 * The /dev/random driver under Linux uses minor numbers 8 and 9 of
 * the /dev/mem major number (#1).  So if your system does not have
 * /dev/random and /dev/urandom created already, they can be created
 * by using the commands:
 *
 *      mknod /dev/random c 1 8
 *      mknod /dev/urandom c 1 9
 *
 * Acknowledgements:
 * =================
 *
 * Ideas for constructing this random number generator were derived
 * from Pretty Good Privacy's random number generator, and from private
 * discussions with Phil Karn.  Colin Plumb provided a faster random
 * number generator, which sped up the mixing function of the entropy
 * pool, taken from PGPfone.  Dale Worley has also contributed many
 * useful ideas and suggestions to improve this driver.
 *
 * Any flaws in the design are solely my responsibility, and should
 * not be attributed to Phil, Colin, or any of the authors of PGP.
 *
 * Further background information on this topic may be obtained from
 * RFC 1750, "Randomness Recommendations for Security", by Donald
 * Eastlake, Steve Crocker, and Jeff Schiller.
 */

#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
#include <linux/cryptohash.h>
#include <linux/fips.h>
#include <linux/ptrace.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/uuid.h>
#include <crypto/chacha.h>

#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/random.h>

/* #define ADD_INTERRUPT_BENCH */

/*
 * Configuration information
 */
#define INPUT_POOL_SHIFT        12
#define INPUT_POOL_WORDS        (1 << (INPUT_POOL_SHIFT-5))
#define OUTPUT_POOL_SHIFT       10
#define OUTPUT_POOL_WORDS       (1 << (OUTPUT_POOL_SHIFT-5))
#define SEC_XFER_SIZE           512
#define EXTRACT_SIZE            10


#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
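
/*
 * For example, LONGS(20) is the number of unsigned longs needed to
 * hold 20 bytes: (20 + 8 - 1) / 8 == 3 on a 64-bit machine, and
 * (20 + 4 - 1) / 4 == 5 on a 32-bit one.
 */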

/*
 * To allow fractional bits to be tracked, the entropy_count field is
 * denominated in units of 1/8th bits.
 *
 * 2*(ENTROPY_SHIFT + log2(poolbits)) must be <= 31, or the multiply in
 * credit_entropy_bits() needs to be 64 bits wide.
 */
#define ENTROPY_SHIFT 3
#define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
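
/*
 * For example, with ENTROPY_SHIFT == 3, crediting 8 bits of entropy
 * stores 8 << 3 == 64 fractional units in entropy_count, and
 * ENTROPY_BITS() shifts those 64 units back down to 8 whole bits.
 */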

/*
 * The minimum number of bits of entropy before we wake up a read on
 * /dev/random.  Should be enough to do a significant reseed.
 */
static int random_read_wakeup_bits = 64;

/*
 * If the entropy count falls under this number of bits, then we
 * should wake up processes which are selecting or polling on write
 * access to /dev/random.
 */
static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;

/*
 * Originally, we used a primitive polynomial of degree .poolwords
 * over GF(2).  The taps for various sizes are defined below.  They
 * were chosen to be evenly spaced except for the last tap, which is 1
 * to get the twisting happening as fast as possible.
 *
 * For the purposes of better mixing, we use the CRC-32 polynomial as
 * well to make a (modified) twisted Generalized Feedback Shift
 * Register.  (See M. Matsumoto & Y. Kurita, 1992.  Twisted GFSR
 * generators.  ACM Transactions on Modeling and Computer Simulation
 * 2(3):179-194.  Also see M. Matsumoto & Y. Kurita, 1994.  Twisted
 * GFSR generators II.  ACM Transactions on Modeling and Computer
 * Simulation 4:254-266)
 *
 * Thanks to Colin Plumb for suggesting this.
 *
 * The mixing operation is much less sensitive than the output hash,
 * where we use SHA-1.  All that we want of the mixing operation is
 * that it be a good non-cryptographic hash; i.e., that it not produce
 * collisions when fed "random" data of the sort we expect to see.  As
 * long as the pool state differs for different inputs, we have preserved the
 * input entropy and done a good job.  The fact that an intelligent
 * attacker can construct inputs that will produce controlled
 * alterations to the pool's state is not important because we don't
 * consider such inputs to contribute any randomness.  The only
 * property we need with respect to them is that the attacker can't
 * increase his/her knowledge of the pool's state.  Since all
 * additions are reversible (knowing the final state and the input,
 * you can reconstruct the initial state), if an attacker has any
 * uncertainty about the initial state, he/she can only shuffle that
 * uncertainty about, but never cause any collisions (which would
 * decrease the uncertainty).
 *
 * Our mixing functions were analyzed by Lacharme, Roeck, Strubel, and
 * Videau in their paper, "The Linux Pseudorandom Number Generator
 * Revisited" (see: http://eprint.iacr.org/2012/251.pdf).  In their
 * paper, they point out that we are not using a true Twisted GFSR,
 * since Matsumoto & Kurita used a trinomial feedback polynomial (that
 * is, with only three taps, instead of the six that we are using).
 * As a result, the resulting polynomial is neither primitive nor
 * irreducible, and hence does not have a maximal period over
 * GF(2**32).  They suggest a slight change to the generator
 * polynomial which improves the resulting TGFSR polynomial to be
 * irreducible, which we have made here.
 */
static struct poolinfo {
        int poolbitshift, poolwords, poolbytes, poolbits, poolfracbits;
#define S(x) ilog2(x)+5, (x), (x)*4, (x)*32, (x) << (ENTROPY_SHIFT+5)
        int tap1, tap2, tap3, tap4, tap5;
} poolinfo_table[] = {
        /* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */
        /* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */
        { S(128),       104,    76,     51,     25,     1 },
        /* was: x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 */
        /* x^32 + x^26 + x^19 + x^14 + x^7 + x + 1 */
        { S(32),        26,     19,     14,     7,      1 },
#if 0
        /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1  -- 115 */
        { S(2048),      1638,   1231,   819,    411,    1 },

        /* x^1024 + x^817 + x^615 + x^412 + x^204 + x + 1 -- 290 */
        { S(1024),      817,    615,    412,    204,    1 },

        /* x^1024 + x^819 + x^616 + x^410 + x^207 + x^2 + 1 -- 115 */
        { S(1024),      819,    616,    410,    207,    2 },

        /* x^512 + x^411 + x^308 + x^208 + x^104 + x + 1 -- 225 */
        { S(512),       411,    308,    208,    104,    1 },

        /* x^512 + x^409 + x^307 + x^206 + x^102 + x^2 + 1 -- 95 */
        { S(512),       409,    307,    206,    102,    2 },
        /* x^512 + x^409 + x^309 + x^205 + x^103 + x^2 + 1 -- 95 */
        { S(512),       409,    309,    205,    103,    2 },

        /* x^256 + x^205 + x^155 + x^101 + x^52 + x + 1 -- 125 */
        { S(256),       205,    155,    101,    52,     1 },

        /* x^128 + x^103 + x^78 + x^51 + x^27 + x^2 + 1 -- 70 */
        { S(128),       103,    78,     51,     27,     2 },

        /* x^64 + x^52 + x^39 + x^26 + x^14 + x + 1 -- 15 */
        { S(64),        52,     39,     26,     14,     1 },
#endif
};
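
/*
 * As a worked example of the S() macro above (arithmetic only):
 * S(128) expands to poolbitshift = ilog2(128) + 5 = 12, poolwords = 128,
 * poolbytes = 512, poolbits = 4096, and poolfracbits =
 * 128 << (ENTROPY_SHIFT + 5) = 32768.
 */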

/*
 * Static global variables
 */
static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
static struct fasync_struct *fasync;

static DEFINE_SPINLOCK(random_ready_list_lock);
static LIST_HEAD(random_ready_list);

struct crng_state {
        __u32           state[16];
        unsigned long   init_time;
        spinlock_t      lock;
};

struct crng_state primary_crng = {
        .lock = __SPIN_LOCK_UNLOCKED(primary_crng.lock),
};

/*
 * crng_init =  0 --> Uninitialized
 *              1 --> Initialized
 *              2 --> Initialized from input_pool
 *
 * crng_init is protected by primary_crng->lock, and only increases
 * its value (from 0->1->2).
 */
static int crng_init = 0;
#define crng_ready() (likely(crng_init > 1))
static int crng_init_cnt = 0;
static unsigned long crng_global_init_time = 0;
#define CRNG_INIT_CNT_THRESH (2*CHACHA_KEY_SIZE)
static void _extract_crng(struct crng_state *crng, __u8 out[CHACHA_BLOCK_SIZE]);
static void _crng_backtrack_protect(struct crng_state *crng,
                                    __u8 tmp[CHACHA_BLOCK_SIZE], int used);
static void process_random_ready_list(void);
static void _get_random_bytes(void *buf, int nbytes);

static struct ratelimit_state unseeded_warning =
        RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
static struct ratelimit_state urandom_warning =
        RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);

static int ratelimit_disable __read_mostly;

module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");

/**********************************************************************
 *
 * OS independent entropy store.   Here are the functions which handle
 * storing entropy in an entropy pool.
 *
 **********************************************************************/

struct entropy_store;
struct entropy_store {
        /* read-only data: */
        const struct poolinfo *poolinfo;
        __u32 *pool;
        const char *name;
        struct entropy_store *pull;
        struct work_struct push_work;

        /* read-write data: */
        unsigned long last_pulled;
        spinlock_t lock;
        unsigned short add_ptr;
        unsigned short input_rotate;
        int entropy_count;
        int entropy_total;
        unsigned int initialized:1;
        unsigned int last_data_init:1;
        __u8 last_data[EXTRACT_SIZE];
};

static ssize_t extract_entropy(struct entropy_store *r, void *buf,
                               size_t nbytes, int min, int rsvd);
static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
                                size_t nbytes, int fips);

static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
static void push_to_pool(struct work_struct *work);
static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;

static struct entropy_store input_pool = {
        .poolinfo = &poolinfo_table[0],
        .name = "input",
        .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
        .pool = input_pool_data
};

static struct entropy_store blocking_pool = {
        .poolinfo = &poolinfo_table[1],
        .name = "blocking",
        .pull = &input_pool,
        .lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock),
        .pool = blocking_pool_data,
        .push_work = __WORK_INITIALIZER(blocking_pool.push_work,
                                        push_to_pool),
};

static __u32 const twist_table[8] = {
        0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
        0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };

/*
 * This function adds bytes into the entropy "pool".  It does not
 * update the entropy estimate.  The caller should call
 * credit_entropy_bits if this is appropriate.
 *
 * The pool is stirred with a primitive polynomial of the appropriate
 * degree, and then twisted.  We twist by three bits at a time because
 * it's cheap to do so and helps slightly in the expected case where
 * the entropy is concentrated in the low-order bits.
 */
static void _mix_pool_bytes(struct entropy_store *r, const void *in,
                            int nbytes)
{
        unsigned long i, tap1, tap2, tap3, tap4, tap5;
        int input_rotate;
        int wordmask = r->poolinfo->poolwords - 1;
        const char *bytes = in;
        __u32 w;

        tap1 = r->poolinfo->tap1;
        tap2 = r->poolinfo->tap2;
        tap3 = r->poolinfo->tap3;
        tap4 = r->poolinfo->tap4;
        tap5 = r->poolinfo->tap5;

        input_rotate = r->input_rotate;
        i = r->add_ptr;

        /* mix one byte at a time to simplify size handling and churn faster */
        while (nbytes--) {
                w = rol32(*bytes++, input_rotate);
                i = (i - 1) & wordmask;

                /* XOR in the various taps */
                w ^= r->pool[i];
                w ^= r->pool[(i + tap1) & wordmask];
                w ^= r->pool[(i + tap2) & wordmask];
                w ^= r->pool[(i + tap3) & wordmask];
                w ^= r->pool[(i + tap4) & wordmask];
                w ^= r->pool[(i + tap5) & wordmask];

                /* Mix the result back in with a twist */
                r->pool[i] = (w >> 3) ^ twist_table[w & 7];

                /*
                 * Normally, we add 7 bits of rotation to the pool.
                 * At the beginning of the pool, add an extra 7 bits
                 * rotation, so that successive passes spread the
                 * input bits across the pool evenly.
                 */
                input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
        }

        r->input_rotate = input_rotate;
        r->add_ptr = i;
}

static void __mix_pool_bytes(struct entropy_store *r, const void *in,
                             int nbytes)
{
        trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
        _mix_pool_bytes(r, in, nbytes);
}

static void mix_pool_bytes(struct entropy_store *r, const void *in,
                           int nbytes)
{
        unsigned long flags;

        trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
        spin_lock_irqsave(&r->lock, flags);
        _mix_pool_bytes(r, in, nbytes);
        spin_unlock_irqrestore(&r->lock, flags);
}

struct fast_pool {
        __u32           pool[4];
        unsigned long   last;
        unsigned short  reg_idx;
        unsigned char   count;
};

/*
 * This is a fast mixing routine used by the interrupt randomness
 * collector.  It's hardcoded for a 128-bit pool and assumes that any
 * locks that might be needed are taken by the caller.
 */
static void fast_mix(struct fast_pool *f)
{
        __u32 a = f->pool[0],   b = f->pool[1];
        __u32 c = f->pool[2],   d = f->pool[3];

        a += b;                 c += d;
        b = rol32(b, 6);        d = rol32(d, 27);
        d ^= a;                 b ^= c;

        a += b;                 c += d;
        b = rol32(b, 16);       d = rol32(d, 14);
        d ^= a;                 b ^= c;

        a += b;                 c += d;
        b = rol32(b, 6);        d = rol32(d, 27);
        d ^= a;                 b ^= c;

        a += b;                 c += d;
        b = rol32(b, 16);       d = rol32(d, 14);
        d ^= a;                 b ^= c;

        f->pool[0] = a;  f->pool[1] = b;
        f->pool[2] = c;  f->pool[3] = d;
        f->count++;
}

static void process_random_ready_list(void)
{
        unsigned long flags;
        struct random_ready_callback *rdy, *tmp;

        spin_lock_irqsave(&random_ready_list_lock, flags);
        list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) {
                struct module *owner = rdy->owner;

                list_del_init(&rdy->list);
                rdy->func(rdy);
                module_put(owner);
        }
        spin_unlock_irqrestore(&random_ready_list_lock, flags);
}

/*
 * Credit (or debit) the entropy store with n bits of entropy.
 * Use credit_entropy_bits_safe() if the value comes from userspace
 * or otherwise should be checked for extreme values.
 */
static void credit_entropy_bits(struct entropy_store *r, int nbits)
{
        int entropy_count, orig;
        const int pool_size = r->poolinfo->poolfracbits;
        int nfrac = nbits << ENTROPY_SHIFT;

        if (!nbits)
                return;

retry:
        entropy_count = orig = READ_ONCE(r->entropy_count);
        if (nfrac < 0) {
                /* Debit */
                entropy_count += nfrac;
        } else {
                /*
                 * Credit: we have to account for the possibility of
                 * overwriting already present entropy.  Even in the
                 * ideal case of pure Shannon entropy, new contributions
                 * approach the full value asymptotically:
                 *
                 * entropy <- entropy + (pool_size - entropy) *
                 *      (1 - exp(-add_entropy/pool_size))
                 *
                 * For add_entropy <= pool_size/2 then
                 * (1 - exp(-add_entropy/pool_size)) >=
                 *    (add_entropy/pool_size)*0.7869...
                 * so we can approximate the exponential with
                 * 3/4*add_entropy/pool_size and still be on the
                 * safe side by adding at most pool_size/2 at a time.
                 *
                 * The use of pool_size-2 in the while statement is to
                 * prevent rounding artifacts from making the loop
                 * arbitrarily long; this limits the loop to log2(pool_size)*2
                 * turns no matter how large nbits is.
                 */
                int pnfrac = nfrac;
                const int s = r->poolinfo->poolbitshift + ENTROPY_SHIFT + 2;
                /* The +2 corresponds to the /4 in the denominator */

                do {
                        unsigned int anfrac = min(pnfrac, pool_size/2);
                        unsigned int add =
                                ((pool_size - entropy_count)*anfrac*3) >> s;

                        entropy_count += add;
                        pnfrac -= anfrac;
                } while (unlikely(entropy_count < pool_size-2 && pnfrac));
        }

        if (unlikely(entropy_count < 0)) {
                pr_warn("random: negative entropy/overflow: pool %s count %d\n",
                        r->name, entropy_count);
                WARN_ON(1);
                entropy_count = 0;
        } else if (entropy_count > pool_size)
                entropy_count = pool_size;
        if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
                goto retry;

        r->entropy_total += nbits;
        if (!r->initialized && r->entropy_total > 128) {
                r->initialized = 1;
                r->entropy_total = 0;
        }

        trace_credit_entropy_bits(r->name, nbits,
                                  entropy_count >> ENTROPY_SHIFT,
                                  r->entropy_total, _RET_IP_);

        if (r == &input_pool) {
                int entropy_bits = entropy_count >> ENTROPY_SHIFT;

                if (crng_init < 2 && entropy_bits >= 128) {
                        crng_reseed(&primary_crng, r);
                        entropy_bits = r->entropy_count >> ENTROPY_SHIFT;
                }

                /* should we wake readers? */
                if (entropy_bits >= random_read_wakeup_bits &&
                    wq_has_sleeper(&random_read_wait)) {
                        wake_up_interruptible(&random_read_wait);
                        kill_fasync(&fasync, SIGIO, POLL_IN);
                }
                /* If the input pool is getting full, send some
                 * entropy to the blocking pool until it is 75% full.
                 */
                if (entropy_bits > random_write_wakeup_bits &&
                    r->initialized &&
                    r->entropy_total >= 2*random_read_wakeup_bits) {
                        struct entropy_store *other = &blocking_pool;

                        if (other->entropy_count <=
                            3 * other->poolinfo->poolfracbits / 4) {
                                schedule_work(&other->push_work);
                                r->entropy_total = 0;
                        }
                }
        }
}
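
/*
 * Worked example of the credit path above (arithmetic only): crediting
 * nbits = 64 to an empty input pool (pool_size = 32768 fractional bits,
 * s = 12 + 3 + 2 = 17) gives nfrac = 512 and
 * add = (32768 * 512 * 3) >> 17 = 384 fractional bits, i.e. the pool
 * is credited with 48 whole bits -- the conservative 3/4 underestimate.
 */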

static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
{
        const int nbits_max = r->poolinfo->poolwords * 32;

        if (nbits < 0)
                return -EINVAL;

        /* Cap the value to avoid overflows */
        nbits = min(nbits,  nbits_max);

        credit_entropy_bits(r, nbits);
        return 0;
}

/*********************************************************************
 *
 * CRNG using CHACHA20
 *
 *********************************************************************/

#define CRNG_RESEED_INTERVAL (300*HZ)
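
/*
 * A note on crng_state.state[] layout, as used by crng_initialize()
 * and _extract_crng() below: words 0-3 hold the ChaCha constant
 * "expand 32-byte k", words 4-11 hold the 256-bit key, and words
 * 12-15 provide the block counter and nonce material.
 */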

static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);

#ifdef CONFIG_NUMA
/*
 * Hack to deal with crazy userspace programs when they are all trying
 * to access /dev/urandom in parallel.  The programs are almost
 * certainly doing something terribly wrong, but we'll work around
 * their brain damage.
 */
static struct crng_state **crng_node_pool __read_mostly;
#endif

static void invalidate_batched_entropy(void);

static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
static int __init parse_trust_cpu(char *arg)
{
        return kstrtobool(arg, &trust_cpu);
}
early_param("random.trust_cpu", parse_trust_cpu);

static void crng_initialize(struct crng_state *crng)
{
        int             i;
        int             arch_init = 1;
        unsigned long   rv;

        memcpy(&crng->state[0], "expand 32-byte k", 16);
        if (crng == &primary_crng)
                _extract_entropy(&input_pool, &crng->state[4],
                                 sizeof(__u32) * 12, 0);
        else
                _get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
        for (i = 4; i < 16; i++) {
                if (!arch_get_random_seed_long(&rv) &&
                    !arch_get_random_long(&rv)) {
                        rv = random_get_entropy();
                        arch_init = 0;
                }
                crng->state[i] ^= rv;
        }
        if (trust_cpu && arch_init) {
                crng_init = 2;
                pr_notice("random: crng done (trusting CPU's manufacturer)\n");
        }
        crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}

#ifdef CONFIG_NUMA
static void do_numa_crng_init(struct work_struct *work)
{
        int i;
        struct crng_state *crng;
        struct crng_state **pool;

        pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
        for_each_online_node(i) {
                crng = kmalloc_node(sizeof(struct crng_state),
                                    GFP_KERNEL | __GFP_NOFAIL, i);
                spin_lock_init(&crng->lock);
                crng_initialize(crng);
                pool[i] = crng;
        }
        mb();
        if (cmpxchg(&crng_node_pool, NULL, pool)) {
                for_each_node(i)
                        kfree(pool[i]);
                kfree(pool);
        }
}

static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init);

static void numa_crng_init(void)
{
        schedule_work(&numa_crng_init_work);
}
#else
static void numa_crng_init(void) {}
#endif

/*
 * crng_fast_load() can be called by code in the interrupt service
 * path.  So we can't afford to dilly-dally.
 */
static int crng_fast_load(const char *cp, size_t len)
{
        unsigned long flags;
        unsigned char *p;

        if (!spin_trylock_irqsave(&primary_crng.lock, flags))
                return 0;
        if (crng_init != 0) {
                spin_unlock_irqrestore(&primary_crng.lock, flags);
                return 0;
        }
        p = (unsigned char *) &primary_crng.state[4];
        while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
                p[crng_init_cnt % CHACHA_KEY_SIZE] ^= *cp;
                cp++; crng_init_cnt++; len--;
        }
        spin_unlock_irqrestore(&primary_crng.lock, flags);
        if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
                invalidate_batched_entropy();
                crng_init = 1;
                wake_up_interruptible(&crng_init_wait);
                pr_notice("random: fast init done\n");
        }
        return 1;
}

/*
 * crng_slow_load() is called by add_device_randomness, which has two
 * attributes.  (1) We can't trust that the buffer passed to it is
 * guaranteed to be unpredictable (so it might not have any entropy at
 * all), and (2) it doesn't have the performance constraints of
 * crng_fast_load().
 *
 * So we do something more comprehensive which is guaranteed to touch
 * all of the primary_crng's state, and which uses an LFSR with a
 * period of 255 as part of the mixing algorithm.  Finally, we do
 * *not* advance crng_init_cnt since the buffer we get may be something
 * like a fixed DMI table (for example), which might very well be
 * unique to the machine, but is otherwise unvarying.
 */
static int crng_slow_load(const char *cp, size_t len)
{
        unsigned long           flags;
        static unsigned char    lfsr = 1;
        unsigned char           tmp;
        unsigned                i, max = CHACHA_KEY_SIZE;
        const char *            src_buf = cp;
        char *                  dest_buf = (char *) &primary_crng.state[4];

        if (!spin_trylock_irqsave(&primary_crng.lock, flags))
                return 0;
        if (crng_init != 0) {
                spin_unlock_irqrestore(&primary_crng.lock, flags);
                return 0;
        }
        if (len > max)
                max = len;

        for (i = 0; i < max ; i++) {
                tmp = lfsr;
                lfsr >>= 1;
                if (tmp & 1)
                        lfsr ^= 0xE1;
                tmp = dest_buf[i % CHACHA_KEY_SIZE];
                dest_buf[i % CHACHA_KEY_SIZE] ^= src_buf[i % len] ^ lfsr;
                lfsr += (tmp << 3) | (tmp >> 5);
        }
        spin_unlock_irqrestore(&primary_crng.lock, flags);
        return 1;
}

static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
{
        unsigned long   flags;
        int             i, num;
        union {
                __u8    block[CHACHA_BLOCK_SIZE];
                __u32   key[8];
        } buf;

        if (r) {
                num = extract_entropy(r, &buf, 32, 16, 0);
                if (num == 0)
                        return;
        } else {
                _extract_crng(&primary_crng, buf.block);
                _crng_backtrack_protect(&primary_crng, buf.block,
                                        CHACHA_KEY_SIZE);
        }
        spin_lock_irqsave(&crng->lock, flags);
        for (i = 0; i < 8; i++) {
                unsigned long   rv;
                if (!arch_get_random_seed_long(&rv) &&
                    !arch_get_random_long(&rv))
                        rv = random_get_entropy();
                crng->state[i+4] ^= buf.key[i] ^ rv;
        }
        memzero_explicit(&buf, sizeof(buf));
        crng->init_time = jiffies;
        spin_unlock_irqrestore(&crng->lock, flags);
        if (crng == &primary_crng && crng_init < 2) {
                invalidate_batched_entropy();
                numa_crng_init();
                crng_init = 2;
                process_random_ready_list();
                wake_up_interruptible(&crng_init_wait);
                pr_notice("random: crng init done\n");
                if (unseeded_warning.missed) {
                        pr_notice("random: %d get_random_xx warning(s) missed "
                                  "due to ratelimiting\n",
                                  unseeded_warning.missed);
                        unseeded_warning.missed = 0;
                }
                if (urandom_warning.missed) {
                        pr_notice("random: %d urandom warning(s) missed "
                                  "due to ratelimiting\n",
                                  urandom_warning.missed);
                        urandom_warning.missed = 0;
                }
        }
}

static void _extract_crng(struct crng_state *crng,
                          __u8 out[CHACHA_BLOCK_SIZE])
{
        unsigned long v, flags;

        if (crng_ready() &&
            (time_after(crng_global_init_time, crng->init_time) ||
             time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)))
                crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL);
        spin_lock_irqsave(&crng->lock, flags);
        if (arch_get_random_long(&v))
                crng->state[14] ^= v;
        chacha20_block(&crng->state[0], out);
        if (crng->state[12] == 0)
                crng->state[13]++;
        spin_unlock_irqrestore(&crng->lock, flags);
}

static void extract_crng(__u8 out[CHACHA_BLOCK_SIZE])
{
        struct crng_state *crng = NULL;

#ifdef CONFIG_NUMA
        if (crng_node_pool)
                crng = crng_node_pool[numa_node_id()];
        if (crng == NULL)
#endif
                crng = &primary_crng;
        _extract_crng(crng, out);
}

/*
 * Use the leftover bytes from the CRNG block output (if there is
 * enough) to mutate the CRNG key to provide backtracking protection.
 */
static void _crng_backtrack_protect(struct crng_state *crng,
                                    __u8 tmp[CHACHA_BLOCK_SIZE], int used)
{
        unsigned long   flags;
        __u32           *s, *d;
        int             i;

        used = round_up(used, sizeof(__u32));
        if (used + CHACHA_KEY_SIZE > CHACHA_BLOCK_SIZE) {
                extract_crng(tmp);
                used = 0;
        }
        spin_lock_irqsave(&crng->lock, flags);
        s = (__u32 *) &tmp[used];
        d = &crng->state[4];
        for (i=0; i < 8; i++)
                *d++ ^= *s++;
        spin_unlock_irqrestore(&crng->lock, flags);
}

static void crng_backtrack_protect(__u8 tmp[CHACHA_BLOCK_SIZE], int used)
{
        struct crng_state *crng = NULL;

#ifdef CONFIG_NUMA
        if (crng_node_pool)
                crng = crng_node_pool[numa_node_id()];
        if (crng == NULL)
#endif
                crng = &primary_crng;
        _crng_backtrack_protect(crng, tmp, used);
}

static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
{
        ssize_t ret = 0, i = CHACHA_BLOCK_SIZE;
        __u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
        int large_request = (nbytes > 256);

        while (nbytes) {
                if (large_request && need_resched()) {
                        if (signal_pending(current)) {
                                if (ret == 0)
                                        ret = -ERESTARTSYS;
                                break;
                        }
                        schedule();
                }

                extract_crng(tmp);
                i = min_t(int, nbytes, CHACHA_BLOCK_SIZE);
                if (copy_to_user(buf, tmp, i)) {
                        ret = -EFAULT;
                        break;
                }

                nbytes -= i;
                buf += i;
                ret += i;
        }
        crng_backtrack_protect(tmp, i);

        /* Wipe data just written to memory */
        memzero_explicit(tmp, sizeof(tmp));

        return ret;
}


/*********************************************************************
 *
 * Entropy input management
 *
 *********************************************************************/

/* There is one of these per entropy source */
struct timer_rand_state {
        cycles_t last_time;
        long last_delta, last_delta2;
};

#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, };

/*
 * Add device- or boot-specific data to the input pool to help
 * initialize it.
 *
 * None of this adds any entropy; it is meant to avoid the problem of
 * the entropy pool having similar initial state across largely
 * identical devices.
 */
void add_device_randomness(const void *buf, unsigned int size)
{
        unsigned long time = random_get_entropy() ^ jiffies;
        unsigned long flags;

        if (!crng_ready() && size)
                crng_slow_load(buf, size);

        trace_add_device_randomness(size, _RET_IP_);
        spin_lock_irqsave(&input_pool.lock, flags);
        _mix_pool_bytes(&input_pool, buf, size);
        _mix_pool_bytes(&input_pool, &time, sizeof(time));
        spin_unlock_irqrestore(&input_pool.lock, flags);
}
EXPORT_SYMBOL(add_device_randomness);

static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE;

/*
 * This function adds entropy to the entropy "pool" by using timing
 * delays.  It uses the timer_rand_state structure to make an estimate
 * of how many bits of entropy this call has added to the pool.
 *
 * The number "num" is also added to the pool - it should somehow describe
 * the type of event which just happened.  This is currently 0-255 for
 * keyboard scan codes, and 256 upwards for interrupts.
 *
 */
static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
{
        struct entropy_store    *r;
        struct {
                long jiffies;
                unsigned cycles;
                unsigned num;
        } sample;
        long delta, delta2, delta3;

        sample.jiffies = jiffies;
        sample.cycles = random_get_entropy();
        sample.num = num;
        r = &input_pool;
        mix_pool_bytes(r, &sample, sizeof(sample));

        /*
         * Calculate number of bits of randomness we probably added.
         * We take into account the first, second and third-order deltas
         * in order to make our estimate.
         */
        delta = sample.jiffies - state->last_time;
        state->last_time = sample.jiffies;

        delta2 = delta - state->last_delta;
        state->last_delta = delta;

        delta3 = delta2 - state->last_delta2;
        state->last_delta2 = delta2;

        if (delta < 0)
                delta = -delta;
        if (delta2 < 0)
                delta2 = -delta2;
        if (delta3 < 0)
                delta3 = -delta3;
        if (delta > delta2)
                delta = delta2;
        if (delta > delta3)
                delta = delta3;

        /*
         * delta is now minimum absolute delta.
         * Round down by 1 bit on general principles,
         * and limit the entropy estimate to 11 bits.
         */
        credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
}

void add_input_randomness(unsigned int type, unsigned int code,
                                 unsigned int value)
{
        static unsigned char last_value;

        /* ignore autorepeat and the like */
        if (value == last_value)
                return;

        last_value = value;
        add_timer_randomness(&input_timer_state,
                             (type << 4) ^ code ^ (code >> 4) ^ value);
        trace_add_input_randomness(ENTROPY_BITS(&input_pool));
}
EXPORT_SYMBOL_GPL(add_input_randomness);

static DEFINE_PER_CPU(struct fast_pool, irq_randomness);

#ifdef ADD_INTERRUPT_BENCH
static unsigned long avg_cycles, avg_deviation;

#define AVG_SHIFT 8     /* Exponential average factor k=1/256 */
#define FIXED_1_2 (1 << (AVG_SHIFT-1))

static void add_interrupt_bench(cycles_t start)
{
        long delta = random_get_entropy() - start;

        /* Use a weighted moving average */
        delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT);
        avg_cycles += delta;
        /* And average deviation */
        delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT);
        avg_deviation += delta;
}
#else
#define add_interrupt_bench(x)
#endif

static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
{
        __u32 *ptr = (__u32 *) regs;
        unsigned int idx;

        if (regs == NULL)
                return 0;
        idx = READ_ONCE(f->reg_idx);
        if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
                idx = 0;
        ptr += idx++;
        WRITE_ONCE(f->reg_idx, idx);
        return *ptr;
}

void add_interrupt_randomness(int irq, int irq_flags)
{
        struct entropy_store    *r;
        struct fast_pool        *fast_pool = this_cpu_ptr(&irq_randomness);
        struct pt_regs          *regs = get_irq_regs();
        unsigned long           now = jiffies;
        cycles_t                cycles = random_get_entropy();
        __u32                   c_high, j_high;
        __u64                   ip;
        unsigned long           seed;
        int                     credit = 0;

        if (cycles == 0)
                cycles = get_reg(fast_pool, regs);
        c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
        j_high = (sizeof(now) > 4) ? now >> 32 : 0;
        fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
        fast_pool->pool[1] ^= now ^ c_high;
        ip = regs ? instruction_pointer(regs) : _RET_IP_;
        fast_pool->pool[2] ^= ip;
        fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
                get_reg(fast_pool, regs);

        fast_mix(fast_pool);
        add_interrupt_bench(cycles);

        if (unlikely(crng_init == 0)) {
                if ((fast_pool->count >= 64) &&
                    crng_fast_load((char *) fast_pool->pool,
                                   sizeof(fast_pool->pool))) {
                        fast_pool->count = 0;
                        fast_pool->last = now;
                }
                return;
        }

        if ((fast_pool->count < 64) &&
            !time_after(now, fast_pool->last + HZ))
                return;

        r = &input_pool;
        if (!spin_trylock(&r->lock))
                return;

        fast_pool->last = now;
        __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));

        /*
         * If we have an architectural seed generator, produce a seed and
1280         * add it to the pool.  For the sake of paranoia don't let the
1281         * architectural seed generator dominate the input from the
1282         * interrupt noise.
1283         */
1284        if (arch_get_random_seed_long(&seed)) {
1285                __mix_pool_bytes(r, &seed, sizeof(seed));
1286                credit = 1;
1287        }
1288        spin_unlock(&r->lock);
1289
1290        fast_pool->count = 0;
1291
1292        /* award one bit for the contents of the fast pool */
1293        credit_entropy_bits(r, credit + 1);
1294}
1295EXPORT_SYMBOL_GPL(add_interrupt_randomness);
1296
1297#ifdef CONFIG_BLOCK
1298void add_disk_randomness(struct gendisk *disk)
1299{
1300        if (!disk || !disk->random)
1301                return;
1302        /* first major is 1, so we get >= 0x200 here */
1303        add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
1304        trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool));
1305}
1306EXPORT_SYMBOL_GPL(add_disk_randomness);
1307#endif
1308
1309/*********************************************************************
1310 *
1311 * Entropy extraction routines
1312 *
1313 *********************************************************************/
1314
1315/*
1316 * This utility inline function is responsible for transferring entropy
1317 * from the primary pool to the secondary extraction pool. We make
1318 * sure we pull enough for a 'catastrophic reseed'.
1319 */
1320static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes);
1321static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
1322{
1323        if (!r->pull ||
1324            r->entropy_count >= (nbytes << (ENTROPY_SHIFT + 3)) ||
1325            r->entropy_count > r->poolinfo->poolfracbits)
1326                return;
1327
1328        _xfer_secondary_pool(r, nbytes);
1329}
1330
1331static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
1332{
1333        __u32   tmp[OUTPUT_POOL_WORDS];
1334
1335        int bytes = nbytes;
1336
1337        /* pull at least as much as a wakeup */
1338        bytes = max_t(int, bytes, random_read_wakeup_bits / 8);
1339        /* but never more than the buffer size */
1340        bytes = min_t(int, bytes, sizeof(tmp));
1341
1342        trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8,
1343                                  ENTROPY_BITS(r), ENTROPY_BITS(r->pull));
1344        bytes = extract_entropy(r->pull, tmp, bytes,
1345                                random_read_wakeup_bits / 8, 0);
1346        mix_pool_bytes(r, tmp, bytes);
1347        credit_entropy_bits(r, bytes*8);
1348}
1349
1350/*
1351 * Used as a workqueue function so that when the input pool is getting
1352 * full, we can "spill over" some entropy to the output pools.  That
1353 * way the output pools can store some of the excess entropy instead
1354 * of letting it go to waste.
1355 */
1356static void push_to_pool(struct work_struct *work)
1357{
1358        struct entropy_store *r = container_of(work, struct entropy_store,
1359                                              push_work);
1360        BUG_ON(!r);
1361        _xfer_secondary_pool(r, random_read_wakeup_bits/8);
1362        trace_push_to_pool(r->name, r->entropy_count >> ENTROPY_SHIFT,
1363                           r->pull->entropy_count >> ENTROPY_SHIFT);
1364}
1365
1366/*
1367 * This function decides how many bytes to actually take from the
1368 * given pool, and also debits the entropy count accordingly.
1369 */
1370static size_t account(struct entropy_store *r, size_t nbytes, int min,
1371                      int reserved)
1372{
1373        int entropy_count, orig, have_bytes;
1374        size_t ibytes, nfrac;
1375
1376        BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
1377
1378        /* Can we pull enough? */
1379retry:
1380        entropy_count = orig = READ_ONCE(r->entropy_count);
1381        ibytes = nbytes;
1382        /* never pull more than available */
1383        have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
1384
1385        if ((have_bytes -= reserved) < 0)
1386                have_bytes = 0;
1387        ibytes = min_t(size_t, ibytes, have_bytes);
1388        if (ibytes < min)
1389                ibytes = 0;
1390
1391        if (unlikely(entropy_count < 0)) {
1392                pr_warn("random: negative entropy count: pool %s count %d\n",
1393                        r->name, entropy_count);
1394                WARN_ON(1);
1395                entropy_count = 0;
1396        }
1397        nfrac = ibytes << (ENTROPY_SHIFT + 3);
1398        if ((size_t) entropy_count > nfrac)
1399                entropy_count -= nfrac;
1400        else
1401                entropy_count = 0;
1402
1403        if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
1404                goto retry;
1405
1406        trace_debit_entropy(r->name, 8 * ibytes);
1407        if (ibytes &&
1408            (r->entropy_count >> ENTROPY_SHIFT) < random_write_wakeup_bits) {
1409                wake_up_interruptible(&random_write_wait);
1410                kill_fasync(&fasync, SIGIO, POLL_OUT);
1411        }
1412
1413        return ibytes;
1414}
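
/*
 * Worked example (editorial, assuming ENTROPY_SHIFT == 3): with 512 bits
 * in the pool, entropy_count is 4096 fractional bits, so have_bytes is
 * 64. A request for 48 bytes with reserved == 32 can take at most
 * 64 - 32 = 32 bytes, and if min were larger than 32 the caller would
 * get 0 instead; partial pulls are never allowed to defeat a
 * catastrophic reseed.
 */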
1415
1416/*
1417 * This function does the actual extraction for extract_entropy and
1418 * extract_entropy_user.
1419 *
1420 * Note: we assume that .poolwords is a multiple of 16 words.
1421 */
1422static void extract_buf(struct entropy_store *r, __u8 *out)
1423{
1424        int i;
1425        union {
1426                __u32 w[5];
1427                unsigned long l[LONGS(20)];
1428        } hash;
1429        __u32 workspace[SHA_WORKSPACE_WORDS];
1430        unsigned long flags;
1431
1432        /*
1433         * If we have an architectural hardware random number
1434         * generator, use it for SHA's initial vector
1435         */
1436        sha_init(hash.w);
1437        for (i = 0; i < LONGS(20); i++) {
1438                unsigned long v;
1439                if (!arch_get_random_long(&v))
1440                        break;
1441                hash.l[i] = v;
1442        }
1443
1444        /* Generate a hash across the pool, 16 words (512 bits) at a time */
1445        spin_lock_irqsave(&r->lock, flags);
1446        for (i = 0; i < r->poolinfo->poolwords; i += 16)
1447                sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
1448
1449        /*
1450         * We mix the hash back into the pool to prevent backtracking
1451         * attacks (where the attacker knows the state of the pool
1452         * plus the current outputs, and attempts to find previous
1453         * outputs), unless the hash function can be inverted. By
1454         * mixing at least a SHA-1 hash's worth of data back, we make
1455         * brute-forcing the feedback as hard as brute-forcing the
1456         * hash.
1457         */
1458        __mix_pool_bytes(r, hash.w, sizeof(hash.w));
1459        spin_unlock_irqrestore(&r->lock, flags);
1460
1461        memzero_explicit(workspace, sizeof(workspace));
1462
1463        /*
1464         * In case the hash function has some recognizable output
1465         * pattern, we fold it in half. Thus, we always feed back
1466         * twice as much data as we output.
1467         */
1468        hash.w[0] ^= hash.w[3];
1469        hash.w[1] ^= hash.w[4];
1470        hash.w[2] ^= rol32(hash.w[2], 16);
1471
1472        memcpy(out, &hash, EXTRACT_SIZE);
1473        memzero_explicit(&hash, sizeof(hash));
1474}
1475
1476static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
1477                                size_t nbytes, int fips)
1478{
1479        ssize_t ret = 0, i;
1480        __u8 tmp[EXTRACT_SIZE];
1481        unsigned long flags;
1482
1483        while (nbytes) {
1484                extract_buf(r, tmp);
1485
1486                if (fips) {
1487                        spin_lock_irqsave(&r->lock, flags);
1488                        if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
1489                                panic("Hardware RNG duplicated output!\n");
1490                        memcpy(r->last_data, tmp, EXTRACT_SIZE);
1491                        spin_unlock_irqrestore(&r->lock, flags);
1492                }
1493                i = min_t(int, nbytes, EXTRACT_SIZE);
1494                memcpy(buf, tmp, i);
1495                nbytes -= i;
1496                buf += i;
1497                ret += i;
1498        }
1499
1500        /* Wipe data just returned from memory */
1501        memzero_explicit(tmp, sizeof(tmp));
1502
1503        return ret;
1504}
1505
1506/*
1507 * This function extracts randomness from the "entropy pool", and
1508 * returns it in a buffer.
1509 *
1510 * The min parameter specifies the minimum amount we can pull before
1511 * failing, to avoid races that defeat catastrophic reseeding, while
1512 * the reserved parameter indicates how much entropy we must leave in
1513 * the pool after each pull to avoid starving other readers.
1514 */
1515static ssize_t extract_entropy(struct entropy_store *r, void *buf,
1516                                 size_t nbytes, int min, int reserved)
1517{
1518        __u8 tmp[EXTRACT_SIZE];
1519        unsigned long flags;
1520
1521        /* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
1522        if (fips_enabled) {
1523                spin_lock_irqsave(&r->lock, flags);
1524                if (!r->last_data_init) {
1525                        r->last_data_init = 1;
1526                        spin_unlock_irqrestore(&r->lock, flags);
1527                        trace_extract_entropy(r->name, EXTRACT_SIZE,
1528                                              ENTROPY_BITS(r), _RET_IP_);
1529                        xfer_secondary_pool(r, EXTRACT_SIZE);
1530                        extract_buf(r, tmp);
1531                        spin_lock_irqsave(&r->lock, flags);
1532                        memcpy(r->last_data, tmp, EXTRACT_SIZE);
1533                }
1534                spin_unlock_irqrestore(&r->lock, flags);
1535        }
1536
1537        trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
1538        xfer_secondary_pool(r, nbytes);
1539        nbytes = account(r, nbytes, min, reserved);
1540
1541        return _extract_entropy(r, buf, nbytes, fips_enabled);
1542}
1543
1544/*
1545 * This function extracts randomness from the "entropy pool", and
1546 * returns it in a userspace buffer.
1547 */
1548static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
1549                                    size_t nbytes)
1550{
1551        ssize_t ret = 0, i;
1552        __u8 tmp[EXTRACT_SIZE];
1553        int large_request = (nbytes > 256);
1554
1555        trace_extract_entropy_user(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
1556        xfer_secondary_pool(r, nbytes);
1557        nbytes = account(r, nbytes, 0, 0);
1558
1559        while (nbytes) {
1560                if (large_request && need_resched()) {
1561                        if (signal_pending(current)) {
1562                                if (ret == 0)
1563                                        ret = -ERESTARTSYS;
1564                                break;
1565                        }
1566                        schedule();
1567                }
1568
1569                extract_buf(r, tmp);
1570                i = min_t(int, nbytes, EXTRACT_SIZE);
1571                if (copy_to_user(buf, tmp, i)) {
1572                        ret = -EFAULT;
1573                        break;
1574                }
1575
1576                nbytes -= i;
1577                buf += i;
1578                ret += i;
1579        }
1580
1581        /* Wipe data just returned from memory */
1582        memzero_explicit(tmp, sizeof(tmp));
1583
1584        return ret;
1585}
1586
1587#define warn_unseeded_randomness(previous) \
1588        _warn_unseeded_randomness(__func__, (void *) _RET_IP_, (previous))
1589
1590static void _warn_unseeded_randomness(const char *func_name, void *caller,
1591                                      void **previous)
1592{
1593#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
1594        const bool print_once = false;
1595#else
1596        static bool print_once __read_mostly;
1597#endif
1598
1599        if (print_once ||
1600            crng_ready() ||
1601            (previous && (caller == READ_ONCE(*previous))))
1602                return;
1603        WRITE_ONCE(*previous, caller);
1604#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
1605        print_once = true;
1606#endif
1607        if (__ratelimit(&unseeded_warning))
1608                pr_notice("random: %s called from %pS with crng_init=%d\n",
1609                          func_name, caller, crng_init);
1610}
1611
1612/*
1613 * This function is the exported kernel interface.  It returns some
1614 * number of good random numbers, suitable for key generation, seeding
1615 * TCP sequence numbers, etc.  It does not rely on the hardware random
1616 * number generator.  For random bytes direct from the hardware RNG
1617 * (when available), use get_random_bytes_arch(). To ensure that the
1618 * randomness provided by this function is suitable for cryptographic
1619 * use, wait_for_random_bytes() should have been called, and returned
1620 * 0, at some point beforehand.
1621 */
1622static void _get_random_bytes(void *buf, int nbytes)
1623{
1624        __u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
1625
1626        trace_get_random_bytes(nbytes, _RET_IP_);
1627
1628        while (nbytes >= CHACHA_BLOCK_SIZE) {
1629                extract_crng(buf);
1630                buf += CHACHA_BLOCK_SIZE;
1631                nbytes -= CHACHA_BLOCK_SIZE;
1632        }
1633
1634        if (nbytes > 0) {
1635                extract_crng(tmp);
1636                memcpy(buf, tmp, nbytes);
1637                crng_backtrack_protect(tmp, nbytes);
1638        } else
1639                crng_backtrack_protect(tmp, CHACHA_BLOCK_SIZE);
1640        memzero_explicit(tmp, sizeof(tmp));
1641}
1642
1643void get_random_bytes(void *buf, int nbytes)
1644{
1645        static void *previous;
1646
1647        warn_unseeded_randomness(&previous);
1648        _get_random_bytes(buf, nbytes);
1649}
1650EXPORT_SYMBOL(get_random_bytes);
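
/*
 * Editorial sketch (not part of the original file): a typical caller
 * that needs key material first waits for the CRNG as described above.
 * The function name example_generate_key is hypothetical.
 */
static int __maybe_unused example_generate_key(u8 *key, int len)
{
        int ret;

        /* Sleeps until the CRNG is seeded; may return -ERESTARTSYS. */
        ret = wait_for_random_bytes();
        if (ret)
                return ret;

        get_random_bytes(key, len);
        return 0;
}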
1651
1652/*
1653 * Wait for the urandom pool to be seeded and thus guaranteed to supply
1654 * cryptographically secure random numbers. This applies to: the /dev/urandom
1655 * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
1656 * family of functions. Using any of these functions without first calling
1657 * this function forfeits the guarantee of security.
1658 *
1659 * Returns: 0 if the urandom pool has been seeded.
1660 *          -ERESTARTSYS if the function was interrupted by a signal.
1661 */
1662int wait_for_random_bytes(void)
1663{
1664        if (likely(crng_ready()))
1665                return 0;
1666        return wait_event_interruptible(crng_init_wait, crng_ready());
1667}
1668EXPORT_SYMBOL(wait_for_random_bytes);
1669
1670/*
1671 * Returns whether or not the urandom pool has been seeded and thus guaranteed
1672 * to supply cryptographically secure random numbers. This applies to: the
1673 * /dev/urandom device, the get_random_bytes function, and the
1674 * get_random_{u32,u64,int,long} family of functions.
1675 *
1676 * Returns: true if the urandom pool has been seeded.
1677 *          false if the urandom pool has not been seeded.
1678 */
1679bool rng_is_initialized(void)
1680{
1681        return crng_ready();
1682}
1683EXPORT_SYMBOL(rng_is_initialized);
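
/*
 * Editorial sketch: a caller that must not sleep (or prefers to fail
 * fast) can test rng_is_initialized() instead of blocking in
 * wait_for_random_bytes(). The name example_try_generate_key is
 * hypothetical.
 */
static int __maybe_unused example_try_generate_key(u8 *key, int len)
{
        if (!rng_is_initialized())
                return -EAGAIN;         /* CRNG not yet seeded */

        get_random_bytes(key, len);
        return 0;
}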
1684
1685/*
1686 * Add a callback function that will be invoked when the nonblocking
1687 * pool is initialised.
1688 *
1689 * returns: 0 if callback is successfully added
1690 *          -EALREADY if pool is already initialised (callback not called)
1691 *          -ENOENT if module for callback is not alive
1692 */
1693int add_random_ready_callback(struct random_ready_callback *rdy)
1694{
1695        struct module *owner;
1696        unsigned long flags;
1697        int err = -EALREADY;
1698
1699        if (crng_ready())
1700                return err;
1701
1702        owner = rdy->owner;
1703        if (!try_module_get(owner))
1704                return -ENOENT;
1705
1706        spin_lock_irqsave(&random_ready_list_lock, flags);
1707        if (crng_ready())
1708                goto out;
1709
1710        owner = NULL;
1711
1712        list_add(&rdy->list, &random_ready_list);
1713        err = 0;
1714
1715out:
1716        spin_unlock_irqrestore(&random_ready_list_lock, flags);
1717
1718        module_put(owner);
1719
1720        return err;
1721}
1722EXPORT_SYMBOL(add_random_ready_callback);
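
/*
 * Editorial sketch: registering a readiness callback from a module; all
 * example_* names are hypothetical. Note that -EALREADY is not a
 * failure here, it means the CRNG is already seeded and the work can
 * simply be done inline.
 */
static void example_crng_ready(struct random_ready_callback *rdy)
{
        /* Runs once the CRNG is seeded; keys may be drawn from here on. */
}

static struct random_ready_callback example_rdy = {
        .func  = example_crng_ready,
        .owner = THIS_MODULE,
};

static int __maybe_unused example_setup(void)
{
        int err = add_random_ready_callback(&example_rdy);

        if (err == -EALREADY)
                return 0;       /* already seeded; callback will not run */
        return err;             /* 0, or -ENOENT if the module is dying */
}

/*
 * A module using this pattern must pair it with
 * del_random_ready_callback(&example_rdy) on the unload path in case
 * the callback never fired.
 */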
1723
1724/*
1725 * Delete a previously registered readiness callback function.
1726 */
1727void del_random_ready_callback(struct random_ready_callback *rdy)
1728{
1729        unsigned long flags;
1730        struct module *owner = NULL;
1731
1732        spin_lock_irqsave(&random_ready_list_lock, flags);
1733        if (!list_empty(&rdy->list)) {
1734                list_del_init(&rdy->list);
1735                owner = rdy->owner;
1736        }
1737        spin_unlock_irqrestore(&random_ready_list_lock, flags);
1738
1739        module_put(owner);
1740}
1741EXPORT_SYMBOL(del_random_ready_callback);
1742
1743/*
1744 * This function will use the architecture-specific hardware random
1745 * number generator if it is available.  The arch-specific hw RNG will
1746 * almost certainly be faster than what we can do in software, but it
1747 * is impossible to verify that it is implemented securely (as
1748 * opposed to, say, the AES encryption of a sequence number using a
1749 * key known by the NSA).  So it's useful if we need the speed, but
1750 * only if we're willing to trust the hardware manufacturer not to
1751 * have put in a back door.
1752 *
1753 * Return number of bytes filled in.
1754 */
1755int __must_check get_random_bytes_arch(void *buf, int nbytes)
1756{
1757        int left = nbytes;
1758        char *p = buf;
1759
1760        trace_get_random_bytes_arch(left, _RET_IP_);
1761        while (left) {
1762                unsigned long v;
1763                int chunk = min_t(int, left, sizeof(unsigned long));
1764
1765                if (!arch_get_random_long(&v))
1766                        break;
1767
1768                memcpy(p, &v, chunk);
1769                p += chunk;
1770                left -= chunk;
1771        }
1772
1773        return nbytes - left;
1774}
1775EXPORT_SYMBOL(get_random_bytes_arch);
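
/*
 * Editorial sketch: because get_random_bytes_arch() may fill fewer
 * bytes than requested (and none at all without an arch RNG), its
 * __must_check return value needs handling. A hypothetical caller that
 * falls back to the software path:
 */
static void __maybe_unused example_fill_random(void *buf, int nbytes)
{
        int filled = get_random_bytes_arch(buf, nbytes);

        if (filled < nbytes)    /* arch RNG absent or ran short */
                get_random_bytes(buf + filled, nbytes - filled);
}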
1776
1777/*
1778 * init_std_data - initialize pool with system data
1779 *
1780 * @r: pool to initialize
1781 *
1782 * This function mixes some system data into the pool to prepare it
1783 * for use. The pool's contents are not cleared first, as wiping them
1784 * could only decrease the entropy in the pool.
1785 */
1786static void init_std_data(struct entropy_store *r)
1787{
1788        int i;
1789        ktime_t now = ktime_get_real();
1790        unsigned long rv;
1791
1792        r->last_pulled = jiffies;
1793        mix_pool_bytes(r, &now, sizeof(now));
1794        for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) {
1795                if (!arch_get_random_seed_long(&rv) &&
1796                    !arch_get_random_long(&rv))
1797                        rv = random_get_entropy();
1798                mix_pool_bytes(r, &rv, sizeof(rv));
1799        }
1800        mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
1801}
1802
1803/*
1804 * Note that setup_arch() may call add_device_randomness()
1805 * long before we get here. This allows seeding of the pools
1806 * with some platform dependent data very early in the boot
1807 * process. But it limits our options here. We must use
1808 * statically allocated structures that already have all
1809 * initializations complete at compile time. We should also
1810 * take care not to overwrite the precious per platform data
1811 * we were given.
1812 */
1813static int rand_initialize(void)
1814{
1815        init_std_data(&input_pool);
1816        init_std_data(&blocking_pool);
1817        crng_initialize(&primary_crng);
1818        crng_global_init_time = jiffies;
1819        if (ratelimit_disable) {
1820                urandom_warning.interval = 0;
1821                unseeded_warning.interval = 0;
1822        }
1823        return 0;
1824}
1825early_initcall(rand_initialize);
1826
1827#ifdef CONFIG_BLOCK
1828void rand_initialize_disk(struct gendisk *disk)
1829{
1830        struct timer_rand_state *state;
1831
1832        /*
1833         * If kzalloc returns null, we just won't use that entropy
1834         * source.
1835         */
1836        state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
1837        if (state) {
1838                state->last_time = INITIAL_JIFFIES;
1839                disk->random = state;
1840        }
1841}
1842#endif
1843
1844static ssize_t
1845_random_read(int nonblock, char __user *buf, size_t nbytes)
1846{
1847        ssize_t n;
1848
1849        if (nbytes == 0)
1850                return 0;
1851
1852        nbytes = min_t(size_t, nbytes, SEC_XFER_SIZE);
1853        while (1) {
1854                n = extract_entropy_user(&blocking_pool, buf, nbytes);
1855                if (n < 0)
1856                        return n;
1857                trace_random_read(n*8, (nbytes-n)*8,
1858                                  ENTROPY_BITS(&blocking_pool),
1859                                  ENTROPY_BITS(&input_pool));
1860                if (n > 0)
1861                        return n;
1862
1863                /* Pool is (near) empty.  Maybe wait and retry. */
1864                if (nonblock)
1865                        return -EAGAIN;
1866
1867                wait_event_interruptible(random_read_wait,
1868                        ENTROPY_BITS(&input_pool) >=
1869                        random_read_wakeup_bits);
1870                if (signal_pending(current))
1871                        return -ERESTARTSYS;
1872        }
1873}
1874
1875static ssize_t
1876random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
1877{
1878        return _random_read(file->f_flags & O_NONBLOCK, buf, nbytes);
1879}
1880
1881static ssize_t
1882urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
1883{
1884        unsigned long flags;
1885        static int maxwarn = 10;
1886        int ret;
1887
1888        if (!crng_ready() && maxwarn > 0) {
1889                maxwarn--;
1890                if (__ratelimit(&urandom_warning))
1891                        pr_notice("random: %s: uninitialized urandom read (%zd bytes read)\n",
1892                                  current->comm,
1893                                  nbytes);
1894                spin_lock_irqsave(&primary_crng.lock, flags);
1895                crng_init_cnt = 0;
1896                spin_unlock_irqrestore(&primary_crng.lock, flags);
1897        }
1898        nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
1899        ret = extract_crng_user(buf, nbytes);
1900        trace_urandom_read(8 * nbytes, 0, ENTROPY_BITS(&input_pool));
1901        return ret;
1902}
1903
1904static __poll_t
1905random_poll(struct file *file, poll_table * wait)
1906{
1907        __poll_t mask;
1908
1909        poll_wait(file, &random_read_wait, wait);
1910        poll_wait(file, &random_write_wait, wait);
1911        mask = 0;
1912        if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits)
1913                mask |= EPOLLIN | EPOLLRDNORM;
1914        if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
1915                mask |= EPOLLOUT | EPOLLWRNORM;
1916        return mask;
1917}
1918
1919static int
1920write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
1921{
1922        size_t bytes;
1923        __u32 t, buf[16];
1924        const char __user *p = buffer;
1925
1926        while (count > 0) {
1927                int b, i = 0;
1928
1929                bytes = min(count, sizeof(buf));
1930                if (copy_from_user(&buf, p, bytes))
1931                        return -EFAULT;
1932
1933                for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) {
1934                        if (!arch_get_random_int(&t))
1935                                break;
1936                        buf[i] ^= t;
1937                }
1938
1939                count -= bytes;
1940                p += bytes;
1941
1942                mix_pool_bytes(r, buf, bytes);
1943                cond_resched();
1944        }
1945
1946        return 0;
1947}
1948
1949static ssize_t random_write(struct file *file, const char __user *buffer,
1950                            size_t count, loff_t *ppos)
1951{
1952        ssize_t ret;
1953
1954        ret = write_pool(&input_pool, buffer, count);
1955        if (ret)
1956                return ret;
1957
1958        return (ssize_t)count;
1959}
1960
1961static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
1962{
1963        int size, ent_count;
1964        int __user *p = (int __user *)arg;
1965        int retval;
1966
1967        switch (cmd) {
1968        case RNDGETENTCNT:
1969                /* inherently racy, no point locking */
1970                ent_count = ENTROPY_BITS(&input_pool);
1971                if (put_user(ent_count, p))
1972                        return -EFAULT;
1973                return 0;
1974        case RNDADDTOENTCNT:
1975                if (!capable(CAP_SYS_ADMIN))
1976                        return -EPERM;
1977                if (get_user(ent_count, p))
1978                        return -EFAULT;
1979                return credit_entropy_bits_safe(&input_pool, ent_count);
1980        case RNDADDENTROPY:
1981                if (!capable(CAP_SYS_ADMIN))
1982                        return -EPERM;
1983                if (get_user(ent_count, p++))
1984                        return -EFAULT;
1985                if (ent_count < 0)
1986                        return -EINVAL;
1987                if (get_user(size, p++))
1988                        return -EFAULT;
1989                retval = write_pool(&input_pool, (const char __user *)p,
1990                                    size);
1991                if (retval < 0)
1992                        return retval;
1993                return credit_entropy_bits_safe(&input_pool, ent_count);
1994        case RNDZAPENTCNT:
1995        case RNDCLEARPOOL:
1996                /*
1997                 * Clear the entropy pool counters. We no longer clear
1998                 * the entropy pool, as that's silly.
1999                 */
2000                if (!capable(CAP_SYS_ADMIN))
2001                        return -EPERM;
2002                input_pool.entropy_count = 0;
2003                blocking_pool.entropy_count = 0;
2004                return 0;
2005        case RNDRESEEDCRNG:
2006                if (!capable(CAP_SYS_ADMIN))
2007                        return -EPERM;
2008                if (crng_init < 2)
2009                        return -ENODATA;
2010                crng_reseed(&primary_crng, NULL);
2011                crng_global_init_time = jiffies - 1;
2012                return 0;
2013        default:
2014                return -EINVAL;
2015        }
2016}
2017
2018static int random_fasync(int fd, struct file *filp, int on)
2019{
2020        return fasync_helper(fd, filp, on, &fasync);
2021}
2022
2023const struct file_operations random_fops = {
2024        .read  = random_read,
2025        .write = random_write,
2026        .poll  = random_poll,
2027        .unlocked_ioctl = random_ioctl,
2028        .fasync = random_fasync,
2029        .llseek = noop_llseek,
2030};
2031
2032const struct file_operations urandom_fops = {
2033        .read  = urandom_read,
2034        .write = random_write,
2035        .unlocked_ioctl = random_ioctl,
2036        .fasync = random_fasync,
2037        .llseek = noop_llseek,
2038};
2039
2040SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
2041                unsigned int, flags)
2042{
2043        int ret;
2044
2045        if (flags & ~(GRND_NONBLOCK|GRND_RANDOM))
2046                return -EINVAL;
2047
2048        if (count > INT_MAX)
2049                count = INT_MAX;
2050
2051        if (flags & GRND_RANDOM)
2052                return _random_read(flags & GRND_NONBLOCK, buf, count);
2053
2054        if (!crng_ready()) {
2055                if (flags & GRND_NONBLOCK)
2056                        return -EAGAIN;
2057                ret = wait_for_random_bytes();
2058                if (unlikely(ret))
2059                        return ret;
2060        }
2061        return urandom_read(NULL, buf, count, NULL);
2062}
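
/*
 * Editorial note: userspace typically reaches the syscall above through
 * the getrandom(2) wrapper from <sys/random.h>, e.g. (illustrative):
 *
 *      unsigned char key[32];
 *      if (getrandom(key, sizeof(key), 0) != sizeof(key))
 *              ... handle the error or retry ...
 *
 * With flags == 0 the call blocks until the CRNG is seeded and then
 * behaves like a read from /dev/urandom; GRND_RANDOM draws from the
 * blocking pool instead, and GRND_NONBLOCK turns either wait into
 * -EAGAIN.
 */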
2063
2064/********************************************************************
2065 *
2066 * Sysctl interface
2067 *
2068 ********************************************************************/
2069
2070#ifdef CONFIG_SYSCTL
2071
2072#include <linux/sysctl.h>
2073
2074static int min_read_thresh = 8, min_write_thresh;
2075static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
2076static int max_write_thresh = INPUT_POOL_WORDS * 32;
2077static int random_min_urandom_seed = 60;
2078static char sysctl_bootid[16];
2079
2080/*
2081 * This function is used to return both the bootid UUID, and random
2082 * UUID.  The difference is in whether table->data is NULL; if it is,
2083 * then a new UUID is generated and returned to the user.
2084 *
2085 * If the user accesses this via the proc interface, the UUID will be
2086 * returned as an ASCII string in the standard UUID format; if via the
2087 * sysctl system call, as 16 bytes of binary data.
2088 */
2089static int proc_do_uuid(struct ctl_table *table, int write,
2090                        void __user *buffer, size_t *lenp, loff_t *ppos)
2091{
2092        struct ctl_table fake_table;
2093        unsigned char buf[64], tmp_uuid[16], *uuid;
2094
2095        uuid = table->data;
2096        if (!uuid) {
2097                uuid = tmp_uuid;
2098                generate_random_uuid(uuid);
2099        } else {
2100                static DEFINE_SPINLOCK(bootid_spinlock);
2101
2102                spin_lock(&bootid_spinlock);
2103                if (!uuid[8])
2104                        generate_random_uuid(uuid);
2105                spin_unlock(&bootid_spinlock);
2106        }
2107
2108        sprintf(buf, "%pU", uuid);
2109
2110        fake_table.data = buf;
2111        fake_table.maxlen = sizeof(buf);
2112
2113        return proc_dostring(&fake_table, write, buffer, lenp, ppos);
2114}
2115
2116/*
2117 * Return entropy available scaled to integral bits
2118 */
2119static int proc_do_entropy(struct ctl_table *table, int write,
2120                           void __user *buffer, size_t *lenp, loff_t *ppos)
2121{
2122        struct ctl_table fake_table;
2123        int entropy_count;
2124
2125        entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
2126
2127        fake_table.data = &entropy_count;
2128        fake_table.maxlen = sizeof(entropy_count);
2129
2130        return proc_dointvec(&fake_table, write, buffer, lenp, ppos);
2131}
2132
2133static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
2134extern struct ctl_table random_table[];
2135struct ctl_table random_table[] = {
2136        {
2137                .procname       = "poolsize",
2138                .data           = &sysctl_poolsize,
2139                .maxlen         = sizeof(int),
2140                .mode           = 0444,
2141                .proc_handler   = proc_dointvec,
2142        },
2143        {
2144                .procname       = "entropy_avail",
2145                .maxlen         = sizeof(int),
2146                .mode           = 0444,
2147                .proc_handler   = proc_do_entropy,
2148                .data           = &input_pool.entropy_count,
2149        },
2150        {
2151                .procname       = "read_wakeup_threshold",
2152                .data           = &random_read_wakeup_bits,
2153                .maxlen         = sizeof(int),
2154                .mode           = 0644,
2155                .proc_handler   = proc_dointvec_minmax,
2156                .extra1         = &min_read_thresh,
2157                .extra2         = &max_read_thresh,
2158        },
2159        {
2160                .procname       = "write_wakeup_threshold",
2161                .data           = &random_write_wakeup_bits,
2162                .maxlen         = sizeof(int),
2163                .mode           = 0644,
2164                .proc_handler   = proc_dointvec_minmax,
2165                .extra1         = &min_write_thresh,
2166                .extra2         = &max_write_thresh,
2167        },
2168        {
2169                .procname       = "urandom_min_reseed_secs",
2170                .data           = &random_min_urandom_seed,
2171                .maxlen         = sizeof(int),
2172                .mode           = 0644,
2173                .proc_handler   = proc_dointvec,
2174        },
2175        {
2176                .procname       = "boot_id",
2177                .data           = &sysctl_bootid,
2178                .maxlen         = 16,
2179                .mode           = 0444,
2180                .proc_handler   = proc_do_uuid,
2181        },
2182        {
2183                .procname       = "uuid",
2184                .maxlen         = 16,
2185                .mode           = 0444,
2186                .proc_handler   = proc_do_uuid,
2187        },
2188#ifdef ADD_INTERRUPT_BENCH
2189        {
2190                .procname       = "add_interrupt_avg_cycles",
2191                .data           = &avg_cycles,
2192                .maxlen         = sizeof(avg_cycles),
2193                .mode           = 0444,
2194                .proc_handler   = proc_doulongvec_minmax,
2195        },
2196        {
2197                .procname       = "add_interrupt_avg_deviation",
2198                .data           = &avg_deviation,
2199                .maxlen         = sizeof(avg_deviation),
2200                .mode           = 0444,
2201                .proc_handler   = proc_doulongvec_minmax,
2202        },
2203#endif
2204        { }
2205};
2206#endif  /* CONFIG_SYSCTL */
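
/*
 * Editorial note: random_table is attached under "kernel" in
 * kernel/sysctl.c, so the entries above appear as files such as
 * /proc/sys/kernel/random/entropy_avail and
 * /proc/sys/kernel/random/boot_id. Illustrative shell usage (the value
 * shown is made up):
 *
 *      $ cat /proc/sys/kernel/random/entropy_avail
 *      3781
 */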
2207
2208struct batched_entropy {
2209        union {
2210                u64 entropy_u64[CHACHA_BLOCK_SIZE / sizeof(u64)];
2211                u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)];
2212        };
2213        unsigned int position;
2214};
2215static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
2216
2217/*
2218 * Get a random word for internal kernel use only. The quality of the random
2219 * number is either as good as RDRAND or as good as /dev/urandom, with the
2220 * goal of being quite fast and not depleting entropy. In order to ensure
2221 * that the randomness provided by this function is okay, the function
2222 * wait_for_random_bytes() should be called and return 0 at least once
2223 * at any point prior.
2224 */
2225static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
2226u64 get_random_u64(void)
2227{
2228        u64 ret;
2229        bool use_lock;
2230        unsigned long flags = 0;
2231        struct batched_entropy *batch;
2232        static void *previous;
2233
2234#if BITS_PER_LONG == 64
2235        if (arch_get_random_long((unsigned long *)&ret))
2236                return ret;
2237#else
2238        if (arch_get_random_long((unsigned long *)&ret) &&
2239            arch_get_random_long((unsigned long *)&ret + 1))
2240                return ret;
2241#endif
2242
2243        warn_unseeded_randomness(&previous);
2244
2245        use_lock = READ_ONCE(crng_init) < 2;
2246        batch = &get_cpu_var(batched_entropy_u64);
2247        if (use_lock)
2248                read_lock_irqsave(&batched_entropy_reset_lock, flags);
2249        if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
2250                extract_crng((u8 *)batch->entropy_u64);
2251                batch->position = 0;
2252        }
2253        ret = batch->entropy_u64[batch->position++];
2254        if (use_lock)
2255                read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
2256        put_cpu_var(batched_entropy_u64);
2257        return ret;
2258}
2259EXPORT_SYMBOL(get_random_u64);
2260
2261static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
2262u32 get_random_u32(void)
2263{
2264        u32 ret;
2265        bool use_lock;
2266        unsigned long flags = 0;
2267        struct batched_entropy *batch;
2268        static void *previous;
2269
2270        if (arch_get_random_int(&ret))
2271                return ret;
2272
2273        warn_unseeded_randomness(&previous);
2274
2275        use_lock = READ_ONCE(crng_init) < 2;
2276        batch = &get_cpu_var(batched_entropy_u32);
2277        if (use_lock)
2278                read_lock_irqsave(&batched_entropy_reset_lock, flags);
2279        if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
2280                extract_crng((u8 *)batch->entropy_u32);
2281                batch->position = 0;
2282        }
2283        ret = batch->entropy_u32[batch->position++];
2284        if (use_lock)
2285                read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
2286        put_cpu_var(batched_entropy_u32);
2287        return ret;
2288}
2289EXPORT_SYMBOL(get_random_u32);
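
/*
 * Editorial sketch: these batched helpers suit fast in-kernel uses such
 * as random backoff or hash seeds. A hypothetical bounded draw; the
 * modulo is slightly biased unless bound divides 2^32, which is
 * acceptable for such uses:
 */
static u32 __maybe_unused example_random_below(u32 bound)
{
        return bound ? get_random_u32() % bound : 0;
}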
2290
2291/* It's important to invalidate all potential batched entropy that might
2292 * be stored before the crng is initialized, which we can do lazily by
2293 * simply resetting the counter to zero so that it's re-extracted on the
2294 * next usage. */
2295static void invalidate_batched_entropy(void)
2296{
2297        int cpu;
2298        unsigned long flags;
2299
2300        write_lock_irqsave(&batched_entropy_reset_lock, flags);
2301        for_each_possible_cpu (cpu) {
2302                per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
2303                per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
2304        }
2305        write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
2306}
2307
2308/**
2309 * randomize_page - Generate a random, page aligned address
2310 * @start:      The smallest acceptable address the caller will take.
2311 * @range:      The size of the area, starting at @start, within which the
2312 *              random address must fall.
2313 *
2314 * If @start + @range would overflow, @range is capped.
2315 *
2316 * NOTE: Historical use of randomize_range, which this replaces, presumed that
2317 * @start was already page aligned.  We now align it regardless.
2318 *
2319 * Return: A page aligned address within [start, start + range).  On error,
2320 * @start is returned.
2321 */
2322unsigned long
2323randomize_page(unsigned long start, unsigned long range)
2324{
2325        if (!PAGE_ALIGNED(start)) {
2326                range -= PAGE_ALIGN(start) - start;
2327                start = PAGE_ALIGN(start);
2328        }
2329
2330        if (start > ULONG_MAX - range)
2331                range = ULONG_MAX - start;
2332
2333        range >>= PAGE_SHIFT;
2334
2335        if (range == 0)
2336                return start;
2337
2338        return start + ((get_random_long() % range) << PAGE_SHIFT);
2339}
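
/*
 * Editorial sketch: a typical ASLR-style use of the helper above,
 * picking a page-aligned base inside a window; min_addr and the 1 GiB
 * window size are illustrative.
 */
static unsigned long __maybe_unused example_pick_base(unsigned long min_addr)
{
        return randomize_page(min_addr, 1UL << 30);     /* 1 GiB window */
}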
2340
2341/* Interface for in-kernel drivers of true hardware RNGs.
2342 * Those devices may produce endless random bits and will be throttled
2343 * when our pool is full.
2344 */
2345void add_hwgenerator_randomness(const char *buffer, size_t count,
2346                                size_t entropy)
2347{
2348        struct entropy_store *poolp = &input_pool;
2349
2350        if (unlikely(crng_init == 0)) {
2351                crng_fast_load(buffer, count);
2352                return;
2353        }
2354
2355        /* Suspend writing if we're above the trickle threshold.
2356         * We'll be woken up again once below random_write_wakeup_bits,
2357         * or when the calling thread is about to terminate.
2358         */
2359        wait_event_interruptible(random_write_wait, kthread_should_stop() ||
2360                        ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
2361        mix_pool_bytes(poolp, buffer, count);
2362        credit_entropy_bits(poolp, entropy);
2363}
2364EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
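
/*
 * Editorial sketch: how a hardware RNG driver thread might feed the
 * pool. example_hw_read() is a hypothetical blocking driver read, and
 * the entropy credit of half a bit per bit of data is a deliberately
 * conservative, made-up estimate.
 */
extern size_t example_hw_read(void *buf, size_t len);  /* hypothetical */

static int __maybe_unused example_hwrng_fill(void *unused)
{
        char buf[32];

        while (!kthread_should_stop()) {
                size_t n = example_hw_read(buf, sizeof(buf));

                /* Sleeps internally while the input pool is full. */
                add_hwgenerator_randomness(buf, n, n * 8 / 2);
        }
        return 0;
}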
2365