linux/drivers/char/hw_random/core.c
/*
 * hw_random/core.c: HWRNG core API
 *
 * Copyright 2006 Michael Buesch <m@bues.ch>
 * Copyright 2005 (c) MontaVista Software, Inc.
 *
 * Please read Documentation/admin-guide/hw_random.rst for details on use.
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hw_random.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#define RNG_MODULE_NAME         "hw_random"

static struct hwrng *current_rng;
/* the current rng has been explicitly chosen by user via sysfs */
static int cur_rng_set_by_user;
static struct task_struct *hwrng_fill;
/* list of registered rngs, sorted descending by quality */
static LIST_HEAD(rng_list);
/* Protects rng_list and current_rng */
static DEFINE_MUTEX(rng_mutex);
/* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
static DEFINE_MUTEX(reading_mutex);
static int data_avail;
static u8 *rng_buffer, *rng_fillbuf;
static unsigned short current_quality;
static unsigned short default_quality; /* = 0; default to "off" */

module_param(current_quality, ushort, 0644);
MODULE_PARM_DESC(current_quality,
                 "current hwrng entropy estimation per 1024 bits of input");
module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
                 "default entropy content of hwrng per 1024 bits of input");

static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
static void start_khwrngd(void);

static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
                               int wait);

static size_t rng_buffer_size(void)
{
        return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
}

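/*
 * Mix a small sample from a newly available rng into the kernel's input
 * pool via add_device_randomness(), which credits no entropy.
 */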
static void add_early_randomness(struct hwrng *rng)
{
        int bytes_read;
        size_t size = min_t(size_t, 16, rng_buffer_size());

        mutex_lock(&reading_mutex);
        bytes_read = rng_get_data(rng, rng_buffer, size, 1);
        mutex_unlock(&reading_mutex);
        if (bytes_read > 0)
                add_device_randomness(rng_buffer, bytes_read);
}

static inline void cleanup_rng(struct kref *kref)
{
        struct hwrng *rng = container_of(kref, struct hwrng, ref);

        if (rng->cleanup)
                rng->cleanup(rng);

        complete(&rng->cleanup_done);
}

static int set_current_rng(struct hwrng *rng)
{
        int err;

        BUG_ON(!mutex_is_locked(&rng_mutex));

        err = hwrng_init(rng);
        if (err)
                return err;

        drop_current_rng();
        current_rng = rng;

        return 0;
}

static void drop_current_rng(void)
{
        BUG_ON(!mutex_is_locked(&rng_mutex));
        if (!current_rng)
                return;

        /* decrease last reference for triggering the cleanup */
        kref_put(&current_rng->ref, cleanup_rng);
        current_rng = NULL;
}

/* Returns ERR_PTR(), NULL or refcounted hwrng */
static struct hwrng *get_current_rng(void)
{
        struct hwrng *rng;

        if (mutex_lock_interruptible(&rng_mutex))
                return ERR_PTR(-ERESTARTSYS);

        rng = current_rng;
        if (rng)
                kref_get(&rng->ref);

        mutex_unlock(&rng_mutex);
        return rng;
}

static void put_rng(struct hwrng *rng)
{
        /*
         * Hold rng_mutex here so we serialize in case they set_current_rng
         * on rng again immediately.
         */
        mutex_lock(&rng_mutex);
        if (rng)
                kref_put(&rng->ref, cleanup_rng);
        mutex_unlock(&rng_mutex);
}

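/*
 * Initialize (or re-acquire a reference to) a candidate rng, update the
 * current entropy estimate, and start or stop the khwrngd fill thread as
 * needed.  Called with rng_mutex held.
 */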
static int hwrng_init(struct hwrng *rng)
{
        if (kref_get_unless_zero(&rng->ref))
                goto skip_init;

        if (rng->init) {
                int ret;

                ret = rng->init(rng);
                if (ret)
                        return ret;
        }

        kref_init(&rng->ref);
        reinit_completion(&rng->cleanup_done);

skip_init:
        add_early_randomness(rng);

        current_quality = rng->quality ? : default_quality;
        if (current_quality > 1024)
                current_quality = 1024;

        if (current_quality == 0 && hwrng_fill)
                kthread_stop(hwrng_fill);
        if (current_quality > 0 && !hwrng_fill)
                start_khwrngd();

        return 0;
}

static int rng_dev_open(struct inode *inode, struct file *filp)
{
        /* enforce read-only access to this chrdev */
        if ((filp->f_mode & FMODE_READ) == 0)
                return -EINVAL;
        if (filp->f_mode & FMODE_WRITE)
                return -EINVAL;
        return 0;
}

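/*
 * Read random bytes from @rng into @buffer (at most @size).  Uses the
 * driver's ->read() hook when available; otherwise falls back to the
 * legacy ->data_present()/->data_read() interface.  Caller must hold
 * reading_mutex.
 */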
static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
                        int wait)
{
        int present;

        BUG_ON(!mutex_is_locked(&reading_mutex));
        if (rng->read)
                return rng->read(rng, (void *)buffer, size, wait);

        if (rng->data_present)
                present = rng->data_present(rng, wait);
        else
                present = 1;

        if (present)
                return rng->data_read(rng, (u32 *)buffer);

        return 0;
}

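/*
 * read() handler for /dev/hwrng: fills rng_buffer from the current rng and
 * copies it out to userspace, honouring O_NONBLOCK and pending signals.
 */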
static ssize_t rng_dev_read(struct file *filp, char __user *buf,
                            size_t size, loff_t *offp)
{
        ssize_t ret = 0;
        int err = 0;
        int bytes_read, len;
        struct hwrng *rng;

        while (size) {
                rng = get_current_rng();
                if (IS_ERR(rng)) {
                        err = PTR_ERR(rng);
                        goto out;
                }
                if (!rng) {
                        err = -ENODEV;
                        goto out;
                }

                if (mutex_lock_interruptible(&reading_mutex)) {
                        err = -ERESTARTSYS;
                        goto out_put;
                }
                if (!data_avail) {
                        bytes_read = rng_get_data(rng, rng_buffer,
                                rng_buffer_size(),
                                !(filp->f_flags & O_NONBLOCK));
                        if (bytes_read < 0) {
                                err = bytes_read;
                                goto out_unlock_reading;
                        }
                        data_avail = bytes_read;
                }

                if (!data_avail) {
                        if (filp->f_flags & O_NONBLOCK) {
                                err = -EAGAIN;
                                goto out_unlock_reading;
                        }
                } else {
                        len = data_avail;
                        if (len > size)
                                len = size;

                        data_avail -= len;

                        if (copy_to_user(buf + ret, rng_buffer + data_avail,
                                                                len)) {
                                err = -EFAULT;
                                goto out_unlock_reading;
                        }

                        size -= len;
                        ret += len;
                }

                mutex_unlock(&reading_mutex);
                put_rng(rng);

                if (need_resched())
                        schedule_timeout_interruptible(1);

                if (signal_pending(current)) {
                        err = -ERESTARTSYS;
                        goto out;
                }
        }
out:
        return ret ? : err;

out_unlock_reading:
        mutex_unlock(&reading_mutex);
out_put:
        put_rng(rng);
        goto out;
}

static const struct file_operations rng_chrdev_ops = {
        .owner          = THIS_MODULE,
        .open           = rng_dev_open,
        .read           = rng_dev_read,
        .llseek         = noop_llseek,
};

static const struct attribute_group *rng_dev_groups[];

static struct miscdevice rng_miscdev = {
        .minor          = HWRNG_MINOR,
        .name           = RNG_MODULE_NAME,
        .nodename       = "hwrng",
        .fops           = &rng_chrdev_ops,
        .groups         = rng_dev_groups,
};

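/*
 * Switch to the best available rng: rng_list is kept sorted by descending
 * quality, so the first entry wins.  Clears cur_rng_set_by_user on success.
 * Caller must hold rng_mutex.
 */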
static int enable_best_rng(void)
{
        int ret = -ENODEV;

        BUG_ON(!mutex_is_locked(&rng_mutex));

        /* rng_list is sorted by quality, use the best (=first) one */
        if (!list_empty(&rng_list)) {
                struct hwrng *new_rng;

                new_rng = list_entry(rng_list.next, struct hwrng, list);
                ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
                if (!ret)
                        cur_rng_set_by_user = 0;
        } else {
                drop_current_rng();
                cur_rng_set_by_user = 0;
                ret = 0;
        }

        return ret;
}

static ssize_t hwrng_attr_current_store(struct device *dev,
                                        struct device_attribute *attr,
                                        const char *buf, size_t len)
{
        int err = -ENODEV;
        struct hwrng *rng;

        err = mutex_lock_interruptible(&rng_mutex);
        if (err)
                return -ERESTARTSYS;

        if (sysfs_streq(buf, "")) {
                err = enable_best_rng();
        } else {
                list_for_each_entry(rng, &rng_list, list) {
                        if (sysfs_streq(rng->name, buf)) {
                                cur_rng_set_by_user = 1;
                                err = set_current_rng(rng);
                                break;
                        }
                }
        }

        mutex_unlock(&rng_mutex);

        return err ? : len;
}

static ssize_t hwrng_attr_current_show(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        ssize_t ret;
        struct hwrng *rng;

        rng = get_current_rng();
        if (IS_ERR(rng))
                return PTR_ERR(rng);

        ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none");
        put_rng(rng);

        return ret;
}

static ssize_t hwrng_attr_available_show(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        int err;
        struct hwrng *rng;

        err = mutex_lock_interruptible(&rng_mutex);
        if (err)
                return -ERESTARTSYS;
        buf[0] = '\0';
        list_for_each_entry(rng, &rng_list, list) {
                strlcat(buf, rng->name, PAGE_SIZE);
                strlcat(buf, " ", PAGE_SIZE);
        }
        strlcat(buf, "\n", PAGE_SIZE);
        mutex_unlock(&rng_mutex);

        return strlen(buf);
}

static ssize_t hwrng_attr_selected_show(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%d\n", cur_rng_set_by_user);
}

static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
                   hwrng_attr_current_show,
                   hwrng_attr_current_store);
static DEVICE_ATTR(rng_available, S_IRUGO,
                   hwrng_attr_available_show,
                   NULL);
static DEVICE_ATTR(rng_selected, S_IRUGO,
                   hwrng_attr_selected_show,
                   NULL);

static struct attribute *rng_dev_attrs[] = {
        &dev_attr_rng_current.attr,
        &dev_attr_rng_available.attr,
        &dev_attr_rng_selected.attr,
        NULL
};

ATTRIBUTE_GROUPS(rng_dev);

static void __exit unregister_miscdev(void)
{
        misc_deregister(&rng_miscdev);
}

static int __init register_miscdev(void)
{
        return misc_register(&rng_miscdev);
}

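/*
 * Body of the khwrngd kernel thread: keeps pulling data from the current rng
 * into rng_fillbuf and feeds it to the input pool, crediting entropy at
 * current_quality bits per 1024 bits of input.
 */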
static int hwrng_fillfn(void *unused)
{
        long rc;

        while (!kthread_should_stop()) {
                struct hwrng *rng;

                rng = get_current_rng();
                if (IS_ERR(rng) || !rng)
                        break;
                mutex_lock(&reading_mutex);
                rc = rng_get_data(rng, rng_fillbuf,
                                  rng_buffer_size(), 1);
                mutex_unlock(&reading_mutex);
                put_rng(rng);
                if (rc <= 0) {
                        pr_warn("hwrng: no data available\n");
                        msleep_interruptible(10000);
                        continue;
                }
                /* Outside lock, sure, but y'know: randomness. */
                add_hwgenerator_randomness((void *)rng_fillbuf, rc,
                                           rc * current_quality * 8 >> 10);
        }
        hwrng_fill = NULL;
        return 0;
}

static void start_khwrngd(void)
{
        hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
        if (IS_ERR(hwrng_fill)) {
                pr_err("hwrng_fill thread creation failed\n");
                hwrng_fill = NULL;
        }
}

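/**
 * hwrng_register - register a hardware random number generator
 * @rng: the rng to register; must have a unique name and either a
 *       read() or a data_read() hook
 *
 * Adds @rng to rng_list (kept sorted by descending quality) and makes it
 * the current rng if none is active, or if it beats the current one and
 * the current one was not chosen by the user via sysfs.
 *
 * Returns 0 on success or a negative error code.
 */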
int hwrng_register(struct hwrng *rng)
{
        int err = -EINVAL;
        struct hwrng *old_rng, *tmp;
        struct list_head *rng_list_ptr;

        if (!rng->name || (!rng->data_read && !rng->read))
                goto out;

        mutex_lock(&rng_mutex);
        /* Must not register two RNGs with the same name. */
        err = -EEXIST;
        list_for_each_entry(tmp, &rng_list, list) {
                if (strcmp(tmp->name, rng->name) == 0)
                        goto out_unlock;
        }

        init_completion(&rng->cleanup_done);
        complete(&rng->cleanup_done);

        /* rng_list is sorted by decreasing quality */
        list_for_each(rng_list_ptr, &rng_list) {
                tmp = list_entry(rng_list_ptr, struct hwrng, list);
                if (tmp->quality < rng->quality)
                        break;
        }
        list_add_tail(&rng->list, rng_list_ptr);

        old_rng = current_rng;
        err = 0;
        if (!old_rng ||
            (!cur_rng_set_by_user && rng->quality > old_rng->quality)) {
                /*
                 * Make the new rng current: either no rng is active yet,
                 * or the new source provides better entropy quality and
                 * the current one was not explicitly chosen by userspace.
                 */
                err = set_current_rng(rng);
                if (err)
                        goto out_unlock;
        }

        if (old_rng && !rng->init) {
                /*
                 * Use a new device's input to add some randomness to
                 * the system.  If this rng device isn't going to be
                 * used right away, its init function hasn't been
                 * called yet; so only use the randomness from devices
                 * that don't need an init callback.
                 */
                add_early_randomness(rng);
        }

out_unlock:
        mutex_unlock(&rng_mutex);
out:
        return err;
}
EXPORT_SYMBOL_GPL(hwrng_register);

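/**
 * hwrng_unregister - unregister a hardware random number generator
 * @rng: the rng to unregister
 *
 * Removes @rng from rng_list, switches to the next best rng if @rng was
 * current, and waits until any outstanding cleanup of @rng has completed.
 */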
void hwrng_unregister(struct hwrng *rng)
{
        int err;

        mutex_lock(&rng_mutex);

        list_del(&rng->list);
        if (current_rng == rng) {
                err = enable_best_rng();
                if (err) {
                        drop_current_rng();
                        cur_rng_set_by_user = 0;
                }
        }

        if (list_empty(&rng_list)) {
                mutex_unlock(&rng_mutex);
                if (hwrng_fill)
                        kthread_stop(hwrng_fill);
        } else
                mutex_unlock(&rng_mutex);

        wait_for_completion(&rng->cleanup_done);
}
EXPORT_SYMBOL_GPL(hwrng_unregister);

static void devm_hwrng_release(struct device *dev, void *res)
{
        hwrng_unregister(*(struct hwrng **)res);
}

static int devm_hwrng_match(struct device *dev, void *res, void *data)
{
        struct hwrng **r = res;

        if (WARN_ON(!r || !*r))
                return 0;

        return *r == data;
}

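/**
 * devm_hwrng_register - resource-managed hwrng_register()
 * @dev: device that owns the rng
 * @rng: the rng to register
 *
 * The rng is automatically unregistered when @dev is unbound.
 *
 * Returns 0 on success or a negative error code.
 */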
int devm_hwrng_register(struct device *dev, struct hwrng *rng)
{
        struct hwrng **ptr;
        int error;

        ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return -ENOMEM;

        error = hwrng_register(rng);
        if (error) {
                devres_free(ptr);
                return error;
        }

        *ptr = rng;
        devres_add(dev, ptr);
        return 0;
}
EXPORT_SYMBOL_GPL(devm_hwrng_register);

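/**
 * devm_hwrng_unregister - undo a devm_hwrng_register()
 * @dev: device the rng was registered against
 * @rng: the rng to unregister
 */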
void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
{
        devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng);
}
EXPORT_SYMBOL_GPL(devm_hwrng_unregister);

static int __init hwrng_modinit(void)
{
        int ret = -ENOMEM;

        /* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
        rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
        if (!rng_buffer)
                return -ENOMEM;

        rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
        if (!rng_fillbuf) {
                kfree(rng_buffer);
                return -ENOMEM;
        }

        ret = register_miscdev();
        if (ret) {
                kfree(rng_fillbuf);
                kfree(rng_buffer);
        }

        return ret;
}

static void __exit hwrng_modexit(void)
{
        mutex_lock(&rng_mutex);
        BUG_ON(current_rng);
        kfree(rng_buffer);
        kfree(rng_fillbuf);
        mutex_unlock(&rng_mutex);

        unregister_miscdev();
}

module_init(hwrng_modinit);
module_exit(hwrng_modexit);

MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
MODULE_LICENSE("GPL");