// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/security.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#define DEVMEM_MINOR    1
#define DEVPORT_MINOR   4

static inline unsigned long size_inside_page(unsigned long start,
                                             unsigned long size)
{
        unsigned long sz;

        sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

        return min(sz, size);
}
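
/*
 * Worked example: with 4 KiB pages, start = 0x1ffc and size = 16 give
 * sz = 0x2000 - 0x1ffc = 4, so size_inside_page() returns min(4, 16) = 4;
 * only the four bytes up to the next page boundary are handled in one pass,
 * and the read/write loops below then continue with the following page.
 */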

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
        return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return 1;
}
#endif

#ifdef CONFIG_STRICT_DEVMEM
static inline int page_is_allowed(unsigned long pfn)
{
        return devmem_is_allowed(pfn);
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn))
                        return 0;
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#else
static inline int page_is_allowed(unsigned long pfn)
{
        return 1;
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#endif

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif

static inline bool should_stop_iteration(void)
{
        if (need_resched())
                cond_resched();
        return fatal_signal_pending(current);
}

/*
 * This function reads the *physical* memory. The f_pos points directly to
 * the memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
                        size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t read, sz;
        void *ptr;
        char *bounce;
        int err;

        if (p != *ppos)
                return 0;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;
        read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                if (sz > 0) {
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        count -= sz;
                        read += sz;
                }
        }
#endif

        bounce = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!bounce)
                return -ENOMEM;

        while (count > 0) {
                unsigned long remaining;
                int allowed, probe;

                sz = size_inside_page(p, count);

                err = -EPERM;
                allowed = page_is_allowed(p >> PAGE_SHIFT);
                if (!allowed)
                        goto failed;

                err = -EFAULT;
                if (allowed == 2) {
                        /* Show zeros for restricted memory. */
                        remaining = clear_user(buf, sz);
                } else {
                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur.
                         */
                        ptr = xlate_dev_mem_ptr(p);
                        if (!ptr)
                                goto failed;

                        probe = copy_from_kernel_nofault(bounce, ptr, sz);
                        unxlate_dev_mem_ptr(p, ptr);
                        if (probe)
                                goto failed;

                        remaining = copy_to_user(buf, bounce, sz);
                }

                if (remaining)
                        goto failed;

                buf += sz;
                p += sz;
                count -= sz;
                read += sz;
                if (should_stop_iteration())
                        break;
        }
        kfree(bounce);

        *ppos += read;
        return read;

failed:
        kfree(bounce);
        return err;
}
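
/*
 * Illustrative userspace sketch (not part of this file): reading physical
 * memory through the read() path above, which copies via a bounce buffer
 * using copy_from_kernel_nofault(). Assumes the target range is permitted
 * by CONFIG_STRICT_DEVMEM and the caller has the needed privileges; the
 * physical address 0x9f000 is a made-up example.
 *
 *      #include <fcntl.h>
 *      #include <stdio.h>
 *      #include <unistd.h>
 *
 *      int main(void)
 *      {
 *              unsigned char buf[256];
 *              int fd = open("/dev/mem", O_RDONLY);
 *
 *              if (fd < 0 || pread(fd, buf, sizeof(buf), 0x9f000) < 0) {
 *                      perror("/dev/mem");
 *                      return 1;
 *              }
 *              printf("first byte: %#x\n", buf[0]);
 *              close(fd);
 *              return 0;
 *      }
 */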

static ssize_t write_mem(struct file *file, const char __user *buf,
                         size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t written, sz;
        unsigned long copied;
        void *ptr;

        if (p != *ppos)
                return -EFBIG;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;

        written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                int allowed;

                sz = size_inside_page(p, count);

                allowed = page_is_allowed(p >> PAGE_SHIFT);
                if (!allowed)
                        return -EPERM;

                /* Skip actual writing when a page is marked as restricted. */
                if (allowed == 1) {
                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur.
                         */
                        ptr = xlate_dev_mem_ptr(p);
                        if (!ptr) {
                                if (written)
                                        break;
                                return -EFAULT;
                        }

                        copied = copy_from_user(ptr, buf, sz);
                        unxlate_dev_mem_ptr(p, ptr);
                        if (copied) {
                                written += sz - copied;
                                if (written)
                                        break;
                                return -EFAULT;
                        }
                }

                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
                if (should_stop_iteration())
                        break;
        }

        *ppos += written;
        return written;
}

int __weak phys_mem_access_prot_allowed(struct file *file,
        unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
        return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
        /*
         * On ia64, we ignore O_DSYNC because we cannot tolerate memory
         * attribute aliases.
         */
        return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#else
        /*
         * Accessing memory above the top of what the kernel knows about,
         * or through a file pointer that was marked O_DSYNC, will be done
         * non-cached.
         */
        if (file->f_flags & O_DSYNC)
                return 1;
        return addr >= __pa(high_memory);
#endif
}
#endif

static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
        phys_addr_t offset = pfn << PAGE_SHIFT;

        if (uncached_access(file, offset))
                return pgprot_noncached(vma_prot);
#endif
        return vma_prot;
}
#endif
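
/*
 * Illustrative sketch (not part of this file): on architectures that take
 * the pgprot_noncached() path above (ia64 ignores O_DSYNC, as noted),
 * opening /dev/mem with O_DSYNC requests an uncached mapping, which is
 * what memory-mapped-I/O users typically want. The address 0xfebf0000 is
 * a made-up example of a device register window.
 *
 *      #include <fcntl.h>
 *      #include <sys/mman.h>
 *
 *      int fd = open("/dev/mem", O_RDWR | O_DSYNC);
 *      volatile unsigned int *regs =
 *              mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                   fd, 0xfebf0000);
 */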

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
                                           unsigned long addr,
                                           unsigned long len,
                                           unsigned long pgoff,
                                           unsigned long flags)
{
        if (!valid_mmap_phys_addr_range(pgoff, len))
                return (unsigned long) -EINVAL;
        return pgoff << PAGE_SHIFT;
}

/* permit direct mmap, for read, write or exec */
static unsigned memory_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_DIRECT |
                NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

static unsigned zero_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_COPY;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return vma->vm_flags & VM_MAYSHARE;
}
#else

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
        .access = generic_access_phys
#endif
};

static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
        size_t size = vma->vm_end - vma->vm_start;
        phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;

        /* Does it even fit in phys_addr_t? */
        if (offset >> PAGE_SHIFT != vma->vm_pgoff)
                return -EINVAL;

        /* It's illegal to wrap around the end of the physical address space. */
        if (offset + (phys_addr_t)size - 1 < offset)
                return -EINVAL;

        if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
                return -EINVAL;

        if (!private_mapping_ok(vma))
                return -ENOSYS;

        if (!range_is_allowed(vma->vm_pgoff, size))
                return -EPERM;

        if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
                                                &vma->vm_page_prot))
                return -EINVAL;

        vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
                                                 size,
                                                 vma->vm_page_prot);

        vma->vm_ops = &mmap_mem_ops;

        /* Remap-pfn-range will mark the range VM_IO */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            vma->vm_pgoff,
                            size,
                            vma->vm_page_prot)) {
                return -EAGAIN;
        }
        return 0;
}
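
/*
 * Illustrative userspace sketch (not part of this file): mapping a page of
 * physical memory through mmap_mem(). The offset passed to mmap() is the
 * page-aligned physical address; 0xa0000 (the legacy VGA window on PCs) is
 * used purely as an example.
 *
 *      #include <fcntl.h>
 *      #include <sys/mman.h>
 *
 *      int fd = open("/dev/mem", O_RDONLY);
 *      unsigned char *p = mmap(NULL, 4096, PROT_READ, MAP_SHARED,
 *                              fd, 0xa0000);
 */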

static ssize_t read_port(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        char __user *tmp = buf;

        if (!access_ok(buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                if (__put_user(inb(i), tmp) < 0)
                        return -EFAULT;
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        const char __user *tmp = buf;

        if (!access_ok(buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                char c;

                if (__get_user(c, tmp)) {
                        if (tmp > buf)
                                break;
                        return -EFAULT;
                }
                outb(c, i);
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}
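
/*
 * Illustrative sketch (not part of this file): /dev/port drives inb()/outb()
 * one byte at a time, with the file offset acting as the port number.
 * Reading the PC CMOS/RTC register pair (ports 0x70/0x71) is a classic
 * example; open_port() below requires CAP_SYS_RAWIO for this.
 *
 *      #include <fcntl.h>
 *      #include <unistd.h>
 *
 *      int fd = open("/dev/port", O_RDWR);
 *      unsigned char reg = 0x00, val;
 *
 *      pwrite(fd, &reg, 1, 0x70);      // select the RTC seconds register
 *      pread(fd, &val, 1, 0x71);       // read its value back
 */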

static ssize_t read_null(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return count;
}

static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
{
        return 0;
}

static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
{
        size_t count = iov_iter_count(from);
        iov_iter_advance(from, count);
        return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
                        struct splice_desc *sd)
{
        return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
                                 loff_t *ppos, size_t len, unsigned int flags)
{
        return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
        size_t written = 0;

        while (iov_iter_count(iter)) {
                size_t chunk = iov_iter_count(iter), n;

                if (chunk > PAGE_SIZE)
                        chunk = PAGE_SIZE;      /* Just for latency reasons */
                n = iov_iter_zero(chunk, iter);
                if (!n && iov_iter_count(iter))
                        return written ? written : -EFAULT;
                written += n;
                if (signal_pending(current))
                        return written ? written : -ERESTARTSYS;
                cond_resched();
        }
        return written;
}

static ssize_t read_zero(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        size_t cleared = 0;

        while (count) {
                size_t chunk = min_t(size_t, count, PAGE_SIZE);
                size_t left;

                left = clear_user(buf + cleared, chunk);
                if (unlikely(left)) {
                        cleared += (chunk - left);
                        if (!cleared)
                                return -EFAULT;
                        break;
                }
                cleared += chunk;
                count -= chunk;

                if (signal_pending(current))
                        break;
                cond_resched();
        }

        return cleared;
}
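
/*
 * Illustrative sketch (not part of this file): read_zero() fills the user
 * buffer with zeros a page at a time, so a read of /dev/zero succeeds for
 * the full requested size unless a signal arrives first:
 *
 *      #include <fcntl.h>
 *      #include <unistd.h>
 *
 *      char buf[8192] = { 1 };
 *      int fd = open("/dev/zero", O_RDONLY);
 *      ssize_t n = read(fd, buf, sizeof(buf));  // n == 8192, buf all zeros
 */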

static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
        return -ENOSYS;
#endif
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
        vma_set_anonymous(vma);
        return 0;
}
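
/*
 * Illustrative sketch (not part of this file): a private mapping of
 * /dev/zero is the traditional way to get demand-zero anonymous memory
 * (the vma_set_anonymous() path above), equivalent to MAP_ANONYMOUS on
 * modern systems; MAP_SHARED instead goes through shmem_zero_setup().
 *
 *      #include <fcntl.h>
 *      #include <sys/mman.h>
 *
 *      int fd = open("/dev/zero", O_RDWR);
 *      char *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *                     MAP_PRIVATE, fd, 0);
 */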

static unsigned long get_unmapped_area_zero(struct file *file,
                                unsigned long addr, unsigned long len,
                                unsigned long pgoff, unsigned long flags)
{
#ifdef CONFIG_MMU
        if (flags & MAP_SHARED) {
                /*
                 * mmap_zero() will call shmem_zero_setup() to create a file,
                 * so use shmem's get_unmapped_area in case it can be huge;
                 * and pass NULL for file as in mmap.c's get_unmapped_area(),
                 * so as not to confuse shmem with our handle on "/dev/zero".
                 */
                return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
        }

        /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
        return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
#else
        return -ENOSYS;
#endif
}

static ssize_t write_full(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
        return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
        loff_t ret;

        inode_lock(file_inode(file));
        switch (orig) {
        case SEEK_CUR:
                offset += file->f_pos;
                fallthrough;
        case SEEK_SET:
                /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
                if ((unsigned long long)offset >= -MAX_ERRNO) {
                        ret = -EOVERFLOW;
                        break;
                }
                file->f_pos = offset;
                ret = file->f_pos;
                force_successful_syscall_return();
                break;
        default:
                ret = -EINVAL;
        }
        inode_unlock(file_inode(file));
        return ret;
}
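
/*
 * Worked example for the overflow check above: MAX_ERRNO is 4095, so any
 * offset at or above (u64)-4095 (0xfffffffffffff001 on 64-bit) is rejected
 * with -EOVERFLOW. Otherwise lseek() could return an f_pos that userland
 * cannot tell apart from an errno, e.g. f_pos = -9 looks like -EBADF.
 */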

static int open_port(struct inode *inode, struct file *filp)
{
        int rc;

        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;

        rc = security_locked_down(LOCKDOWN_DEV_MEM);
        if (rc)
                return rc;

        if (iminor(inode) != DEVMEM_MINOR)
                return 0;

        /*
         * Use a unified address space to have a single point to manage
         * revocations when drivers want to take over a /dev/mem mapped
         * range.
         */
        filp->f_mapping = iomem_get_mapping();

        return 0;
}

#define zero_lseek      null_lseek
#define full_lseek      null_lseek
#define write_zero      write_null
#define write_iter_zero write_iter_null
#define open_mem        open_port

static const struct file_operations __maybe_unused mem_fops = {
        .llseek         = memory_lseek,
        .read           = read_mem,
        .write          = write_mem,
        .mmap           = mmap_mem,
        .open           = open_mem,
#ifndef CONFIG_MMU
        .get_unmapped_area = get_unmapped_area_mem,
        .mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations null_fops = {
        .llseek         = null_lseek,
        .read           = read_null,
        .write          = write_null,
        .read_iter      = read_iter_null,
        .write_iter     = write_iter_null,
        .splice_write   = splice_write_null,
};

static const struct file_operations __maybe_unused port_fops = {
        .llseek         = memory_lseek,
        .read           = read_port,
        .write          = write_port,
        .open           = open_port,
};

static const struct file_operations zero_fops = {
        .llseek         = zero_lseek,
        .write          = write_zero,
        .read_iter      = read_iter_zero,
        .read           = read_zero,
        .write_iter     = write_iter_zero,
        .mmap           = mmap_zero,
        .get_unmapped_area = get_unmapped_area_zero,
#ifndef CONFIG_MMU
        .mmap_capabilities = zero_mmap_capabilities,
#endif
};

static const struct file_operations full_fops = {
        .llseek         = full_lseek,
        .read_iter      = read_iter_zero,
        .write          = write_full,
};

static const struct memdev {
        const char *name;
        umode_t mode;
        const struct file_operations *fops;
        fmode_t fmode;
} devlist[] = {
#ifdef CONFIG_DEVMEM
         [DEVMEM_MINOR] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
#endif
         [3] = { "null", 0666, &null_fops, 0 },
#ifdef CONFIG_DEVPORT
         [4] = { "port", 0, &port_fops, 0 },
#endif
         [5] = { "zero", 0666, &zero_fops, 0 },
         [7] = { "full", 0666, &full_fops, 0 },
         [8] = { "random", 0666, &random_fops, 0 },
         [9] = { "urandom", 0666, &urandom_fops, 0 },
#ifdef CONFIG_PRINTK
        [11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif
};
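
/*
 * Illustrative note (not part of this file): the devlist[] index is the
 * minor number under the fixed MEM_MAJOR (1) character major, so the
 * classic device nodes can be recreated by hand, e.g.:
 *
 *      #include <sys/stat.h>
 *      #include <sys/sysmacros.h>
 *
 *      mknod("/dev/null", S_IFCHR | 0666, makedev(1, 3));
 *      mknod("/dev/zero", S_IFCHR | 0666, makedev(1, 5));
 */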

static int memory_open(struct inode *inode, struct file *filp)
{
        int minor;
        const struct memdev *dev;

        minor = iminor(inode);
        if (minor >= ARRAY_SIZE(devlist))
                return -ENXIO;

        dev = &devlist[minor];
        if (!dev->fops)
                return -ENXIO;

        filp->f_op = dev->fops;
        filp->f_mode |= dev->fmode;

        if (dev->fops->open)
                return dev->fops->open(inode, filp);

        return 0;
}

static const struct file_operations memory_fops = {
        .open = memory_open,
        .llseek = noop_llseek,
};

static char *mem_devnode(struct device *dev, umode_t *mode)
{
        if (mode && devlist[MINOR(dev->devt)].mode)
                *mode = devlist[MINOR(dev->devt)].mode;
        return NULL;
}

static struct class *mem_class;

static int __init chr_dev_init(void)
{
        int minor;

        if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
                printk("unable to get major %d for memory devs\n", MEM_MAJOR);

        mem_class = class_create(THIS_MODULE, "mem");
        if (IS_ERR(mem_class))
                return PTR_ERR(mem_class);

        mem_class->devnode = mem_devnode;
        for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
                if (!devlist[minor].name)
                        continue;

                /*
                 * Create /dev/port?
                 */
                if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
                        continue;

                device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
                              NULL, devlist[minor].name);
        }

        return tty_init();
}

fs_initcall(chr_dev_init);