linux/drivers/char/mem.c
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/uio.h>

#include <linux/uaccess.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#define DEVPORT_MINOR   4

static inline unsigned long size_inside_page(unsigned long start,
                                             unsigned long size)
{
        unsigned long sz;

        sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

        return min(sz, size);
}
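
/*
 * Worked example (editorial, illustrative only, assuming PAGE_SIZE == 4096):
 * for start == 0x1ff0 and size == 64, the offset within the page is 0xff0,
 * so 4096 - 0xff0 == 16 bytes remain in the page and the helper returns
 * min(16, 64) == 16.  The read/write loops below use this to split a
 * request into per-page chunks.
 */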

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
        return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return 1;
}
#endif

#ifdef CONFIG_STRICT_DEVMEM
static inline int page_is_allowed(unsigned long pfn)
{
        return devmem_is_allowed(pfn);
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn))
                        return 0;
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#else
static inline int page_is_allowed(unsigned long pfn)
{
        return 1;
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#endif
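
/*
 * Note (editorial, inferred from the callers below): with
 * CONFIG_STRICT_DEVMEM, page_is_allowed() forwards the architecture's
 * devmem_is_allowed() result, which read_mem() and write_mem() treat as a
 * tri-state: 0 means "refuse with -EPERM", 1 means "access the real page",
 * and 2 means "pretend to succeed, but reads see zeroes and writes are
 * dropped" for restricted memory.
 */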

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
                        size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t read, sz;
        void *ptr;

        /* Offsets that don't survive the phys_addr_t conversion are past
         * the end of physical memory: report EOF. */
        if (p != *ppos)
                return 0;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;
        read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                if (sz > 0) {
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        count -= sz;
                        read += sz;
                }
        }
#endif

        while (count > 0) {
                unsigned long remaining;
                int allowed;

                sz = size_inside_page(p, count);

                allowed = page_is_allowed(p >> PAGE_SHIFT);
                if (!allowed)
                        return -EPERM;
                if (allowed == 2) {
                        /* Show zeros for restricted memory. */
                        remaining = clear_user(buf, sz);
                } else {
                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur.
                         */
                        ptr = xlate_dev_mem_ptr(p);
                        if (!ptr)
                                return -EFAULT;

                        remaining = copy_to_user(buf, ptr, sz);

                        unxlate_dev_mem_ptr(p, ptr);
                }

                if (remaining)
                        return -EFAULT;

                buf += sz;
                p += sz;
                count -= sz;
                read += sz;
        }

        *ppos += read;
        return read;
}
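
/*
 * Usage sketch (hypothetical userspace program, not part of this file):
 * reading one page of physical memory through /dev/mem.  Assumes the
 * caller has CAP_SYS_RAWIO (open_mem() checks it) and that the range is
 * permitted by CONFIG_STRICT_DEVMEM on this kernel; 0xA0000 is only an
 * illustrative address.
 *
 *      #include <fcntl.h>
 *      #include <unistd.h>
 *
 *      unsigned char page[4096];
 *      int fd = open("/dev/mem", O_RDONLY);
 *      if (fd >= 0) {
 *              // the file offset is the physical address
 *              if (lseek(fd, 0xA0000, SEEK_SET) != (off_t)-1)
 *                      read(fd, page, sizeof(page));   // ends up in read_mem()
 *              close(fd);
 *      }
 */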

static ssize_t write_mem(struct file *file, const char __user *buf,
                         size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t written, sz;
        unsigned long copied;
        void *ptr;

        /* Offsets that don't fit in phys_addr_t can't be written: -EFBIG. */
        if (p != *ppos)
                return -EFBIG;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;

        written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                int allowed;

                sz = size_inside_page(p, count);

                allowed = page_is_allowed(p >> PAGE_SHIFT);
                if (!allowed)
                        return -EPERM;

                /* Skip actual writing when a page is marked as restricted. */
                if (allowed == 1) {
                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur.
                         */
                        ptr = xlate_dev_mem_ptr(p);
                        if (!ptr) {
                                if (written)
                                        break;
                                return -EFAULT;
                        }

                        copied = copy_from_user(ptr, buf, sz);
                        unxlate_dev_mem_ptr(p, ptr);
                        if (copied) {
                                written += sz - copied;
                                if (written)
                                        break;
                                return -EFAULT;
                        }
                }

                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}

int __weak phys_mem_access_prot_allowed(struct file *file,
        unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
        return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
        /*
         * On ia64, we ignore O_DSYNC because we cannot tolerate memory
         * attribute aliases.
         */
        return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
        {
                extern int __uncached_access(struct file *file,
                                             unsigned long addr);

                return __uncached_access(file, addr);
        }
#else
        /*
         * Accessing memory above the top the kernel knows about or through
         * a file pointer that was marked O_DSYNC will be done non-cached.
         */
        if (file->f_flags & O_DSYNC)
                return 1;
        return addr >= __pa(high_memory);
#endif
}
#endif

static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
        /* Cast before shifting so large pfns don't overflow unsigned long. */
        phys_addr_t offset = (phys_addr_t)pfn << PAGE_SHIFT;

        if (uncached_access(file, offset))
                return pgprot_noncached(vma_prot);
#endif
        return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
                                           unsigned long addr,
                                           unsigned long len,
                                           unsigned long pgoff,
                                           unsigned long flags)
{
        if (!valid_mmap_phys_addr_range(pgoff, len))
                return (unsigned long) -EINVAL;
        return pgoff << PAGE_SHIFT;
}

/* permit direct mmap, for read, write or exec */
static unsigned memory_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_DIRECT |
                NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

static unsigned zero_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_COPY;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return vma->vm_flags & VM_MAYSHARE;
}
#else

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
        .access = generic_access_phys
#endif
};

static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
        size_t size = vma->vm_end - vma->vm_start;
        phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;

        /* It's illegal to wrap around the end of the physical address space. */
        if (offset + (phys_addr_t)size - 1 < offset)
                return -EINVAL;

        if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
                return -EINVAL;

        if (!private_mapping_ok(vma))
                return -ENOSYS;

        if (!range_is_allowed(vma->vm_pgoff, size))
                return -EPERM;

        if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
                                                &vma->vm_page_prot))
                return -EINVAL;

        vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
                                                 size,
                                                 vma->vm_page_prot);

        vma->vm_ops = &mmap_mem_ops;

        /* Remap-pfn-range will mark the range VM_IO */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            vma->vm_pgoff,
                            size,
                            vma->vm_page_prot)) {
                return -EAGAIN;
        }
        return 0;
}
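
/*
 * Usage sketch (hypothetical userspace program, not part of this file):
 * mapping a physical range through /dev/mem instead of read()/write().
 * The mmap offset is the physical address; 0xFEE00000 below is only an
 * illustrative, architecture-specific example.
 *
 *      #include <fcntl.h>
 *      #include <sys/mman.h>
 *
 *      int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *      volatile unsigned int *regs =
 *              mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                   fd, 0xFEE00000);        // handled by mmap_mem()
 *      if (regs != MAP_FAILED) {
 *              unsigned int v = regs[0];    // MMIO-style access
 *              (void)v;
 *              munmap((void *)regs, 4096);
 *      }
 */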

static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
        unsigned long pfn;

        /* Turn a kernel-virtual address into a physical page frame */
        pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

        /*
         * RED-PEN: on some architectures there is more mapped memory than
         * available in mem_map which pfn_valid checks for. Perhaps should add a
         * new macro here.
         *
         * RED-PEN: vmalloc is not supported right now.
         */
        if (!pfn_valid(pfn))
                return -EIO;

        vma->vm_pgoff = pfn;
        return mmap_mem(file, vma);
}

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t low_count, read, sz;
        char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
        int err = 0;

        read = 0;
        if (p < (unsigned long) high_memory) {
                low_count = count;
                if (count > (unsigned long)high_memory - p)
                        low_count = (unsigned long)high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
                /* we don't have page 0 mapped on sparc and m68k.. */
                if (p < PAGE_SIZE && low_count > 0) {
                        sz = size_inside_page(p, low_count);
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
#endif
                while (low_count > 0) {
                        sz = size_inside_page(p, low_count);

                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur
                         */
                        kbuf = xlate_dev_kmem_ptr((void *)p);
                        if (!virt_addr_valid(kbuf))
                                return -ENXIO;

                        if (copy_to_user(buf, kbuf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
                        sz = size_inside_page(p, count);
                        if (!is_vmalloc_or_module_addr((void *)p)) {
                                err = -ENXIO;
                                break;
                        }
                        sz = vread(kbuf, (char *)p, sz);
                        if (!sz)
                                break;
                        if (copy_to_user(buf, kbuf, sz)) {
                                err = -EFAULT;
                                break;
                        }
                        count -= sz;
                        buf += sz;
                        read += sz;
                        p += sz;
                }
                free_page((unsigned long)kbuf);
        }
        *ppos = p;
        return read ? read : err;
}
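
/*
 * Usage sketch (hypothetical userspace program, not part of this file):
 * /dev/kmem is addressed by kernel *virtual* addresses, so a typical
 * consumer looks an address up first, e.g. in /proc/kallsyms, and then
 * seeks to it.  Assumes CONFIG_DEVKMEM=y and CAP_SYS_RAWIO; the address
 * below is a placeholder, not a real symbol.
 *
 *      #include <fcntl.h>
 *      #include <unistd.h>
 *
 *      unsigned long addr = 0xffffffff81000000UL; // e.g. parsed from kallsyms
 *      long value;
 *      int fd = open("/dev/kmem", O_RDONLY);
 *      if (fd >= 0) {
 *              if (lseek(fd, addr, SEEK_SET) != (off_t)-1)
 *                      read(fd, &value, sizeof(value)); // ends up in read_kmem()
 *              close(fd);
 *      }
 */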

static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
                                size_t count, loff_t *ppos)
{
        ssize_t written, sz;
        unsigned long copied;

        written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                void *ptr;

                sz = size_inside_page(p, count);

                /*
                 * On ia64 if a page has been mapped somewhere as uncached, then
                 * it must also be accessed uncached by the kernel or data
                 * corruption may occur.
                 */
                ptr = xlate_dev_kmem_ptr((void *)p);
                if (!virt_addr_valid(ptr))
                        return -ENXIO;

                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        written += sz - copied;
                        if (written)
                                break;
                        return -EFAULT;
                }
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t wrote = 0;
        ssize_t virtr = 0;
        char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
        int err = 0;

        if (p < (unsigned long) high_memory) {
                unsigned long to_write = min_t(unsigned long, count,
                                               (unsigned long)high_memory - p);
                wrote = do_write_kmem(p, buf, to_write, ppos);
                if (wrote != to_write)
                        return wrote;
                p += wrote;
                buf += wrote;
                count -= wrote;
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return wrote ? wrote : -ENOMEM;
                while (count > 0) {
                        unsigned long sz = size_inside_page(p, count);
                        unsigned long n;

                        if (!is_vmalloc_or_module_addr((void *)p)) {
                                err = -ENXIO;
                                break;
                        }
                        n = copy_from_user(kbuf, buf, sz);
                        if (n) {
                                err = -EFAULT;
                                break;
                        }
                        vwrite(kbuf, (char *)p, sz);
                        count -= sz;
                        buf += sz;
                        virtr += sz;
                        p += sz;
                }
                free_page((unsigned long)kbuf);
        }

        *ppos = p;
        /* GNU "?:": if any bytes were written, report that count, else err. */
        return virtr + wrote ? : err;
}

static ssize_t read_port(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        char __user *tmp = buf;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                if (__put_user(inb(i), tmp) < 0)
                        return -EFAULT;
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp - buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        const char __user *tmp = buf;

        if (!access_ok(VERIFY_READ, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                char c;

                if (__get_user(c, tmp)) {
                        if (tmp > buf)
                                break;
                        return -EFAULT;
                }
                outb(c, i);
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp - buf;
}
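
/*
 * Usage sketch (hypothetical userspace program, not part of this file):
 * with /dev/port the file offset selects the I/O port, one byte per port.
 * Reading the CMOS/RTC seconds register is shown purely as an example;
 * poking arbitrary ports can hang real hardware.
 *
 *      #include <fcntl.h>
 *      #include <unistd.h>
 *
 *      unsigned char idx = 0x00, sec;
 *      int fd = open("/dev/port", O_RDWR);
 *      if (fd >= 0) {
 *              pwrite(fd, &idx, 1, 0x70);  // select RTC register 0 -> write_port()
 *              pread(fd, &sec, 1, 0x71);   // read it back          -> read_port()
 *              close(fd);
 *      }
 */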

static ssize_t read_null(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return count;
}

static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
{
        return 0;
}

static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
{
        size_t count = iov_iter_count(from);

        iov_iter_advance(from, count);
        return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
                        struct splice_desc *sd)
{
        return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
                                 loff_t *ppos, size_t len, unsigned int flags)
{
        return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
        size_t written = 0;

        while (iov_iter_count(iter)) {
                size_t chunk = iov_iter_count(iter), n;

                if (chunk > PAGE_SIZE)
                        chunk = PAGE_SIZE;      /* Just for latency reasons */
                n = iov_iter_zero(chunk, iter);
                if (!n && iov_iter_count(iter))
                        return written ? written : -EFAULT;
                written += n;
                if (signal_pending(current))
                        return written ? written : -ERESTARTSYS;
                cond_resched();
        }
        return written;
}

static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
        return -ENOSYS;
#endif
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
        return 0;
}

static unsigned long get_unmapped_area_zero(struct file *file,
                                unsigned long addr, unsigned long len,
                                unsigned long pgoff, unsigned long flags)
{
#ifdef CONFIG_MMU
        if (flags & MAP_SHARED) {
                /*
                 * mmap_zero() will call shmem_zero_setup() to create a file,
                 * so use shmem's get_unmapped_area in case it can be huge;
                 * and pass NULL for file as in mmap.c's get_unmapped_area(),
                 * so as not to confuse shmem with our handle on "/dev/zero".
                 */
                return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
        }

        /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
        return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
#else
        return -ENOSYS;
#endif
}
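
/*
 * Usage sketch (hypothetical userspace program, not part of this file):
 * the "shared /dev/zero mmapping support" mentioned in the header above
 * was the classic pre-MAP_ANONYMOUS way to get zero-filled memory shared
 * across fork(); a MAP_SHARED mapping goes through shmem_zero_setup().
 *
 *      #include <fcntl.h>
 *      #include <sys/mman.h>
 *
 *      int fd = open("/dev/zero", O_RDWR);
 *      int *shared = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *                         MAP_SHARED, fd, 0);  // backed by shmem
 *      // after fork(), parent and child both see updates to *shared
 */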

static ssize_t write_full(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
        return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * Also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
        loff_t ret;

        inode_lock(file_inode(file));
        switch (orig) {
        case SEEK_CUR:
                offset += file->f_pos;
                /* fall through */
        case SEEK_SET:
                /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
                if ((unsigned long long)offset >= -MAX_ERRNO) {
                        ret = -EOVERFLOW;
                        break;
                }
                file->f_pos = offset;
                ret = file->f_pos;
                force_successful_syscall_return();
                break;
        default:
                ret = -EINVAL;
        }
        inode_unlock(file_inode(file));
        return ret;
}
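
/*
 * Worked example (editorial, illustrative only): MAX_ERRNO is 4095, so on
 * a 64-bit kernel the check above rejects offsets at or above
 * 0xfffffffffffff001.  An lseek(fd, -4095, SEEK_SET) on /dev/mem thus
 * fails with EOVERFLOW instead of installing an f_pos that userland could
 * mistake for an errno value in a later syscall return.
 */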

static int open_port(struct inode *inode, struct file *filp)
{
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek      null_lseek
#define full_lseek      null_lseek
#define write_zero      write_null
#define write_iter_zero write_iter_null
#define open_mem        open_port
#define open_kmem       open_mem

static const struct file_operations __maybe_unused mem_fops = {
        .llseek         = memory_lseek,
        .read           = read_mem,
        .write          = write_mem,
        .mmap           = mmap_mem,
        .open           = open_mem,
#ifndef CONFIG_MMU
        .get_unmapped_area = get_unmapped_area_mem,
        .mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations __maybe_unused kmem_fops = {
        .llseek         = memory_lseek,
        .read           = read_kmem,
        .write          = write_kmem,
        .mmap           = mmap_kmem,
        .open           = open_kmem,
#ifndef CONFIG_MMU
        .get_unmapped_area = get_unmapped_area_mem,
        .mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations null_fops = {
        .llseek         = null_lseek,
        .read           = read_null,
        .write          = write_null,
        .read_iter      = read_iter_null,
        .write_iter     = write_iter_null,
        .splice_write   = splice_write_null,
};

static const struct file_operations __maybe_unused port_fops = {
        .llseek         = memory_lseek,
        .read           = read_port,
        .write          = write_port,
        .open           = open_port,
};

static const struct file_operations zero_fops = {
        .llseek         = zero_lseek,
        .write          = write_zero,
        .read_iter      = read_iter_zero,
        .write_iter     = write_iter_zero,
        .mmap           = mmap_zero,
        .get_unmapped_area = get_unmapped_area_zero,
#ifndef CONFIG_MMU
        .mmap_capabilities = zero_mmap_capabilities,
#endif
};

static const struct file_operations full_fops = {
        .llseek         = full_lseek,
        .read_iter      = read_iter_zero,
        .write          = write_full,
};

static const struct memdev {
        const char *name;
        umode_t mode;
        const struct file_operations *fops;
        fmode_t fmode;
} devlist[] = {
#ifdef CONFIG_DEVMEM
         [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
#endif
#ifdef CONFIG_DEVKMEM
         [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
#endif
         [3] = { "null", 0666, &null_fops, 0 },
#ifdef CONFIG_DEVPORT
         [4] = { "port", 0, &port_fops, 0 },
#endif
         [5] = { "zero", 0666, &zero_fops, 0 },
         [7] = { "full", 0666, &full_fops, 0 },
         [8] = { "random", 0666, &random_fops, 0 },
         [9] = { "urandom", 0666, &urandom_fops, 0 },
#ifdef CONFIG_PRINTK
        [11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif
};
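
/*
 * Note (editorial): the array index doubles as the minor number under
 * MEM_MAJOR (1); unpopulated slots (0, 6, 10, and any device compiled out
 * above) make memory_open() return -ENXIO.  For instance, the null device
 * could be recreated by hand with:
 *
 *      mknod /dev/null c 1 3       # major MEM_MAJOR=1, minor 3 -> null_fops
 */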

static int memory_open(struct inode *inode, struct file *filp)
{
        int minor;
        const struct memdev *dev;

        minor = iminor(inode);
        if (minor >= ARRAY_SIZE(devlist))
                return -ENXIO;

        dev = &devlist[minor];
        if (!dev->fops)
                return -ENXIO;

        filp->f_op = dev->fops;
        filp->f_mode |= dev->fmode;

        if (dev->fops->open)
                return dev->fops->open(inode, filp);

        return 0;
}

static const struct file_operations memory_fops = {
        .open = memory_open,
        .llseek = noop_llseek,
};

static char *mem_devnode(struct device *dev, umode_t *mode)
{
        if (mode && devlist[MINOR(dev->devt)].mode)
                *mode = devlist[MINOR(dev->devt)].mode;
        return NULL;
}

static struct class *mem_class;

static int __init chr_dev_init(void)
{
        int minor;

        if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
                printk(KERN_ERR "unable to get major %d for memory devs\n",
                       MEM_MAJOR);

        mem_class = class_create(THIS_MODULE, "mem");
        if (IS_ERR(mem_class))
                return PTR_ERR(mem_class);

        mem_class->devnode = mem_devnode;
        for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
                if (!devlist[minor].name)
                        continue;

                /*
                 * Create /dev/port?
                 */
                if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
                        continue;

                device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
                              NULL, devlist[minor].name);
        }

        return tty_init();
}

fs_initcall(chr_dev_init);