linux/drivers/char/mem.c
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/security.h>
#include <linux/pseudo_fs.h>
#include <uapi/linux/magic.h>
#include <linux/mount.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#define DEVMEM_MINOR    1
#define DEVPORT_MINOR   4

static inline unsigned long size_inside_page(unsigned long start,
                                             unsigned long size)
{
        unsigned long sz;

        sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

        return min(sz, size);
}
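
/*
 * For example, with 4 KiB pages size_inside_page(0x1ffc, 64) returns 4:
 * only four bytes remain before the 0x2000 page boundary, so the copy
 * loops below always advance in boundary-aligned, at most page-sized
 * chunks.
 */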

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
        return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return 1;
}
#endif

#ifdef CONFIG_STRICT_DEVMEM
static inline int page_is_allowed(unsigned long pfn)
{
        return devmem_is_allowed(pfn);
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn))
                        return 0;
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#else
static inline int page_is_allowed(unsigned long pfn)
{
        return 1;
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#endif

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif

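/*
 * Called once per chunk from the read/write loops below: give the
 * scheduler a chance to run when rescheduling is due, and report
 * whether a fatal signal is pending so the caller can stop early.
 * This keeps long transfers on these devices preemptible and killable.
 */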
static inline bool should_stop_iteration(void)
{
        if (need_resched())
                cond_resched();
        return fatal_signal_pending(current);
}

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
                        size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t read, sz;
        void *ptr;
        char *bounce;
        int err;

        if (p != *ppos)
                return 0;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;
        read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                if (sz > 0) {
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        count -= sz;
                        read += sz;
                }
        }
#endif

        bounce = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!bounce)
                return -ENOMEM;

        while (count > 0) {
                unsigned long remaining;
                int allowed, probe;

                sz = size_inside_page(p, count);

                err = -EPERM;
                allowed = page_is_allowed(p >> PAGE_SHIFT);
                if (!allowed)
                        goto failed;

                err = -EFAULT;
                if (allowed == 2) {
                        /* Show zeros for restricted memory. */
                        remaining = clear_user(buf, sz);
                } else {
                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur.
                         */
                        ptr = xlate_dev_mem_ptr(p);
                        if (!ptr)
                                goto failed;

                        probe = copy_from_kernel_nofault(bounce, ptr, sz);
                        unxlate_dev_mem_ptr(p, ptr);
                        if (probe)
                                goto failed;

                        remaining = copy_to_user(buf, bounce, sz);
                }

                if (remaining)
                        goto failed;

                buf += sz;
                p += sz;
                count -= sz;
                read += sz;
                if (should_stop_iteration())
                        break;
        }
        kfree(bounce);

        *ppos += read;
        return read;

failed:
        kfree(bounce);
        return err;
}
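
/*
 * Userspace sketch (illustrative only; the range must satisfy
 * valid_phys_addr_range() and page_is_allowed() above, and 'phys' is a
 * placeholder for a readable physical address):
 *
 *      int fd = open("/dev/mem", O_RDONLY);
 *      unsigned char buf[16];
 *      if (pread(fd, buf, sizeof(buf), phys) != sizeof(buf))
 *              ...
 *
 * The file offset is interpreted as a physical address, which is why
 * mem_fops pairs read_mem() with memory_lseek() below.
 */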

static ssize_t write_mem(struct file *file, const char __user *buf,
                         size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t written, sz;
        unsigned long copied;
        void *ptr;

        if (p != *ppos)
                return -EFBIG;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;

        written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                int allowed;

                sz = size_inside_page(p, count);

                allowed = page_is_allowed(p >> PAGE_SHIFT);
                if (!allowed)
                        return -EPERM;

                /* Skip actual writing when a page is marked as restricted. */
                if (allowed == 1) {
                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur.
                         */
                        ptr = xlate_dev_mem_ptr(p);
                        if (!ptr) {
                                if (written)
                                        break;
                                return -EFAULT;
                        }

                        copied = copy_from_user(ptr, buf, sz);
                        unxlate_dev_mem_ptr(p, ptr);
                        if (copied) {
                                written += sz - copied;
                                if (written)
                                        break;
                                return -EFAULT;
                        }
                }

                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
                if (should_stop_iteration())
                        break;
        }

        *ppos += written;
        return written;
}
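
/*
 * Note the partial-write convention above: once at least one byte has
 * been written, a subsequent fault ends the loop and the short count is
 * returned; -EFAULT is reported only if nothing could be written at all.
 */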

int __weak phys_mem_access_prot_allowed(struct file *file,
        unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
        return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
        /*
         * On ia64, we ignore O_DSYNC because we cannot tolerate memory
         * attribute aliases.
         */
        return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
        {
                extern int __uncached_access(struct file *file,
                                             unsigned long addr);

                return __uncached_access(file, addr);
        }
#else
        /*
         * Accessing memory above the top the kernel knows about, or
         * through a file pointer that was marked O_DSYNC, will be done
         * non-cached.
         */
        if (file->f_flags & O_DSYNC)
                return 1;
        return addr >= __pa(high_memory);
#endif
}
#endif

static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
        phys_addr_t offset = pfn << PAGE_SHIFT;

        if (uncached_access(file, offset))
                return pgprot_noncached(vma_prot);
#endif
        return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
                                           unsigned long addr,
                                           unsigned long len,
                                           unsigned long pgoff,
                                           unsigned long flags)
{
        if (!valid_mmap_phys_addr_range(pgoff, len))
                return (unsigned long) -EINVAL;
        return pgoff << PAGE_SHIFT;
}

/* permit direct mmap, for read, write or exec */
static unsigned memory_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_DIRECT |
                NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

static unsigned zero_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_COPY;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return vma->vm_flags & VM_MAYSHARE;
}
#else

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
        .access = generic_access_phys
#endif
};

static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
        size_t size = vma->vm_end - vma->vm_start;
        phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;

        /* Does it even fit in phys_addr_t? */
        if (offset >> PAGE_SHIFT != vma->vm_pgoff)
                return -EINVAL;

        /* It's illegal to wrap around the end of the physical address space. */
        if (offset + (phys_addr_t)size - 1 < offset)
                return -EINVAL;

        if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
                return -EINVAL;

        if (!private_mapping_ok(vma))
                return -ENOSYS;

        if (!range_is_allowed(vma->vm_pgoff, size))
                return -EPERM;

        if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
                                                &vma->vm_page_prot))
                return -EINVAL;

        vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
                                                 size,
                                                 vma->vm_page_prot);

        vma->vm_ops = &mmap_mem_ops;

        /* Remap-pfn-range will mark the range VM_IO */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            vma->vm_pgoff,
                            size,
                            vma->vm_page_prot)) {
                return -EAGAIN;
        }
        return 0;
}
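
/*
 * Userspace sketch (illustrative only; 'phys' stands for a page-aligned
 * physical address that range_is_allowed() above would permit):
 *
 *      void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                     fd, phys);
 *
 * The mmap() offset becomes vma->vm_pgoff (phys >> PAGE_SHIFT), which
 * remap_pfn_range() then maps straight onto the physical range.
 */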

static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
        unsigned long pfn;

        /* Turn a kernel-virtual address into a physical page frame */
        pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

        /*
         * RED-PEN: on some architectures there is more mapped memory than
         * available in mem_map which pfn_valid checks for. Perhaps should add a
         * new macro here.
         *
         * RED-PEN: vmalloc is not supported right now.
         */
        if (!pfn_valid(pfn))
                return -EIO;

        vma->vm_pgoff = pfn;
        return mmap_mem(file, vma);
}

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t low_count, read, sz;
        char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
        int err = 0;

        read = 0;
        if (p < (unsigned long) high_memory) {
                low_count = count;
                if (count > (unsigned long)high_memory - p)
                        low_count = (unsigned long)high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
                /* we don't have page 0 mapped on sparc and m68k.. */
                if (p < PAGE_SIZE && low_count > 0) {
                        sz = size_inside_page(p, low_count);
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
#endif
                while (low_count > 0) {
                        sz = size_inside_page(p, low_count);

                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur
                         */
                        kbuf = xlate_dev_kmem_ptr((void *)p);
                        if (!virt_addr_valid(kbuf))
                                return -ENXIO;

                        if (copy_to_user(buf, kbuf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                        if (should_stop_iteration()) {
                                count = 0;
                                break;
                        }
                }
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
                        sz = size_inside_page(p, count);
                        if (!is_vmalloc_or_module_addr((void *)p)) {
                                err = -ENXIO;
                                break;
                        }
                        sz = vread(kbuf, (char *)p, sz);
                        if (!sz)
                                break;
                        if (copy_to_user(buf, kbuf, sz)) {
                                err = -EFAULT;
                                break;
                        }
                        count -= sz;
                        buf += sz;
                        read += sz;
                        p += sz;
                        if (should_stop_iteration())
                                break;
                }
                free_page((unsigned long)kbuf);
        }
        *ppos = p;
        return read ? read : err;
}
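
/*
 * Note the two phases above: addresses below high_memory sit in the
 * kernel's direct mapping and are copied out directly, while higher
 * addresses must be vmalloc/module mappings and are fetched through
 * vread() via a bounce page.
 */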

static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
                                size_t count, loff_t *ppos)
{
        ssize_t written, sz;
        unsigned long copied;

        written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                void *ptr;

                sz = size_inside_page(p, count);

                /*
                 * On ia64 if a page has been mapped somewhere as uncached, then
                 * it must also be accessed uncached by the kernel or data
                 * corruption may occur.
                 */
                ptr = xlate_dev_kmem_ptr((void *)p);
                if (!virt_addr_valid(ptr))
                        return -ENXIO;

                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        written += sz - copied;
                        if (written)
                                break;
                        return -EFAULT;
                }
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
                if (should_stop_iteration())
                        break;
        }

        *ppos += written;
        return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t wrote = 0;
        ssize_t virtr = 0;
        char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
        int err = 0;

        if (p < (unsigned long) high_memory) {
                unsigned long to_write = min_t(unsigned long, count,
                                               (unsigned long)high_memory - p);
                wrote = do_write_kmem(p, buf, to_write, ppos);
                if (wrote != to_write)
                        return wrote;
                p += wrote;
                buf += wrote;
                count -= wrote;
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return wrote ? wrote : -ENOMEM;
                while (count > 0) {
                        unsigned long sz = size_inside_page(p, count);
                        unsigned long n;

                        if (!is_vmalloc_or_module_addr((void *)p)) {
                                err = -ENXIO;
                                break;
                        }
                        n = copy_from_user(kbuf, buf, sz);
                        if (n) {
                                err = -EFAULT;
                                break;
                        }
                        vwrite(kbuf, (char *)p, sz);
                        count -= sz;
                        buf += sz;
                        virtr += sz;
                        p += sz;
                        if (should_stop_iteration())
                                break;
                }
                free_page((unsigned long)kbuf);
        }

        *ppos = p;
        return virtr + wrote ? : err;
}
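
/*
 * The final return above uses the GNU "?:" shorthand: it yields the
 * total transfer count (virtr + wrote) when anything was written, and
 * the saved error code otherwise, mirroring read_kmem().
 */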

static ssize_t read_port(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        char __user *tmp = buf;

        if (!access_ok(buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                if (__put_user(inb(i), tmp) < 0)
                        return -EFAULT;
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        const char __user *tmp = buf;

        if (!access_ok(buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                char c;

                if (__get_user(c, tmp)) {
                        if (tmp > buf)
                                break;
                        return -EFAULT;
                }
                outb(c, i);
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}
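
/*
 * Userspace sketch for /dev/port (illustrative only): the file offset
 * selects the I/O port and each byte is one inb()/outb(), so
 *
 *      int fd = open("/dev/port", O_RDWR);
 *      unsigned char val;
 *      pread(fd, &val, 1, 0x70);
 *
 * reads a single byte from port 0x70.
 */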

static ssize_t read_null(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return count;
}

static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
{
        return 0;
}

static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
{
        size_t count = iov_iter_count(from);
        iov_iter_advance(from, count);
        return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
                        struct splice_desc *sd)
{
        return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
                                 loff_t *ppos, size_t len, unsigned int flags)
{
        return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
        size_t written = 0;

        while (iov_iter_count(iter)) {
                size_t chunk = iov_iter_count(iter), n;

                if (chunk > PAGE_SIZE)
                        chunk = PAGE_SIZE;      /* Just for latency reasons */
                n = iov_iter_zero(chunk, iter);
                if (!n && iov_iter_count(iter))
                        return written ? written : -EFAULT;
                written += n;
                if (signal_pending(current))
                        return written ? written : -ERESTARTSYS;
                cond_resched();
        }
        return written;
}

static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
        return -ENOSYS;
#endif
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
        vma_set_anonymous(vma);
        return 0;
}

static unsigned long get_unmapped_area_zero(struct file *file,
                                unsigned long addr, unsigned long len,
                                unsigned long pgoff, unsigned long flags)
{
#ifdef CONFIG_MMU
        if (flags & MAP_SHARED) {
                /*
                 * mmap_zero() will call shmem_zero_setup() to create a file,
                 * so use shmem's get_unmapped_area in case it can be huge;
                 * and pass NULL for file as in mmap.c's get_unmapped_area(),
                 * so as not to confuse shmem with our handle on "/dev/zero".
                 */
                return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
        }

        /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
        return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
#else
        return -ENOSYS;
#endif
}

static ssize_t write_full(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
        return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
        loff_t ret;

        inode_lock(file_inode(file));
        switch (orig) {
        case SEEK_CUR:
                offset += file->f_pos;
                /* fall through */
        case SEEK_SET:
                /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
                if ((unsigned long long)offset >= -MAX_ERRNO) {
                        ret = -EOVERFLOW;
                        break;
                }
                file->f_pos = offset;
                ret = file->f_pos;
                force_successful_syscall_return();
                break;
        default:
                ret = -EINVAL;
        }
        inode_unlock(file_inode(file));
        return ret;
}
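
/*
 * The -MAX_ERRNO check above keeps the new offset out of the top 4095
 * values of the unsigned range; positions there would be returned to
 * userspace as values that libc would mistake for -errno codes.
 */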

static struct inode *devmem_inode;

#ifdef CONFIG_IO_STRICT_DEVMEM
void revoke_devmem(struct resource *res)
{
        /* pairs with smp_store_release() in devmem_init_inode() */
        struct inode *inode = smp_load_acquire(&devmem_inode);

        /*
         * Check that the initialization has completed. Losing the race
         * is ok because it means drivers are claiming resources before
         * the fs_initcall level of init and prevent /dev/mem from
         * establishing mappings.
         */
        if (!inode)
                return;

        /*
         * The expectation is that the driver has successfully marked
         * the resource busy by this point, so devmem_is_allowed()
         * should start returning false, however for performance this
         * does not iterate the entire resource range.
         */
        if (devmem_is_allowed(PHYS_PFN(res->start)) &&
            devmem_is_allowed(PHYS_PFN(res->end))) {
                /*
                 * *cringe* iomem=relaxed says "go ahead, what's the
                 * worst that can happen?"
                 */
                return;
        }

        unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1);
}
#endif

static int open_port(struct inode *inode, struct file *filp)
{
        int rc;

        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;

        rc = security_locked_down(LOCKDOWN_DEV_MEM);
        if (rc)
                return rc;

        if (iminor(inode) != DEVMEM_MINOR)
                return 0;

        /*
         * Use a unified address space to have a single point to manage
         * revocations when drivers want to take over a /dev/mem mapped
         * range.
         */
        inode->i_mapping = devmem_inode->i_mapping;
        filp->f_mapping = inode->i_mapping;

        return 0;
}

#define zero_lseek      null_lseek
#define full_lseek      null_lseek
#define write_zero      write_null
#define write_iter_zero write_iter_null
#define open_mem        open_port
#define open_kmem       open_mem

static const struct file_operations __maybe_unused mem_fops = {
        .llseek         = memory_lseek,
        .read           = read_mem,
        .write          = write_mem,
        .mmap           = mmap_mem,
        .open           = open_mem,
#ifndef CONFIG_MMU
        .get_unmapped_area = get_unmapped_area_mem,
        .mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations __maybe_unused kmem_fops = {
        .llseek         = memory_lseek,
        .read           = read_kmem,
        .write          = write_kmem,
        .mmap           = mmap_kmem,
        .open           = open_kmem,
#ifndef CONFIG_MMU
        .get_unmapped_area = get_unmapped_area_mem,
        .mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations null_fops = {
        .llseek         = null_lseek,
        .read           = read_null,
        .write          = write_null,
        .read_iter      = read_iter_null,
        .write_iter     = write_iter_null,
        .splice_write   = splice_write_null,
};

static const struct file_operations __maybe_unused port_fops = {
        .llseek         = memory_lseek,
        .read           = read_port,
        .write          = write_port,
        .open           = open_port,
};

static const struct file_operations zero_fops = {
        .llseek         = zero_lseek,
        .write          = write_zero,
        .read_iter      = read_iter_zero,
        .write_iter     = write_iter_zero,
        .mmap           = mmap_zero,
        .get_unmapped_area = get_unmapped_area_zero,
#ifndef CONFIG_MMU
        .mmap_capabilities = zero_mmap_capabilities,
#endif
};

static const struct file_operations full_fops = {
        .llseek         = full_lseek,
        .read_iter      = read_iter_zero,
        .write          = write_full,
};

static const struct memdev {
        const char *name;
        umode_t mode;
        const struct file_operations *fops;
        fmode_t fmode;
} devlist[] = {
#ifdef CONFIG_DEVMEM
         [DEVMEM_MINOR] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
#endif
#ifdef CONFIG_DEVKMEM
         [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
#endif
         [3] = { "null", 0666, &null_fops, 0 },
#ifdef CONFIG_DEVPORT
         [4] = { "port", 0, &port_fops, 0 },
#endif
         [5] = { "zero", 0666, &zero_fops, 0 },
         [7] = { "full", 0666, &full_fops, 0 },
         [8] = { "random", 0666, &random_fops, 0 },
         [9] = { "urandom", 0666, &urandom_fops, 0 },
#ifdef CONFIG_PRINTK
        [11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif
};
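
/*
 * The array index doubles as the minor number under char major 1
 * (MEM_MAJOR), matching the canonical assignments: /dev/null is (1, 3),
 * /dev/zero is (1, 5), and so on. A node can thus also be created by
 * hand, e.g.:
 *
 *      mknod /dev/null c 1 3
 */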

static int memory_open(struct inode *inode, struct file *filp)
{
        int minor;
        const struct memdev *dev;

        minor = iminor(inode);
        if (minor >= ARRAY_SIZE(devlist))
                return -ENXIO;

        dev = &devlist[minor];
        if (!dev->fops)
                return -ENXIO;

        filp->f_op = dev->fops;
        filp->f_mode |= dev->fmode;

        if (dev->fops->open)
                return dev->fops->open(inode, filp);

        return 0;
}

static const struct file_operations memory_fops = {
        .open = memory_open,
        .llseek = noop_llseek,
};

static char *mem_devnode(struct device *dev, umode_t *mode)
{
        if (mode && devlist[MINOR(dev->devt)].mode)
                *mode = devlist[MINOR(dev->devt)].mode;
        return NULL;
}

static struct class *mem_class;

static int devmem_fs_init_fs_context(struct fs_context *fc)
{
        return init_pseudo(fc, DEVMEM_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type devmem_fs_type = {
        .name           = "devmem",
        .owner          = THIS_MODULE,
        .init_fs_context = devmem_fs_init_fs_context,
        .kill_sb        = kill_anon_super,
};

static int devmem_init_inode(void)
{
        static struct vfsmount *devmem_vfs_mount;
        static int devmem_fs_cnt;
        struct inode *inode;
        int rc;

        rc = simple_pin_fs(&devmem_fs_type, &devmem_vfs_mount, &devmem_fs_cnt);
        if (rc < 0) {
                pr_err("Cannot mount /dev/mem pseudo filesystem: %d\n", rc);
                return rc;
        }

        inode = alloc_anon_inode(devmem_vfs_mount->mnt_sb);
        if (IS_ERR(inode)) {
                rc = PTR_ERR(inode);
                pr_err("Cannot allocate inode for /dev/mem: %d\n", rc);
                simple_release_fs(&devmem_vfs_mount, &devmem_fs_cnt);
                return rc;
        }

        /*
         * Publish /dev/mem initialized.
         * Pairs with smp_load_acquire() in revoke_devmem().
         */
        smp_store_release(&devmem_inode, inode);

        return 0;
}

static int __init chr_dev_init(void)
{
        int minor;

        if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
                printk("unable to get major %d for memory devs\n", MEM_MAJOR);

        mem_class = class_create(THIS_MODULE, "mem");
        if (IS_ERR(mem_class))
                return PTR_ERR(mem_class);

        mem_class->devnode = mem_devnode;
        for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
                if (!devlist[minor].name)
                        continue;

                /*
                 * Create /dev/port?
                 */
                if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
                        continue;
                if ((minor == DEVMEM_MINOR) && devmem_init_inode() != 0)
                        continue;

                device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
                              NULL, devlist[minor].name);
        }

        return tty_init();
}

fs_initcall(chr_dev_init);