linux/drivers/staging/android/ashmem.c
// SPDX-License-Identifier: GPL-2.0
/* drivers/staging/android/ashmem.c
 *
 * Anonymous Shared Memory Subsystem, ashmem
 *
 * Copyright (C) 2008 Google, Inc.
 *
 * Robert Love <rlove@google.com>
 */

#define pr_fmt(fmt) "ashmem: " fmt

#include <linux/init.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/falloc.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include "ashmem.h"

#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)

/**
 * struct ashmem_area - The anonymous shared memory area
 * @name:               The optional name in /proc/pid/maps
 * @unpinned_list:      The list of unpinned page ranges in this area
 * @file:               The shmem-based backing file
 * @size:               The size of the mapping, in bytes
 * @prot_mask:          The allowed protection bits, as vm_flags
 *
 * The lifecycle of this structure is from our parent file's open() until
 * its release(). It is also protected by 'ashmem_mutex'.
 *
 * Warning: Mappings do NOT pin this structure; it dies on close().
 */
struct ashmem_area {
        char name[ASHMEM_FULL_NAME_LEN];
        struct list_head unpinned_list;
        struct file *file;
        size_t size;
        unsigned long prot_mask;
};
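
/*
 * Example (illustrative): the ASHMEM_NAME_PREFIX is copied into @name once
 * at open(), and a name set later via the ASHMEM_SET_NAME ioctl lands just
 * after it, so a region named "foo" shows up in /proc/pid/maps as
 * "dev/ashmem/foo".
 */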

/**
 * struct ashmem_range - A range of unpinned/evictable pages
 * @lru:                 The entry in the LRU list
 * @unpinned:            The entry in its area's unpinned list
 * @asma:                The associated anonymous shared memory area
 * @pgstart:             The starting page (inclusive)
 * @pgend:               The ending page (inclusive)
 * @purged:              The purge status (ASHMEM_NOT_PURGED or
 *                       ASHMEM_WAS_PURGED)
 *
 * The lifecycle of this structure is from unpin to pin.
 * It is protected by 'ashmem_mutex'.
 */
struct ashmem_range {
        struct list_head lru;
        struct list_head unpinned;
        struct ashmem_area *asma;
        size_t pgstart;
        size_t pgend;
        unsigned int purged;
};

/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);

static atomic_t ashmem_shrink_inflight = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ashmem_shrink_wait);

/*
 * unsigned long lru_count - The count of pages on our LRU list.
 *
 * This is protected by ashmem_mutex.
 */
static unsigned long lru_count;

/*
 * ashmem_mutex - protects the unpinned range lists and each individual
 * ashmem_area
 *
 * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
 */
static DEFINE_MUTEX(ashmem_mutex);

static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;

/*
 * A separate lockdep class for the backing shmem inodes to resolve the lockdep
 * warning about the race between kswapd taking fs_reclaim before inode_lock
 * and the write syscall taking inode_lock and then fs_reclaim.
 * Note that such a race is impossible because ashmem does not support write
 * syscalls operating on the backing shmem.
 */
static struct lock_class_key backing_shmem_inode_class;

static inline unsigned long range_size(struct ashmem_range *range)
{
        return range->pgend - range->pgstart + 1;
}

static inline bool range_on_lru(struct ashmem_range *range)
{
        return range->purged == ASHMEM_NOT_PURGED;
}

static inline bool page_range_subsumes_range(struct ashmem_range *range,
                                             size_t start, size_t end)
{
        return (range->pgstart >= start) && (range->pgend <= end);
}

static inline bool page_range_subsumed_by_range(struct ashmem_range *range,
                                                size_t start, size_t end)
{
        return (range->pgstart <= start) && (range->pgend >= end);
}

static inline bool page_in_range(struct ashmem_range *range, size_t page)
{
        return (range->pgstart <= page) && (range->pgend >= page);
}

static inline bool page_range_in_range(struct ashmem_range *range,
                                       size_t start, size_t end)
{
        return page_in_range(range, start) || page_in_range(range, end) ||
                page_range_subsumes_range(range, start, end);
}

static inline bool range_before_page(struct ashmem_range *range,
                                     size_t page)
{
        return range->pgend < page;
}

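/*
 * A worked example of the inclusive page-interval helpers above, for a
 * range covering pages [2, 5]:
 *
 *   page_in_range(range, 2)                    -> true  (boundaries count)
 *   page_in_range(range, 6)                    -> false
 *   page_range_subsumes_range(range, 0, 9)     -> true  ([2,5] lies in [0,9])
 *   page_range_subsumed_by_range(range, 3, 4)  -> true  ([3,4] lies in [2,5])
 *   page_range_in_range(range, 5, 7)           -> true  (partial overlap)
 *   range_before_page(range, 6)                -> true  (range ends below 6)
 */
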
#define PROT_MASK               (PROT_EXEC | PROT_READ | PROT_WRITE)

/**
 * lru_add() - Adds a range of memory to the LRU list
 * @range:     The memory range being added.
 *
 * The range is first added to the end (tail) of the LRU list.
 * After this, the size of the range is added to @lru_count
 */
static inline void lru_add(struct ashmem_range *range)
{
        list_add_tail(&range->lru, &ashmem_lru_list);
        lru_count += range_size(range);
}

/**
 * lru_del() - Removes a range of memory from the LRU list
 * @range:     The memory range being removed
 *
 * The range is first deleted from the LRU list.
 * After this, the size of the range is removed from @lru_count
 */
static inline void lru_del(struct ashmem_range *range)
{
        list_del(&range->lru);
        lru_count -= range_size(range);
}

/**
 * range_alloc() - Allocates and initializes a new ashmem_range structure
 * @asma:          The associated ashmem_area
 * @prev_range:    The previous ashmem_range in the sorted asma->unpinned list
 * @purged:        Initial purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 * @start:         The starting page (inclusive)
 * @end:           The ending page (inclusive)
 * @new_range:     The placeholder for the new range
 *
 * This function is protected by ashmem_mutex.
 */
static void range_alloc(struct ashmem_area *asma,
                        struct ashmem_range *prev_range, unsigned int purged,
                        size_t start, size_t end,
                        struct ashmem_range **new_range)
{
        struct ashmem_range *range = *new_range;

        *new_range = NULL;
        range->asma = asma;
        range->pgstart = start;
        range->pgend = end;
        range->purged = purged;

        list_add_tail(&range->unpinned, &prev_range->unpinned);

        if (range_on_lru(range))
                lru_add(range);
}

/**
 * range_del() - Deletes and deallocates an ashmem_range structure
 * @range:       The associated ashmem_range that has previously been allocated
 */
static void range_del(struct ashmem_range *range)
{
        list_del(&range->unpinned);
        if (range_on_lru(range))
                lru_del(range);
        kmem_cache_free(ashmem_range_cachep, range);
}

/**
 * range_shrink() - Shrinks an ashmem_range
 * @range:          The associated ashmem_range being shrunk
 * @start:          The starting page (inclusive) of the new range
 * @end:            The ending page (inclusive) of the new range
 *
 * This does not modify the data inside the existing range in any way - it
 * simply shrinks the boundaries of the range.
 *
 * Theoretically, with a little tweaking, this could eventually be changed
 * to range_resize, and expand the lru_count if the new range is larger.
 */
static inline void range_shrink(struct ashmem_range *range,
                                size_t start, size_t end)
{
        size_t pre = range_size(range);

        range->pgstart = start;
        range->pgend = end;

        if (range_on_lru(range))
                lru_count -= pre - range_size(range);
}

/**
 * ashmem_open() - Opens an Anonymous Shared Memory structure
 * @inode:         The inode of the /dev/ashmem device file
 * @file:          The ashmem file being opened
 *
 * Please note that the ashmem_area is not returned by this function - it is
 * instead written to "file->private_data".
 *
 * Return: 0 on success, or a negative errno on failure.
 */
static int ashmem_open(struct inode *inode, struct file *file)
{
        struct ashmem_area *asma;
        int ret;

        ret = generic_file_open(inode, file);
        if (ret)
                return ret;

        asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
        if (!asma)
                return -ENOMEM;

        INIT_LIST_HEAD(&asma->unpinned_list);
        memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
        asma->prot_mask = PROT_MASK;
        file->private_data = asma;

        return 0;
}

/**
 * ashmem_release() - Releases an Anonymous Shared Memory structure
 * @ignored:          The inode of the device file - it is ignored here
 * @file:             The ashmem file being released
 *
 * Return: Always 0; the release path cannot fail.
 */
static int ashmem_release(struct inode *ignored, struct file *file)
{
        struct ashmem_area *asma = file->private_data;
        struct ashmem_range *range, *next;

        mutex_lock(&ashmem_mutex);
        list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
                range_del(range);
        mutex_unlock(&ashmem_mutex);

        if (asma->file)
                fput(asma->file);
        kmem_cache_free(ashmem_area_cachep, asma);

        return 0;
}

static ssize_t ashmem_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
        struct ashmem_area *asma = iocb->ki_filp->private_data;
        int ret = 0;

        mutex_lock(&ashmem_mutex);

        /* If size is not set, or set to 0, always return EOF. */
        if (asma->size == 0)
                goto out_unlock;

        if (!asma->file) {
                ret = -EBADF;
                goto out_unlock;
        }

        /*
         * asma and asma->file are used outside the lock here.  We assume
         * once asma->file is set it will never be changed, and will not
         * be destroyed until all references to the file are dropped and
         * ashmem_release is called.
         */
        mutex_unlock(&ashmem_mutex);
        ret = vfs_iter_read(asma->file, iter, &iocb->ki_pos, 0);
        mutex_lock(&ashmem_mutex);
        if (ret > 0)
                asma->file->f_pos = iocb->ki_pos;
out_unlock:
        mutex_unlock(&ashmem_mutex);
        return ret;
}

static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
{
        struct ashmem_area *asma = file->private_data;
        loff_t ret;

        mutex_lock(&ashmem_mutex);

        if (asma->size == 0) {
                mutex_unlock(&ashmem_mutex);
                return -EINVAL;
        }

        if (!asma->file) {
                mutex_unlock(&ashmem_mutex);
                return -EBADF;
        }

        mutex_unlock(&ashmem_mutex);

        ret = vfs_llseek(asma->file, offset, origin);
        if (ret < 0)
                return ret;

        /* Copy f_pos from backing file, since f_ops->llseek() sets it */
        file->f_pos = asma->file->f_pos;
        return ret;
}

static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
{
        return _calc_vm_trans(prot, PROT_READ,  VM_MAYREAD) |
               _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
               _calc_vm_trans(prot, PROT_EXEC,  VM_MAYEXEC);
}
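
/*
 * For illustration: ashmem_mmap() below clears, rather than sets, the
 * VM_MAY* bits derived from the *disallowed* protections. With a prot_mask
 * of PROT_READ, ~prot_mask covers PROT_WRITE and PROT_EXEC, so
 *
 *   calc_vm_may_flags(~prot_mask) == VM_MAYWRITE | VM_MAYEXEC
 *
 * and stripping those bits from vma->vm_flags keeps a later mprotect()
 * from upgrading the mapping beyond the allowed mask.
 */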

static int ashmem_vmfile_mmap(struct file *file, struct vm_area_struct *vma)
{
        /* do not allow direct mmap of the ashmem backing shmem file */
        return -EPERM;
}

static unsigned long
ashmem_vmfile_get_unmapped_area(struct file *file, unsigned long addr,
                                unsigned long len, unsigned long pgoff,
                                unsigned long flags)
{
        return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
}

static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
        static struct file_operations vmfile_fops;
        struct ashmem_area *asma = file->private_data;
        int ret = 0;

        mutex_lock(&ashmem_mutex);

        /* user needs to SET_SIZE before mapping */
        if (!asma->size) {
                ret = -EINVAL;
                goto out;
        }

        /* requested mapping size larger than object size */
        if (vma->vm_end - vma->vm_start > PAGE_ALIGN(asma->size)) {
                ret = -EINVAL;
                goto out;
        }

        /* requested protection bits must match our allowed protection mask */
        if ((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) &
            calc_vm_prot_bits(PROT_MASK, 0)) {
                ret = -EPERM;
                goto out;
        }
        vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);

        if (!asma->file) {
                char *name = ASHMEM_NAME_DEF;
                struct file *vmfile;
                struct inode *inode;

                if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
                        name = asma->name;

                /* ... and allocate the backing shmem file */
                vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
                if (IS_ERR(vmfile)) {
                        ret = PTR_ERR(vmfile);
                        goto out;
                }
                vmfile->f_mode |= FMODE_LSEEK;
                inode = file_inode(vmfile);
                lockdep_set_class(&inode->i_rwsem, &backing_shmem_inode_class);
                asma->file = vmfile;
                /*
                 * Override the mmap operation of the vmfile so that it can't
                 * be remapped, which would lead to the creation of a new VMA
                 * with no asma permission checks. We also have to override
                 * get_unmapped_area to prevent the VM_BUG_ON check for f_op
                 * modification.
                 */
                if (!vmfile_fops.mmap) {
                        vmfile_fops = *vmfile->f_op;
                        vmfile_fops.mmap = ashmem_vmfile_mmap;
                        vmfile_fops.get_unmapped_area =
                                        ashmem_vmfile_get_unmapped_area;
                }
                vmfile->f_op = &vmfile_fops;
        }
        get_file(asma->file);

        /*
         * XXX - Reworked to use shmem_zero_setup() instead of
         * shmem_set_file while we're in staging. -jstultz
         */
        if (vma->vm_flags & VM_SHARED) {
                ret = shmem_zero_setup(vma);
                if (ret) {
                        fput(asma->file);
                        goto out;
                }
        } else {
                vma_set_anonymous(vma);
        }

        vma_set_file(vma, asma->file);
        /* XXX: merge this with the get_file() above if possible */
        fput(asma->file);

out:
        mutex_unlock(&ashmem_mutex);
        return ret;
}
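
/*
 * A minimal userspace sketch of the flow ashmem_mmap() expects
 * (illustrative only, error handling omitted):
 *
 *   int fd = open("/dev/ashmem", O_RDWR);
 *   ioctl(fd, ASHMEM_SET_NAME, "my-region");   // optional, before mmap
 *   ioctl(fd, ASHMEM_SET_SIZE, 4096);          // required before mmap
 *   char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * Once the backing shmem file exists, ASHMEM_SET_NAME and ASHMEM_SET_SIZE
 * both fail with -EINVAL.
 */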

/*
 * ashmem_shrink - our cache shrinker, called from mm/vmscan.c
 *
 * 'nr_to_scan' is the number of objects to scan for freeing.
 *
 * 'gfp_mask' is the mask of the allocation that got us into this mess.
 *
 * Return value is the number of objects freed or -1 if we cannot
 * proceed without risk of deadlock (due to gfp_mask).
 *
 * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
 * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
 * pages freed.
 */
static unsigned long
ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
        unsigned long freed = 0;

        /* We might recurse into filesystem code, so bail out if necessary */
        if (!(sc->gfp_mask & __GFP_FS))
                return SHRINK_STOP;

        if (!mutex_trylock(&ashmem_mutex))
                return -1;

        while (!list_empty(&ashmem_lru_list)) {
                struct ashmem_range *range =
                        list_first_entry(&ashmem_lru_list, typeof(*range), lru);
                loff_t start = range->pgstart * PAGE_SIZE;
                loff_t end = (range->pgend + 1) * PAGE_SIZE;
                struct file *f = range->asma->file;

                get_file(f);
                atomic_inc(&ashmem_shrink_inflight);
                range->purged = ASHMEM_WAS_PURGED;
                lru_del(range);

                freed += range_size(range);
                mutex_unlock(&ashmem_mutex);
                f->f_op->fallocate(f,
                                   FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                                   start, end - start);
                fput(f);
                if (atomic_dec_and_test(&ashmem_shrink_inflight))
                        wake_up_all(&ashmem_shrink_wait);
                if (!mutex_trylock(&ashmem_mutex))
                        goto out;
                if (--sc->nr_to_scan <= 0)
                        break;
        }
        mutex_unlock(&ashmem_mutex);
out:
        return freed;
}

static unsigned long
ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
        /*
         * note that lru_count is count of pages on the lru, not a count of
         * objects on the list. This means the scan function needs to return the
         * number of pages freed, not the number of objects scanned.
         */
        return lru_count;
}

static struct shrinker ashmem_shrinker = {
        .count_objects = ashmem_shrink_count,
        .scan_objects = ashmem_shrink_scan,
        /*
         * XXX (dchinner): I wish people would comment on why they need
         * significant changes to the default value here
         */
        .seeks = DEFAULT_SEEKS * 4,
};

static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
        int ret = 0;

        mutex_lock(&ashmem_mutex);

        /* the user can only remove, not add, protection bits */
        if ((asma->prot_mask & prot) != prot) {
                ret = -EINVAL;
                goto out;
        }

        /* does the application expect PROT_READ to imply PROT_EXEC? */
        if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
                prot |= PROT_EXEC;

        asma->prot_mask = prot;

out:
        mutex_unlock(&ashmem_mutex);
        return ret;
}

static int set_name(struct ashmem_area *asma, void __user *name)
{
        int len;
        int ret = 0;
        char local_name[ASHMEM_NAME_LEN];

        /*
         * Holding the ashmem_mutex while doing a copy_from_user might cause
         * a data abort which would try to access mmap_lock. If another
         * thread has invoked ashmem_mmap then it will be holding the
         * semaphore and will be waiting for ashmem_mutex, thereby leading to
         * deadlock. We'll release the mutex and take the name to a local
         * variable that does not need protection and later copy the local
         * variable to the structure member with the lock held.
         */
        len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
        if (len < 0)
                return len;

        mutex_lock(&ashmem_mutex);
        /* cannot change an existing mapping's name */
        if (asma->file)
                ret = -EINVAL;
        else
                strscpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name,
                        ASHMEM_NAME_LEN);

        mutex_unlock(&ashmem_mutex);
        return ret;
}

static int get_name(struct ashmem_area *asma, void __user *name)
{
        int ret = 0;
        size_t len;
        /*
         * Have a local variable to which we'll copy the content
         * from asma with the lock held. Later we can copy this to the user
         * space safely without holding any locks. So even if we proceed to
         * wait for mmap_lock, it won't lead to deadlock.
         */
        char local_name[ASHMEM_NAME_LEN];

        mutex_lock(&ashmem_mutex);
        if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
                /*
                 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
                 * prevents us from revealing one user's stack to another.
                 */
                len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
                memcpy(local_name, asma->name + ASHMEM_NAME_PREFIX_LEN, len);
        } else {
                len = sizeof(ASHMEM_NAME_DEF);
                memcpy(local_name, ASHMEM_NAME_DEF, len);
        }
        mutex_unlock(&ashmem_mutex);

        /*
         * Now we are just copying from the stack variable to userland;
         * no lock held
         */
        if (copy_to_user(name, local_name, len))
                ret = -EFAULT;
        return ret;
}

/*
 * ashmem_pin - pin the given ashmem region, returning whether it was
 * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
                      struct ashmem_range **new_range)
{
        struct ashmem_range *range, *next;
        int ret = ASHMEM_NOT_PURGED;

        list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
                /* moved past last applicable page; we can short circuit */
                if (range_before_page(range, pgstart))
                        break;

                /*
                 * The user can ask us to pin pages that span multiple ranges,
                 * or to pin pages that aren't even unpinned, so this is messy.
                 *
                 * Four cases:
                 * 1. The requested range subsumes an existing range, so we
                 *    just remove the entire matching range.
                 * 2. The requested range overlaps the start of an existing
                 *    range, so we just update that range.
                 * 3. The requested range overlaps the end of an existing
                 *    range, so we just update that range.
                 * 4. The requested range punches a hole in an existing range,
                 *    so we have to update one side of the range and then
                 *    create a new range for the other side.
                 */
                if (page_range_in_range(range, pgstart, pgend)) {
                        ret |= range->purged;

                        /* Case #1: Easy. Just nuke the whole thing. */
                        if (page_range_subsumes_range(range, pgstart, pgend)) {
                                range_del(range);
                                continue;
                        }

                        /* Case #2: We overlap from the start, so adjust it */
                        if (range->pgstart >= pgstart) {
                                range_shrink(range, pgend + 1, range->pgend);
                                continue;
                        }

                        /* Case #3: We overlap from the rear, so adjust it */
                        if (range->pgend <= pgend) {
                                range_shrink(range, range->pgstart,
                                             pgstart - 1);
                                continue;
                        }

                        /*
                         * Case #4: We eat a chunk out of the middle. A bit
                         * more complicated, we allocate a new range for the
                         * second half and adjust the first chunk's endpoint.
                         */
                        range_alloc(asma, range, range->purged,
                                    pgend + 1, range->pgend, new_range);
                        range_shrink(range, range->pgstart, pgstart - 1);
                        break;
                }
        }

        return ret;
}
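
/*
 * A concrete walk through the four cases above, starting each time from a
 * single unpinned range covering pages [0, 9]:
 *
 *   pin [0, 9] -> case 1: the whole range is deleted
 *   pin [0, 3] -> case 2: the range shrinks to [4, 9]
 *   pin [6, 9] -> case 3: the range shrinks to [0, 5]
 *   pin [3, 5] -> case 4: the range splits into [0, 2] and [6, 9]
 */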

/*
 * ashmem_unpin - unpin the given range of pages. Returns zero on success.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
                        struct ashmem_range **new_range)
{
        struct ashmem_range *range, *next;
        unsigned int purged = ASHMEM_NOT_PURGED;

restart:
        list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
                /* short circuit: this is our insertion point */
                if (range_before_page(range, pgstart))
                        break;

                /*
                 * The user can ask us to unpin pages that are already entirely
                 * or partially unpinned. We handle those two cases here.
                 */
                if (page_range_subsumed_by_range(range, pgstart, pgend))
                        return 0;
                if (page_range_in_range(range, pgstart, pgend)) {
                        pgstart = min(range->pgstart, pgstart);
                        pgend = max(range->pgend, pgend);
                        purged |= range->purged;
                        range_del(range);
                        goto restart;
                }
        }

        range_alloc(asma, range, purged, pgstart, pgend, new_range);
        return 0;
}
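
/*
 * Example of the merge behaviour above: if pages [3, 6] are already
 * unpinned and the caller unpins [1, 4], the overlapping range is deleted,
 * the request grows to [1, 6], the scan restarts, and a single range
 * [1, 6] ends up on the unpinned list. Note that adjacent but
 * non-overlapping ranges (e.g. [0, 1] next to [2, 3]) are not coalesced.
 */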

/*
 * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
 * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
                                 size_t pgend)
{
        struct ashmem_range *range;
        int ret = ASHMEM_IS_PINNED;

        list_for_each_entry(range, &asma->unpinned_list, unpinned) {
                if (range_before_page(range, pgstart))
                        break;
                if (page_range_in_range(range, pgstart, pgend)) {
                        ret = ASHMEM_IS_UNPINNED;
                        break;
                }
        }

        return ret;
}

static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
                            void __user *p)
{
        struct ashmem_pin pin;
        size_t pgstart, pgend;
        int ret = -EINVAL;
        struct ashmem_range *range = NULL;

        if (copy_from_user(&pin, p, sizeof(pin)))
                return -EFAULT;

        if (cmd == ASHMEM_PIN || cmd == ASHMEM_UNPIN) {
                range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
                if (!range)
                        return -ENOMEM;
        }

        mutex_lock(&ashmem_mutex);
        wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));

        if (!asma->file)
                goto out_unlock;

        /* By convention, a len of zero means "everything onward" */
        if (!pin.len)
                pin.len = PAGE_ALIGN(asma->size) - pin.offset;

        if ((pin.offset | pin.len) & ~PAGE_MASK)
                goto out_unlock;

        if (((__u32)-1) - pin.offset < pin.len)
                goto out_unlock;

        if (PAGE_ALIGN(asma->size) < pin.offset + pin.len)
                goto out_unlock;

        pgstart = pin.offset / PAGE_SIZE;
        pgend = pgstart + (pin.len / PAGE_SIZE) - 1;

        switch (cmd) {
        case ASHMEM_PIN:
                ret = ashmem_pin(asma, pgstart, pgend, &range);
                break;
        case ASHMEM_UNPIN:
                ret = ashmem_unpin(asma, pgstart, pgend, &range);
                break;
        case ASHMEM_GET_PIN_STATUS:
                ret = ashmem_get_pin_status(asma, pgstart, pgend);
                break;
        }

out_unlock:
        mutex_unlock(&ashmem_mutex);
        if (range)
                kmem_cache_free(ashmem_range_cachep, range);

        return ret;
}
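
/*
 * Page arithmetic used above, assuming a 4 KiB PAGE_SIZE: a request with
 * .offset = 8192 and .len = 16384 covers bytes [8192, 24575], i.e.
 *
 *   pgstart = 8192 / 4096            = 2
 *   pgend   = 2 + (16384 / 4096) - 1 = 5
 *
 * the inclusive page interval [2, 5]. Offset and length must both be
 * page-aligned, or the ioctl fails with -EINVAL.
 */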

static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct ashmem_area *asma = file->private_data;
        long ret = -ENOTTY;

        switch (cmd) {
        case ASHMEM_SET_NAME:
                ret = set_name(asma, (void __user *)arg);
                break;
        case ASHMEM_GET_NAME:
                ret = get_name(asma, (void __user *)arg);
                break;
        case ASHMEM_SET_SIZE:
                ret = -EINVAL;
                mutex_lock(&ashmem_mutex);
                if (!asma->file) {
                        ret = 0;
                        asma->size = (size_t)arg;
                }
                mutex_unlock(&ashmem_mutex);
                break;
        case ASHMEM_GET_SIZE:
                ret = asma->size;
                break;
        case ASHMEM_SET_PROT_MASK:
                ret = set_prot_mask(asma, arg);
                break;
        case ASHMEM_GET_PROT_MASK:
                ret = asma->prot_mask;
                break;
        case ASHMEM_PIN:
        case ASHMEM_UNPIN:
        case ASHMEM_GET_PIN_STATUS:
                ret = ashmem_pin_unpin(asma, cmd, (void __user *)arg);
                break;
        case ASHMEM_PURGE_ALL_CACHES:
                ret = -EPERM;
                if (capable(CAP_SYS_ADMIN)) {
                        struct shrink_control sc = {
                                .gfp_mask = GFP_KERNEL,
                                .nr_to_scan = LONG_MAX,
                        };
                        ret = ashmem_shrink_count(&ashmem_shrinker, &sc);
                        ashmem_shrink_scan(&ashmem_shrinker, &sc);
                }
                break;
        }

        return ret;
}
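
/*
 * Illustrative userspace use of the pin ioctls dispatched above (error
 * handling omitted). struct ashmem_pin carries a page-aligned byte offset
 * and length; len == 0 means "from offset to the end":
 *
 *   struct ashmem_pin pin = { .offset = 0, .len = 4096 };
 *
 *   ioctl(fd, ASHMEM_UNPIN, &pin);  // pages may now be reclaimed
 *   ...
 *   if (ioctl(fd, ASHMEM_PIN, &pin) == ASHMEM_WAS_PURGED)
 *           ;  // contents were discarded under memory pressure
 */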

/* support for 32-bit userspace on 64-bit platforms */
#ifdef CONFIG_COMPAT
static long compat_ashmem_ioctl(struct file *file, unsigned int cmd,
                                unsigned long arg)
{
        switch (cmd) {
        case COMPAT_ASHMEM_SET_SIZE:
                cmd = ASHMEM_SET_SIZE;
                break;
        case COMPAT_ASHMEM_SET_PROT_MASK:
                cmd = ASHMEM_SET_PROT_MASK;
                break;
        }
        return ashmem_ioctl(file, cmd, arg);
}
#endif

#ifdef CONFIG_PROC_FS
static void ashmem_show_fdinfo(struct seq_file *m, struct file *file)
{
        struct ashmem_area *asma = file->private_data;

        mutex_lock(&ashmem_mutex);

        if (asma->file)
                seq_printf(m, "inode:\t%ld\n", file_inode(asma->file)->i_ino);

        if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
                seq_printf(m, "name:\t%s\n",
                           asma->name + ASHMEM_NAME_PREFIX_LEN);

        seq_printf(m, "size:\t%zu\n", asma->size);

        mutex_unlock(&ashmem_mutex);
}
#endif

static const struct file_operations ashmem_fops = {
        .owner = THIS_MODULE,
        .open = ashmem_open,
        .release = ashmem_release,
        .read_iter = ashmem_read_iter,
        .llseek = ashmem_llseek,
        .mmap = ashmem_mmap,
        .unlocked_ioctl = ashmem_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl = compat_ashmem_ioctl,
#endif
#ifdef CONFIG_PROC_FS
        .show_fdinfo = ashmem_show_fdinfo,
#endif
};

static struct miscdevice ashmem_misc = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "ashmem",
        .fops = &ashmem_fops,
};

static int __init ashmem_init(void)
{
        int ret = -ENOMEM;

        ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
                                               sizeof(struct ashmem_area),
                                               0, 0, NULL);
        if (!ashmem_area_cachep) {
                pr_err("failed to create slab cache\n");
                goto out;
        }

        ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
                                                sizeof(struct ashmem_range),
                                                0, SLAB_RECLAIM_ACCOUNT, NULL);
        if (!ashmem_range_cachep) {
                pr_err("failed to create slab cache\n");
                goto out_free1;
        }

        ret = misc_register(&ashmem_misc);
        if (ret) {
                pr_err("failed to register misc device!\n");
                goto out_free2;
        }

        ret = register_shrinker(&ashmem_shrinker);
        if (ret) {
                pr_err("failed to register shrinker!\n");
                goto out_demisc;
        }

        pr_info("initialized\n");

        return 0;

out_demisc:
        misc_deregister(&ashmem_misc);
out_free2:
        kmem_cache_destroy(ashmem_range_cachep);
out_free1:
        kmem_cache_destroy(ashmem_area_cachep);
out:
        return ret;
}
device_initcall(ashmem_init);