linux/drivers/staging/android/ashmem.c
/* mm/ashmem.c
 *
 * Anonymous Shared Memory Subsystem, ashmem
 *
 * Copyright (C) 2008 Google, Inc.
 *
 * Robert Love <rlove@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "ashmem: " fmt

#include <linux/init.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/falloc.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include "ashmem.h"

#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)

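/*
 * Worked example (editor's note, not in the original source): the prefix
 * "dev/ashmem/" is 11 characters, so ASHMEM_NAME_PREFIX_LEN is
 * sizeof("dev/ashmem/") - 1 = 12 - 1 = 11, and with ASHMEM_NAME_LEN
 * (256 in the UAPI header) a full name buffer holds 267 bytes; the
 * user-supplied portion carries its own NUL within those 256 bytes.
 */
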
/**
 * struct ashmem_area - The anonymous shared memory area
 * @name:               The optional name in /proc/pid/maps
 * @unpinned_list:      The list of this area's unpinned ranges
 * @file:               The shmem-based backing file
 * @size:               The size of the mapping, in bytes
 * @prot_mask:          The allowed protection bits, as vm_flags
 *
 * The lifecycle of this structure is from our parent file's open() until
 * its release(). It is also protected by 'ashmem_mutex'.
 *
 * Warning: Mappings do NOT pin this structure; it dies on close().
 */
struct ashmem_area {
        char name[ASHMEM_FULL_NAME_LEN];
        struct list_head unpinned_list;
        struct file *file;
        size_t size;
        unsigned long prot_mask;
};

/**
 * struct ashmem_range - A range of unpinned/evictable pages
 * @lru:                 The entry in the LRU list
 * @unpinned:            The entry in its area's unpinned list
 * @asma:                The associated anonymous shared memory area.
 * @pgstart:             The starting page (inclusive)
 * @pgend:               The ending page (inclusive)
 * @purged:              The purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 *
 * The lifecycle of this structure is from unpin to pin.
 * It is protected by 'ashmem_mutex'.
 */
struct ashmem_range {
        struct list_head lru;
        struct list_head unpinned;
        struct ashmem_area *asma;
        size_t pgstart;
        size_t pgend;
        unsigned int purged;
};

/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);

/*
 * unsigned long lru_count - The count of pages on our LRU list.
 *
 * This is protected by ashmem_mutex.
 */
static unsigned long lru_count;

/*
 * ashmem_mutex - protects the list of, and each individual, ashmem_area
 *
 * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
 */
static DEFINE_MUTEX(ashmem_mutex);

static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;

static inline unsigned long range_size(struct ashmem_range *range)
{
        return range->pgend - range->pgstart + 1;
}

static inline bool range_on_lru(struct ashmem_range *range)
{
        return range->purged == ASHMEM_NOT_PURGED;
}

static inline bool page_range_subsumes_range(struct ashmem_range *range,
                                             size_t start, size_t end)
{
        return (range->pgstart >= start) && (range->pgend <= end);
}

static inline bool page_range_subsumed_by_range(struct ashmem_range *range,
                                                size_t start, size_t end)
{
        return (range->pgstart <= start) && (range->pgend >= end);
}

static inline bool page_in_range(struct ashmem_range *range, size_t page)
{
        return (range->pgstart <= page) && (range->pgend >= page);
}

static inline bool page_range_in_range(struct ashmem_range *range,
                                       size_t start, size_t end)
{
        return page_in_range(range, start) || page_in_range(range, end) ||
                page_range_subsumes_range(range, start, end);
}

static inline bool range_before_page(struct ashmem_range *range, size_t page)
{
        return range->pgend < page;
}

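/*
 * Worked example (editor's illustration, not in the original source): the
 * page bounds above are inclusive.  Given a hypothetical range r with
 * r.pgstart == 2 and r.pgend == 5:
 *
 *      page_in_range(&r, 5)                   -> true  (end is inclusive)
 *      page_range_subsumes_range(&r, 1, 6)    -> true  ([2,5] inside [1,6])
 *      page_range_subsumed_by_range(&r, 3, 4) -> true  ([3,4] inside [2,5])
 *      page_range_in_range(&r, 5, 9)          -> true  (overlap at page 5)
 *      range_before_page(&r, 6)               -> true  (r ends before page 6)
 */
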
#define PROT_MASK               (PROT_EXEC | PROT_READ | PROT_WRITE)

/**
 * lru_add() - Adds a range of memory to the LRU list
 * @range:     The memory range being added.
 *
 * The range is first added to the end (tail) of the LRU list.
 * After this, the size of the range is added to @lru_count
 */
static inline void lru_add(struct ashmem_range *range)
{
        list_add_tail(&range->lru, &ashmem_lru_list);
        lru_count += range_size(range);
}

/**
 * lru_del() - Removes a range of memory from the LRU list
 * @range:     The memory range being removed
 *
 * The range is first deleted from the LRU list.
 * After this, the size of the range is removed from @lru_count
 */
static inline void lru_del(struct ashmem_range *range)
{
        list_del(&range->lru);
        lru_count -= range_size(range);
}

/**
 * range_alloc() - Allocates and initializes a new ashmem_range structure
 * @asma:          The associated ashmem_area
 * @prev_range:    The previous ashmem_range in the sorted asma->unpinned list
 * @purged:        Initial purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 * @start:         The starting page (inclusive)
 * @end:           The ending page (inclusive)
 *
 * This function is protected by ashmem_mutex.
 *
 * Return: 0 if successful, or -ENOMEM if there is an error
 */
static int range_alloc(struct ashmem_area *asma,
                       struct ashmem_range *prev_range, unsigned int purged,
                       size_t start, size_t end)
{
        struct ashmem_range *range;

        range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
        if (unlikely(!range))
                return -ENOMEM;

        range->asma = asma;
        range->pgstart = start;
        range->pgend = end;
        range->purged = purged;

        list_add_tail(&range->unpinned, &prev_range->unpinned);

        if (range_on_lru(range))
                lru_add(range);

        return 0;
}

/**
 * range_del() - Deletes and deallocates an ashmem_range structure
 * @range:       The associated ashmem_range that has previously been allocated
 */
static void range_del(struct ashmem_range *range)
{
        list_del(&range->unpinned);
        if (range_on_lru(range))
                lru_del(range);
        kmem_cache_free(ashmem_range_cachep, range);
}

/**
 * range_shrink() - Shrinks an ashmem_range
 * @range:          The associated ashmem_range being shrunk
 * @start:          The starting page (inclusive) of the new range
 * @end:            The ending page (inclusive) of the new range
 *
 * This does not modify the data inside the existing range in any way; it
 * simply shrinks the boundaries of the range.
 *
 * Theoretically, with a little tweaking, this could eventually be changed
 * to range_resize, and expand the lru_count if the new range is larger.
 */
static inline void range_shrink(struct ashmem_range *range,
                                size_t start, size_t end)
{
        size_t pre = range_size(range);

        range->pgstart = start;
        range->pgend = end;

        if (range_on_lru(range))
                lru_count -= pre - range_size(range);
}

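/*
 * Worked example (editor's illustration, not in the original source):
 * shrinking an on-LRU range covering pages [2, 9] (8 pages) down to
 * [2, 3] (2 pages) leaves the range on the LRU list but subtracts the
 * 6 dropped pages from lru_count.
 */
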
/**
 * ashmem_open() - Opens an Anonymous Shared Memory structure
 * @inode:         The backing file's inode
 * @file:          The backing file
 *
 * Note that the ashmem_area is not returned by this function; it is
 * instead written to "file->private_data".
 *
 * Return: 0 if successful, or another code if unsuccessful.
 */
static int ashmem_open(struct inode *inode, struct file *file)
{
        struct ashmem_area *asma;
        int ret;

        ret = generic_file_open(inode, file);
        if (unlikely(ret))
                return ret;

        asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
        if (unlikely(!asma))
                return -ENOMEM;

        INIT_LIST_HEAD(&asma->unpinned_list);
        memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
        asma->prot_mask = PROT_MASK;
        file->private_data = asma;

        return 0;
}

/**
 * ashmem_release() - Releases an Anonymous Shared Memory structure
 * @ignored:          The backing file's inode; it is ignored here
 * @file:             The backing file
 *
 * Return: 0 if successful. If it is anything else, go have a coffee and
 * try again.
 */
static int ashmem_release(struct inode *ignored, struct file *file)
{
        struct ashmem_area *asma = file->private_data;
        struct ashmem_range *range, *next;

        mutex_lock(&ashmem_mutex);
        list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
                range_del(range);
        mutex_unlock(&ashmem_mutex);

        if (asma->file)
                fput(asma->file);
        kmem_cache_free(ashmem_area_cachep, asma);

        return 0;
}

static ssize_t ashmem_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
        struct ashmem_area *asma = iocb->ki_filp->private_data;
        int ret = 0;

        mutex_lock(&ashmem_mutex);

        /* If size is not set, or set to 0, always return EOF. */
        if (asma->size == 0)
                goto out_unlock;

        if (!asma->file) {
                ret = -EBADF;
                goto out_unlock;
        }

        /*
         * asma and asma->file are used outside the lock here.  We assume
         * once asma->file is set it will never be changed, and will not
         * be destroyed until all references to the file are dropped and
         * ashmem_release is called.
         */
        mutex_unlock(&ashmem_mutex);
        ret = vfs_iter_read(asma->file, iter, &iocb->ki_pos, 0);
        mutex_lock(&ashmem_mutex);
        if (ret > 0)
                asma->file->f_pos = iocb->ki_pos;
out_unlock:
        mutex_unlock(&ashmem_mutex);
        return ret;
}

static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
{
        struct ashmem_area *asma = file->private_data;
        int ret;

        mutex_lock(&ashmem_mutex);

        if (asma->size == 0) {
                ret = -EINVAL;
                goto out;
        }

        if (!asma->file) {
                ret = -EBADF;
                goto out;
        }

        ret = vfs_llseek(asma->file, offset, origin);
        if (ret < 0)
                goto out;

        /* Copy f_pos from backing file, since f_ops->llseek() sets it */
        file->f_pos = asma->file->f_pos;

out:
        mutex_unlock(&ashmem_mutex);
        return ret;
}

static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
{
        return _calc_vm_trans(prot, PROT_READ,  VM_MAYREAD) |
               _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
               _calc_vm_trans(prot, PROT_EXEC,  VM_MAYEXEC);
}

static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct ashmem_area *asma = file->private_data;
        int ret = 0;

        mutex_lock(&ashmem_mutex);

        /* user needs to SET_SIZE before mapping */
        if (unlikely(!asma->size)) {
                ret = -EINVAL;
                goto out;
        }

        /* requested protection bits must match our allowed protection mask */
        if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) &
                     calc_vm_prot_bits(PROT_MASK, 0))) {
                ret = -EPERM;
                goto out;
        }
        vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);

        if (!asma->file) {
                char *name = ASHMEM_NAME_DEF;
                struct file *vmfile;

                if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
                        name = asma->name;

                /* ... and allocate the backing shmem file */
                vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
                if (IS_ERR(vmfile)) {
                        ret = PTR_ERR(vmfile);
                        goto out;
                }
                vmfile->f_mode |= FMODE_LSEEK;
                asma->file = vmfile;
        }
        get_file(asma->file);

        /*
         * XXX - Reworked to use shmem_zero_setup() instead of
         * shmem_set_file while we're in staging. -jstultz
         */
        if (vma->vm_flags & VM_SHARED) {
                ret = shmem_zero_setup(vma);
                if (ret) {
                        fput(asma->file);
                        goto out;
                }
        }

        if (vma->vm_file)
                fput(vma->vm_file);
        vma->vm_file = asma->file;

out:
        mutex_unlock(&ashmem_mutex);
        return ret;
}

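/*
 * Example (editor's sketch, not part of the driver): the minimal userspace
 * lifecycle the mmap handler above expects - open the device, optionally
 * name the region, set a size, then map it.  Assumes the kernel UAPI
 * headers are available; the name and size are arbitrary and error
 * handling is omitted.
 *
 *      #include <fcntl.h>
 *      #include <sys/ioctl.h>
 *      #include <sys/mman.h>
 *      #include <linux/ashmem.h>
 *
 *      int fd = open("/dev/ashmem", O_RDWR);
 *      ioctl(fd, ASHMEM_SET_NAME, "example-region");  - only before mmap
 *      ioctl(fd, ASHMEM_SET_SIZE, 4096);              - required before mmap
 *      char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *                     MAP_SHARED, fd, 0);
 */
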
/*
 * ashmem_shrink - our cache shrinker, called from mm/vmscan.c
 *
 * 'nr_to_scan' is the number of objects to scan for freeing.
 *
 * 'gfp_mask' is the mask of the allocation that got us into this mess.
 *
 * Return value is the number of objects freed or SHRINK_STOP if we cannot
 * proceed without risk of deadlock (due to gfp_mask).
 *
 * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
 * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
 * pages freed.
 */
static unsigned long
ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
        struct ashmem_range *range, *next;
        unsigned long freed = 0;

        /* We might recurse into filesystem code, so bail out if necessary */
        if (!(sc->gfp_mask & __GFP_FS))
                return SHRINK_STOP;

        if (!mutex_trylock(&ashmem_mutex))
                return SHRINK_STOP;

        list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
                loff_t start = range->pgstart * PAGE_SIZE;
                loff_t end = (range->pgend + 1) * PAGE_SIZE;

                vfs_fallocate(range->asma->file,
                              FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                              start, end - start);
                range->purged = ASHMEM_WAS_PURGED;
                lru_del(range);

                freed += range_size(range);
                if (--sc->nr_to_scan <= 0)
                        break;
        }
        mutex_unlock(&ashmem_mutex);
        return freed;
}

static unsigned long
ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
        /*
         * note that lru_count is count of pages on the lru, not a count of
         * objects on the list. This means the scan function needs to return the
         * number of pages freed, not the number of objects scanned.
         */
        return lru_count;
}

static struct shrinker ashmem_shrinker = {
        .count_objects = ashmem_shrink_count,
        .scan_objects = ashmem_shrink_scan,
        /*
         * XXX (dchinner): I wish people would comment on why they need
         * significant changes to the default value here
         */
        .seeks = DEFAULT_SEEKS * 4,
};

static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
        int ret = 0;

        mutex_lock(&ashmem_mutex);

        /* the user can only remove, not add, protection bits */
        if (unlikely((asma->prot_mask & prot) != prot)) {
                ret = -EINVAL;
                goto out;
        }

        /* does the application expect PROT_READ to imply PROT_EXEC? */
        if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
                prot |= PROT_EXEC;

        asma->prot_mask = prot;

out:
        mutex_unlock(&ashmem_mutex);
        return ret;
}

static int set_name(struct ashmem_area *asma, void __user *name)
{
        int len;
        int ret = 0;
        char local_name[ASHMEM_NAME_LEN];

        /*
         * Holding the ashmem_mutex while doing a copy_from_user might cause
         * a data abort, which would try to access mmap_sem. If another
         * thread has invoked ashmem_mmap then it will be holding the
         * semaphore and will be waiting for ashmem_mutex, thereby leading to
         * deadlock. We'll release the mutex and take the name to a local
         * variable that does not need protection and later copy the local
         * variable to the structure member with the lock held.
         */
        len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
        if (len < 0)
                return len;
        if (len == ASHMEM_NAME_LEN)
                local_name[ASHMEM_NAME_LEN - 1] = '\0';
        mutex_lock(&ashmem_mutex);
        /* cannot change an existing mapping's name */
        if (unlikely(asma->file))
                ret = -EINVAL;
        else
                strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);

        mutex_unlock(&ashmem_mutex);
        return ret;
}

static int get_name(struct ashmem_area *asma, void __user *name)
{
        int ret = 0;
        size_t len;
        /*
         * Have a local variable to which we'll copy the content
         * from asma with the lock held. Later we can copy this to the user
         * space safely without holding any locks. So even if we proceed to
         * wait for mmap_sem, it won't lead to deadlock.
         */
        char local_name[ASHMEM_NAME_LEN];

        mutex_lock(&ashmem_mutex);
        if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
                /*
                 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
                 * prevents us from revealing one user's stack to another.
                 */
                len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
                memcpy(local_name, asma->name + ASHMEM_NAME_PREFIX_LEN, len);
        } else {
                len = sizeof(ASHMEM_NAME_DEF);
                memcpy(local_name, ASHMEM_NAME_DEF, len);
        }
        mutex_unlock(&ashmem_mutex);

        /*
         * Now we are just copying from the stack variable to userland.
         * No lock held.
         */
        if (unlikely(copy_to_user(name, local_name, len)))
                ret = -EFAULT;
        return ret;
}

/*
 * ashmem_pin - pin the given ashmem region, returning whether it was
 * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
        struct ashmem_range *range, *next;
        int ret = ASHMEM_NOT_PURGED;

        list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
                /* moved past last applicable page; we can short circuit */
                if (range_before_page(range, pgstart))
                        break;

                /*
                 * The user can ask us to pin pages that span multiple ranges,
                 * or to pin pages that aren't even unpinned, so this is messy.
                 *
                 * Four cases:
                 * 1. The requested range subsumes an existing range, so we
                 *    just remove the entire matching range.
                 * 2. The requested range overlaps the start of an existing
                 *    range, so we just update that range.
                 * 3. The requested range overlaps the end of an existing
                 *    range, so we just update that range.
                 * 4. The requested range punches a hole in an existing range,
                 *    so we have to update one side of the range and then
                 *    create a new range for the other side.
                 */
                if (page_range_in_range(range, pgstart, pgend)) {
                        ret |= range->purged;

                        /* Case #1: Easy. Just nuke the whole thing. */
                        if (page_range_subsumes_range(range, pgstart, pgend)) {
                                range_del(range);
                                continue;
                        }

                        /* Case #2: We overlap from the start, so adjust it */
                        if (range->pgstart >= pgstart) {
                                range_shrink(range, pgend + 1, range->pgend);
                                continue;
                        }

                        /* Case #3: We overlap from the rear, so adjust it */
                        if (range->pgend <= pgend) {
                                range_shrink(range, range->pgstart,
                                             pgstart - 1);
                                continue;
                        }

                        /*
                         * Case #4: We eat a chunk out of the middle. A bit
                         * more complicated, we allocate a new range for the
                         * second half and adjust the first chunk's endpoint.
                         */
                        range_alloc(asma, range, range->purged,
                                    pgend + 1, range->pgend);
                        range_shrink(range, range->pgstart, pgstart - 1);
                        break;
                }
        }

        return ret;
}

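/*
 * Worked example (editor's illustration, not in the original source):
 * pinning pages [4, 5] while [2, 9] is unpinned hits case #4 above.
 * A new unpinned range [6, 9] is allocated and the existing range is
 * shrunk to [2, 3], so the newly pinned window [4, 5] disappears from
 * the area's unpinned list entirely.
 */
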
/*
 * ashmem_unpin - unpin the given range of pages. Returns zero on success.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
        struct ashmem_range *range, *next;
        unsigned int purged = ASHMEM_NOT_PURGED;

restart:
        list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
                /* short circuit: this is our insertion point */
                if (range_before_page(range, pgstart))
                        break;

                /*
                 * The user can ask us to unpin pages that are already entirely
                 * or partially pinned. We handle those two cases here.
                 */
                if (page_range_subsumed_by_range(range, pgstart, pgend))
                        return 0;
                if (page_range_in_range(range, pgstart, pgend)) {
                        pgstart = min(range->pgstart, pgstart);
                        pgend = max(range->pgend, pgend);
                        purged |= range->purged;
                        range_del(range);
                        goto restart;
                }
        }

        return range_alloc(asma, range, purged, pgstart, pgend);
}

/*
 * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
 * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
                                 size_t pgend)
{
        struct ashmem_range *range;
        int ret = ASHMEM_IS_PINNED;

        list_for_each_entry(range, &asma->unpinned_list, unpinned) {
                if (range_before_page(range, pgstart))
                        break;
                if (page_range_in_range(range, pgstart, pgend)) {
                        ret = ASHMEM_IS_UNPINNED;
                        break;
                }
        }

        return ret;
}

static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
                            void __user *p)
{
        struct ashmem_pin pin;
        size_t pgstart, pgend;
        int ret = -EINVAL;

        if (unlikely(!asma->file))
                return -EINVAL;

        if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
                return -EFAULT;

        /* per custom, you can pass zero for len to mean "everything onward" */
        if (!pin.len)
                pin.len = PAGE_ALIGN(asma->size) - pin.offset;

        if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
                return -EINVAL;

        if (unlikely(((__u32)-1) - pin.offset < pin.len))
                return -EINVAL;

        if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
                return -EINVAL;

        pgstart = pin.offset / PAGE_SIZE;
        pgend = pgstart + (pin.len / PAGE_SIZE) - 1;

        mutex_lock(&ashmem_mutex);

        switch (cmd) {
        case ASHMEM_PIN:
                ret = ashmem_pin(asma, pgstart, pgend);
                break;
        case ASHMEM_UNPIN:
                ret = ashmem_unpin(asma, pgstart, pgend);
                break;
        case ASHMEM_GET_PIN_STATUS:
                ret = ashmem_get_pin_status(asma, pgstart, pgend);
                break;
        }

        mutex_unlock(&ashmem_mutex);

        return ret;
}

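/*
 * Example (editor's sketch, not part of the driver): unpinning and later
 * re-pinning one page of an already-sized, mapped region from userspace.
 * 'fd' is the ashmem file descriptor from the earlier mmap example;
 * regenerate_contents() is a hypothetical application helper.
 *
 *      struct ashmem_pin pin = { .offset = 0, .len = 4096 };
 *
 *      ioctl(fd, ASHMEM_UNPIN, &pin);     - kernel may now purge the page
 *      ...
 *      if (ioctl(fd, ASHMEM_PIN, &pin) == ASHMEM_WAS_PURGED)
 *              regenerate_contents();     - data was reclaimed meanwhile
 */
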
static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct ashmem_area *asma = file->private_data;
        long ret = -ENOTTY;

        switch (cmd) {
        case ASHMEM_SET_NAME:
                ret = set_name(asma, (void __user *)arg);
                break;
        case ASHMEM_GET_NAME:
                ret = get_name(asma, (void __user *)arg);
                break;
        case ASHMEM_SET_SIZE:
                ret = -EINVAL;
                mutex_lock(&ashmem_mutex);
                if (!asma->file) {
                        ret = 0;
                        asma->size = (size_t)arg;
                }
                mutex_unlock(&ashmem_mutex);
                break;
        case ASHMEM_GET_SIZE:
                ret = asma->size;
                break;
        case ASHMEM_SET_PROT_MASK:
                ret = set_prot_mask(asma, arg);
                break;
        case ASHMEM_GET_PROT_MASK:
                ret = asma->prot_mask;
                break;
        case ASHMEM_PIN:
        case ASHMEM_UNPIN:
        case ASHMEM_GET_PIN_STATUS:
                ret = ashmem_pin_unpin(asma, cmd, (void __user *)arg);
                break;
        case ASHMEM_PURGE_ALL_CACHES:
                ret = -EPERM;
                if (capable(CAP_SYS_ADMIN)) {
                        struct shrink_control sc = {
                                .gfp_mask = GFP_KERNEL,
                                .nr_to_scan = LONG_MAX,
                        };
                        ret = ashmem_shrink_count(&ashmem_shrinker, &sc);
                        ashmem_shrink_scan(&ashmem_shrinker, &sc);
                }
                break;
        }

        return ret;
}

/* support for 32-bit userspace on 64-bit platforms */
#ifdef CONFIG_COMPAT
static long compat_ashmem_ioctl(struct file *file, unsigned int cmd,
                                unsigned long arg)
{
        switch (cmd) {
        case COMPAT_ASHMEM_SET_SIZE:
                cmd = ASHMEM_SET_SIZE;
                break;
        case COMPAT_ASHMEM_SET_PROT_MASK:
                cmd = ASHMEM_SET_PROT_MASK;
                break;
        }
        return ashmem_ioctl(file, cmd, arg);
}
#endif

static const struct file_operations ashmem_fops = {
        .owner = THIS_MODULE,
        .open = ashmem_open,
        .release = ashmem_release,
        .read_iter = ashmem_read_iter,
        .llseek = ashmem_llseek,
        .mmap = ashmem_mmap,
        .unlocked_ioctl = ashmem_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl = compat_ashmem_ioctl,
#endif
};

static struct miscdevice ashmem_misc = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "ashmem",
        .fops = &ashmem_fops,
};

static int __init ashmem_init(void)
{
        int ret = -ENOMEM;

        ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
                                               sizeof(struct ashmem_area),
                                               0, 0, NULL);
        if (unlikely(!ashmem_area_cachep)) {
                pr_err("failed to create slab cache\n");
                goto out;
        }

        ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
                                                sizeof(struct ashmem_range),
                                                0, 0, NULL);
        if (unlikely(!ashmem_range_cachep)) {
                pr_err("failed to create slab cache\n");
                goto out_free1;
        }

        ret = misc_register(&ashmem_misc);
        if (unlikely(ret)) {
                pr_err("failed to register misc device!\n");
                goto out_free2;
        }

        register_shrinker(&ashmem_shrinker);

        pr_info("initialized\n");

        return 0;

out_free2:
        kmem_cache_destroy(ashmem_range_cachep);
out_free1:
        kmem_cache_destroy(ashmem_area_cachep);
out:
        return ret;
}
device_initcall(ashmem_init);