/* drivers/staging/android/ashmem.c
 *
 * Anonymous Shared Memory Subsystem, ashmem
 *
 * Copyright (C) 2008 Google, Inc.
 *
 * Robert Love <rlove@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "ashmem: " fmt

#include <linux/module.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/falloc.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include "ashmem.h"

#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)

/*
 * ashmem_area - anonymous shared memory area
 * Lifecycle: From our parent file's open() until its release()
 * Locking: Protected by `ashmem_mutex'
 * Big Note: Mappings do NOT pin this structure; it dies on close()
 */
struct ashmem_area {
        char name[ASHMEM_FULL_NAME_LEN]; /* optional name in /proc/pid/maps */
        struct list_head unpinned_list;  /* this area's list of unpinned ranges */
        struct file *file;               /* the shmem-based backing file */
        size_t size;                     /* size of the mapping, in bytes */
        unsigned long prot_mask;         /* allowed prot bits, as vm_flags */
};

/*
 * ashmem_range - represents an interval of unpinned (evictable) pages
 * Lifecycle: From unpin to pin
 * Locking: Protected by `ashmem_mutex'
 */
struct ashmem_range {
        struct list_head lru;           /* entry in LRU list */
        struct list_head unpinned;      /* entry in its area's unpinned list */
        struct ashmem_area *asma;       /* associated area */
        size_t pgstart;                 /* starting page, inclusive */
        size_t pgend;                   /* ending page, inclusive */
        unsigned int purged;            /* ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED */
};

/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);

/* Count of pages on our LRU list, protected by ashmem_mutex */
static unsigned long lru_count;

/*
 * ashmem_mutex - protects the list of, and each individual, ashmem_area
 *
 * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
 */
static DEFINE_MUTEX(ashmem_mutex);

static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;

#define range_size(range) \
        ((range)->pgend - (range)->pgstart + 1)

#define range_on_lru(range) \
        ((range)->purged == ASHMEM_NOT_PURGED)

#define page_range_subsumes_range(range, start, end) \
        (((range)->pgstart >= (start)) && ((range)->pgend <= (end)))

#define page_range_subsumed_by_range(range, start, end) \
        (((range)->pgstart <= (start)) && ((range)->pgend >= (end)))

#define page_in_range(range, page) \
        (((range)->pgstart <= (page)) && ((range)->pgend >= (page)))

#define page_range_in_range(range, start, end) \
        (page_in_range(range, start) || page_in_range(range, end) || \
                page_range_subsumes_range(range, start, end))

#define range_before_page(range, page) \
        ((range)->pgend < (page))

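/*
 * Worked example for the interval macros above (illustrative numbers, not
 * from the original source): for a range covering pages [2, 5], range_size()
 * is 4, page_in_range(range, 5) is true, and page_range_in_range(range, 4, 9)
 * is true because the intervals overlap even though neither subsumes the
 * other.
 */
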
#define PROT_MASK               (PROT_EXEC | PROT_READ | PROT_WRITE)

/* Caller must hold ashmem_mutex. */
static inline void lru_add(struct ashmem_range *range)
{
        list_add_tail(&range->lru, &ashmem_lru_list);
        lru_count += range_size(range);
}

/* Caller must hold ashmem_mutex. */
static inline void lru_del(struct ashmem_range *range)
{
        list_del(&range->lru);
        lru_count -= range_size(range);
}

/*
 * range_alloc - allocate and initialize a new ashmem_range structure
 *
 * 'asma' - associated ashmem_area
 * 'prev_range' - the previous ashmem_range in the sorted asma->unpinned list
 * 'purged' - initial purge value (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 * 'start' - starting page, inclusive
 * 'end' - ending page, inclusive
 *
 * Caller must hold ashmem_mutex.
 */
static int range_alloc(struct ashmem_area *asma,
                       struct ashmem_range *prev_range, unsigned int purged,
                       size_t start, size_t end)
{
        struct ashmem_range *range;

        range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
        if (unlikely(!range))
                return -ENOMEM;

        range->asma = asma;
        range->pgstart = start;
        range->pgend = end;
        range->purged = purged;

        list_add_tail(&range->unpinned, &prev_range->unpinned);

        if (range_on_lru(range))
                lru_add(range);

        return 0;
}

/* Caller must hold ashmem_mutex. */
static void range_del(struct ashmem_range *range)
{
        list_del(&range->unpinned);
        if (range_on_lru(range))
                lru_del(range);
        kmem_cache_free(ashmem_range_cachep, range);
}

/*
 * range_shrink - shrinks a range
 *
 * Caller must hold ashmem_mutex.
 */
static inline void range_shrink(struct ashmem_range *range,
                                size_t start, size_t end)
{
        size_t pre = range_size(range);

        range->pgstart = start;
        range->pgend = end;

        if (range_on_lru(range))
                lru_count -= pre - range_size(range);
}

static int ashmem_open(struct inode *inode, struct file *file)
{
        struct ashmem_area *asma;
        int ret;

        ret = generic_file_open(inode, file);
        if (unlikely(ret))
                return ret;

        asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
        if (unlikely(!asma))
                return -ENOMEM;

        INIT_LIST_HEAD(&asma->unpinned_list);
        memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
        asma->prot_mask = PROT_MASK;
        file->private_data = asma;

        return 0;
}

static int ashmem_release(struct inode *ignored, struct file *file)
{
        struct ashmem_area *asma = file->private_data;
        struct ashmem_range *range, *next;

        mutex_lock(&ashmem_mutex);
        list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
                range_del(range);
        mutex_unlock(&ashmem_mutex);

        if (asma->file)
                fput(asma->file);
        kmem_cache_free(ashmem_area_cachep, asma);

        return 0;
}

static ssize_t ashmem_read(struct file *file, char __user *buf,
                           size_t len, loff_t *pos)
{
        struct ashmem_area *asma = file->private_data;
        int ret = 0;

        mutex_lock(&ashmem_mutex);

        /* If size is not set, or set to 0, always return EOF. */
        if (asma->size == 0)
                goto out;

        if (!asma->file) {
                ret = -EBADF;
                goto out;
        }

        ret = asma->file->f_op->read(asma->file, buf, len, pos);
        if (ret < 0)
                goto out;

        /* Update backing file pos, since f_op->read() doesn't */
        asma->file->f_pos = *pos;

out:
        mutex_unlock(&ashmem_mutex);
        return ret;
}

static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
{
        struct ashmem_area *asma = file->private_data;
        int ret;

        mutex_lock(&ashmem_mutex);

        if (asma->size == 0) {
                ret = -EINVAL;
                goto out;
        }

        if (!asma->file) {
                ret = -EBADF;
                goto out;
        }

        ret = asma->file->f_op->llseek(asma->file, offset, origin);
        if (ret < 0)
                goto out;

        /* Copy f_pos from backing file, since f_op->llseek() sets it */
        file->f_pos = asma->file->f_pos;

out:
        mutex_unlock(&ashmem_mutex);
        return ret;
}

static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
{
        return _calc_vm_trans(prot, PROT_READ,  VM_MAYREAD) |
               _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
               _calc_vm_trans(prot, PROT_EXEC,  VM_MAYEXEC);
}
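
/*
 * Example (illustrative, not from the original source): if asma->prot_mask
 * has been reduced to PROT_READ, then calc_vm_may_flags(~asma->prot_mask)
 * contains VM_MAYWRITE and VM_MAYEXEC, so ashmem_mmap() below clears those
 * bits from the vma and a later mprotect(PROT_WRITE) on the mapping fails
 * with -EACCES.
 */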

static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct ashmem_area *asma = file->private_data;
        int ret = 0;

        mutex_lock(&ashmem_mutex);

        /* user needs to SET_SIZE before mapping */
        if (unlikely(!asma->size)) {
                ret = -EINVAL;
                goto out;
        }

        /* requested protection bits must match our allowed protection mask */
        if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) &
                     calc_vm_prot_bits(PROT_MASK))) {
                ret = -EPERM;
                goto out;
        }
        vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);

        if (!asma->file) {
                char *name = ASHMEM_NAME_DEF;
                struct file *vmfile;

                if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
                        name = asma->name;

                /* ... and allocate the backing shmem file */
                vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
                if (unlikely(IS_ERR(vmfile))) {
                        ret = PTR_ERR(vmfile);
                        goto out;
                }
                asma->file = vmfile;
        }
        get_file(asma->file);

        /*
         * XXX - Reworked to use shmem_zero_setup() instead of
         * shmem_set_file while we're in staging. -jstultz
         */
        if (vma->vm_flags & VM_SHARED) {
                ret = shmem_zero_setup(vma);
                if (ret) {
                        fput(asma->file);
                        goto out;
                }
        }

        if (vma->vm_file)
                fput(vma->vm_file);
        vma->vm_file = asma->file;

out:
        mutex_unlock(&ashmem_mutex);
        return ret;
}

/*
 * ashmem_shrink - our cache shrinker, called from mm/vmscan.c :: shrink_slab
 *
 * 'nr_to_scan' is the number of objects (pages) to prune, or 0 to query how
 * many objects (pages) we have in total.
 *
 * 'gfp_mask' is the mask of the allocation that got us into this mess.
 *
 * Return value is the number of objects (pages) remaining, or -1 if we cannot
 * proceed without risk of deadlock (due to gfp_mask).
 *
 * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
 * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
 * pages freed.
 */
static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
        struct ashmem_range *range, *next;

        /* We might recurse into filesystem code, so bail out if necessary */
        if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
                return -1;
        if (!sc->nr_to_scan)
                return lru_count;

        mutex_lock(&ashmem_mutex);
        list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
                loff_t start = range->pgstart * PAGE_SIZE;
                loff_t end = (range->pgend + 1) * PAGE_SIZE;

                do_fallocate(range->asma->file,
                                FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                                start, end - start);
                range->purged = ASHMEM_WAS_PURGED;
                lru_del(range);

                sc->nr_to_scan -= range_size(range);
                if (sc->nr_to_scan <= 0)
                        break;
        }
        mutex_unlock(&ashmem_mutex);

        return lru_count;
}

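/*
 * Note (editorial): seeks is four times DEFAULT_SEEKS, which tells
 * shrink_slab() that these pages are relatively expensive to recreate, so
 * proportionally less scan pressure is applied to ashmem than to a cache
 * registered with plain DEFAULT_SEEKS.
 */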
static struct shrinker ashmem_shrinker = {
        .shrink = ashmem_shrink,
        .seeks = DEFAULT_SEEKS * 4,
};

static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
        int ret = 0;

        mutex_lock(&ashmem_mutex);

        /* the user can only remove, not add, protection bits */
        if (unlikely((asma->prot_mask & prot) != prot)) {
                ret = -EINVAL;
                goto out;
        }

        /* does the application expect PROT_READ to imply PROT_EXEC? */
        if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
                prot |= PROT_EXEC;

        asma->prot_mask = prot;

out:
        mutex_unlock(&ashmem_mutex);
        return ret;
}

static int set_name(struct ashmem_area *asma, void __user *name)
{
        int ret = 0;
        char local_name[ASHMEM_NAME_LEN];

        /*
         * Holding ashmem_mutex across a copy_from_user() might fault and
         * so try to take mmap_sem. If another thread is in ashmem_mmap(),
         * it holds mmap_sem and is waiting for ashmem_mutex, giving a
         * lock-order deadlock. So copy the name into a local variable
         * without the mutex held, then copy the local variable into the
         * structure member with the lock held.
         */
        if (copy_from_user(local_name, name, ASHMEM_NAME_LEN))
                return -EFAULT;

        mutex_lock(&ashmem_mutex);
        /* cannot change an existing mapping's name */
        if (unlikely(asma->file)) {
                ret = -EINVAL;
                goto out;
        }
        memcpy(asma->name + ASHMEM_NAME_PREFIX_LEN,
                local_name, ASHMEM_NAME_LEN);
        asma->name[ASHMEM_FULL_NAME_LEN - 1] = '\0';
out:
        mutex_unlock(&ashmem_mutex);

        return ret;
}

static int get_name(struct ashmem_area *asma, void __user *name)
{
        int ret = 0;
        size_t len;
        /*
         * Copy the name into a local variable with the lock held, then
         * copy it out to userspace with no locks held. That way, even if
         * copy_to_user() has to wait for mmap_sem, it cannot deadlock
         * against ashmem_mmap().
         */
        char local_name[ASHMEM_NAME_LEN];

        mutex_lock(&ashmem_mutex);
        if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
                /*
                 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
                 * prevents us from revealing one user's stack to another.
                 */
                len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
                memcpy(local_name, asma->name + ASHMEM_NAME_PREFIX_LEN, len);
        } else {
                len = sizeof(ASHMEM_NAME_DEF);
                memcpy(local_name, ASHMEM_NAME_DEF, len);
        }
        mutex_unlock(&ashmem_mutex);

        /*
         * Now we are just copying from the stack variable to userland,
         * no lock held
         */
        if (unlikely(copy_to_user(name, local_name, len)))
                ret = -EFAULT;
        return ret;
}

/*
 * ashmem_pin - pin the given ashmem region, returning whether it was
 * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
        struct ashmem_range *range, *next;
        int ret = ASHMEM_NOT_PURGED;

        list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
                /* moved past last applicable page; we can short circuit */
                if (range_before_page(range, pgstart))
                        break;

                /*
                 * The user can ask us to pin pages that span multiple ranges,
                 * or to pin pages that aren't even unpinned, so this is messy.
                 *
                 * Four cases:
                 * 1. The requested range subsumes an existing range, so we
                 *    just remove the entire matching range.
                 * 2. The requested range overlaps the start of an existing
                 *    range, so we just update that range.
                 * 3. The requested range overlaps the end of an existing
                 *    range, so we just update that range.
                 * 4. The requested range punches a hole in an existing range,
                 *    so we have to update one side of the range and then
                 *    create a new range for the other side.
                 */
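                /*
                 * Worked example for case #4 (illustrative numbers, added
                 * editorially): pinning pages [3, 4] of an unpinned range
                 * [1, 8] shrinks that range to [1, 2] and allocates a new
                 * unpinned range [5, 8].
                 */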
                if (page_range_in_range(range, pgstart, pgend)) {
                        ret |= range->purged;

                        /* Case #1: Easy. Just nuke the whole thing. */
                        if (page_range_subsumes_range(range, pgstart, pgend)) {
                                range_del(range);
                                continue;
                        }

                        /* Case #2: We overlap from the start, so adjust it */
                        if (range->pgstart >= pgstart) {
                                range_shrink(range, pgend + 1, range->pgend);
                                continue;
                        }

                        /* Case #3: We overlap from the rear, so adjust it */
                        if (range->pgend <= pgend) {
                                range_shrink(range, range->pgstart,
                                             pgstart - 1);
                                continue;
                        }

                        /*
                         * Case #4: We eat a chunk out of the middle. A bit
                         * more complicated, we allocate a new range for the
                         * second half and adjust the first chunk's endpoint.
                         */
                        range_alloc(asma, range, range->purged,
                                    pgend + 1, range->pgend);
                        range_shrink(range, range->pgstart, pgstart - 1);
                        break;
                }
        }

        return ret;
}

/*
 * ashmem_unpin - unpin the given range of pages. Returns zero on success.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
        struct ashmem_range *range, *next;
        unsigned int purged = ASHMEM_NOT_PURGED;

restart:
        list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
                /* short circuit: this is our insertion point */
                if (range_before_page(range, pgstart))
                        break;

                /*
                 * The user can ask us to unpin pages that are already entirely
                 * or partially pinned. We handle those two cases here.
                 */
                if (page_range_subsumed_by_range(range, pgstart, pgend))
                        return 0;
                if (page_range_in_range(range, pgstart, pgend)) {
                        pgstart = min_t(size_t, range->pgstart, pgstart);
                        pgend = max_t(size_t, range->pgend, pgend);
                        purged |= range->purged;
                        range_del(range);
                        goto restart;
                }
        }

        return range_alloc(asma, range, purged, pgstart, pgend);
}

/*
 * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
 * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
                                 size_t pgend)
{
        struct ashmem_range *range;
        int ret = ASHMEM_IS_PINNED;

        list_for_each_entry(range, &asma->unpinned_list, unpinned) {
                if (range_before_page(range, pgstart))
                        break;
                if (page_range_in_range(range, pgstart, pgend)) {
                        ret = ASHMEM_IS_UNPINNED;
                        break;
                }
        }

        return ret;
}

static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
                            void __user *p)
{
        struct ashmem_pin pin;
        size_t pgstart, pgend;
        int ret = -EINVAL;

        if (unlikely(!asma->file))
                return -EINVAL;

        if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
                return -EFAULT;

        /* per custom, you can pass zero for len to mean "everything onward" */
        if (!pin.len)
                pin.len = PAGE_ALIGN(asma->size) - pin.offset;

        if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
                return -EINVAL;

        if (unlikely(((__u32) -1) - pin.offset < pin.len))
                return -EINVAL;

        if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
                return -EINVAL;

        pgstart = pin.offset / PAGE_SIZE;
        pgend = pgstart + (pin.len / PAGE_SIZE) - 1;
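        /*
         * e.g. (illustrative, assuming PAGE_SIZE == 4096): offset 4096 and
         * len 8192 cover pages [1, 2], so pgstart = 1 and pgend = 2
         */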

        mutex_lock(&ashmem_mutex);

        switch (cmd) {
        case ASHMEM_PIN:
                ret = ashmem_pin(asma, pgstart, pgend);
                break;
        case ASHMEM_UNPIN:
                ret = ashmem_unpin(asma, pgstart, pgend);
                break;
        case ASHMEM_GET_PIN_STATUS:
                ret = ashmem_get_pin_status(asma, pgstart, pgend);
                break;
        }

        mutex_unlock(&ashmem_mutex);

        return ret;
}

static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct ashmem_area *asma = file->private_data;
        long ret = -ENOTTY;

        switch (cmd) {
        case ASHMEM_SET_NAME:
                ret = set_name(asma, (void __user *) arg);
                break;
        case ASHMEM_GET_NAME:
                ret = get_name(asma, (void __user *) arg);
                break;
        case ASHMEM_SET_SIZE:
                ret = -EINVAL;
                if (!asma->file) {
                        ret = 0;
                        asma->size = (size_t) arg;
                }
                break;
        case ASHMEM_GET_SIZE:
                ret = asma->size;
                break;
        case ASHMEM_SET_PROT_MASK:
                ret = set_prot_mask(asma, arg);
                break;
        case ASHMEM_GET_PROT_MASK:
                ret = asma->prot_mask;
                break;
        case ASHMEM_PIN:
        case ASHMEM_UNPIN:
        case ASHMEM_GET_PIN_STATUS:
                ret = ashmem_pin_unpin(asma, cmd, (void __user *) arg);
                break;
        case ASHMEM_PURGE_ALL_CACHES:
                ret = -EPERM;
                if (capable(CAP_SYS_ADMIN)) {
                        struct shrink_control sc = {
                                .gfp_mask = GFP_KERNEL,
                                .nr_to_scan = 0,
                        };
                        ret = ashmem_shrink(&ashmem_shrinker, &sc);
                        sc.nr_to_scan = ret;
                        ashmem_shrink(&ashmem_shrinker, &sc);
                }
                break;
        }

        return ret;
}
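
/*
 * Typical userspace usage, sketched for reference (not part of the original
 * file; constants are from the ashmem uapi header):
 *
 *      int fd = open("/dev/ashmem", O_RDWR);
 *      ioctl(fd, ASHMEM_SET_NAME, "example-region");   // optional, pre-mmap
 *      ioctl(fd, ASHMEM_SET_SIZE, 4096);               // required pre-mmap
 *      char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *                     MAP_SHARED, fd, 0);
 *
 *      struct ashmem_pin pin = { .offset = 0, .len = 4096 };
 *      ioctl(fd, ASHMEM_UNPIN, &pin);       // pages become evictable
 *      if (ioctl(fd, ASHMEM_PIN, &pin) == ASHMEM_WAS_PURGED)
 *              ;                            // contents were reclaimed; the
 *                                           // caller must regenerate them
 */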

/* support of 32bit userspace on 64bit platforms */
#ifdef CONFIG_COMPAT
static long compat_ashmem_ioctl(struct file *file, unsigned int cmd,
                                unsigned long arg)
{
        switch (cmd) {
        case COMPAT_ASHMEM_SET_SIZE:
                cmd = ASHMEM_SET_SIZE;
                break;
        case COMPAT_ASHMEM_SET_PROT_MASK:
                cmd = ASHMEM_SET_PROT_MASK;
                break;
        }
        return ashmem_ioctl(file, cmd, arg);
}
#endif

static const struct file_operations ashmem_fops = {
        .owner = THIS_MODULE,
        .open = ashmem_open,
        .release = ashmem_release,
        .read = ashmem_read,
        .llseek = ashmem_llseek,
        .mmap = ashmem_mmap,
        .unlocked_ioctl = ashmem_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl = compat_ashmem_ioctl,
#endif
};

static struct miscdevice ashmem_misc = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "ashmem",
        .fops = &ashmem_fops,
};

static int __init ashmem_init(void)
{
        int ret = -ENOMEM;

        ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
                                          sizeof(struct ashmem_area),
                                          0, 0, NULL);
        if (unlikely(!ashmem_area_cachep)) {
                pr_err("failed to create slab cache\n");
                goto out;
        }

        ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
                                          sizeof(struct ashmem_range),
                                          0, 0, NULL);
        if (unlikely(!ashmem_range_cachep)) {
                pr_err("failed to create slab cache\n");
                goto out_free_area;
        }

        ret = misc_register(&ashmem_misc);
        if (unlikely(ret)) {
                pr_err("failed to register misc device!\n");
                goto out_free_range;
        }

        register_shrinker(&ashmem_shrinker);

        pr_info("initialized\n");

        return 0;

        /* don't leak the earlier allocations on a partial failure */
out_free_range:
        kmem_cache_destroy(ashmem_range_cachep);
out_free_area:
        kmem_cache_destroy(ashmem_area_cachep);
out:
        return ret;
}

static void __exit ashmem_exit(void)
{
        int ret;

        unregister_shrinker(&ashmem_shrinker);

        ret = misc_deregister(&ashmem_misc);
        if (unlikely(ret))
                pr_err("failed to unregister misc device!\n");

        kmem_cache_destroy(ashmem_range_cachep);
        kmem_cache_destroy(ashmem_area_cachep);

        pr_info("unloaded\n");
}

module_init(ashmem_init);
module_exit(ashmem_exit);

MODULE_LICENSE("GPL");