linux/fs/ramfs/file-nommu.c
/* file-nommu.c: no-MMU version of ramfs
 *
 * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/ramfs.h>
#include <linux/pagevec.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include "internal.h"

static int ramfs_nommu_setattr(struct dentry *, struct iattr *);

const struct address_space_operations ramfs_aops = {
        .readpage               = simple_readpage,
        .write_begin            = simple_write_begin,
        .write_end              = simple_write_end,
        .set_page_dirty         = __set_page_dirty_no_writeback,
};

const struct file_operations ramfs_file_operations = {
        .mmap                   = ramfs_nommu_mmap,
        .get_unmapped_area      = ramfs_nommu_get_unmapped_area,
        .read                   = do_sync_read,
        .aio_read               = generic_file_aio_read,
        .write                  = do_sync_write,
        .aio_write              = generic_file_aio_write,
        .fsync                  = noop_fsync,
        .splice_read            = generic_file_splice_read,
        .splice_write           = generic_file_splice_write,
        .llseek                 = generic_file_llseek,
};

const struct inode_operations ramfs_file_inode_operations = {
        .setattr                = ramfs_nommu_setattr,
        .getattr                = simple_getattr,
};
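
These three operation tables are the no-MMU file personality of ramfs. Elsewhere in the filesystem (fs/ramfs/inode.c), the inode constructor attaches them to each regular-file inode it creates; a minimal sketch of that wiring, with an invented helper name and paraphrased rather than quoted code, looks like this:

/* sketch only: how fs/ramfs/inode.c wires these tables up for a regular
 * file; ramfs_wire_up_file_inode() is a made-up name for illustration */
static void ramfs_wire_up_file_inode(struct inode *inode)
{
        inode->i_mapping->a_ops = &ramfs_aops;          /* page-cache ops */
        inode->i_op  = &ramfs_file_inode_operations;    /* setattr/getattr */
        inode->i_fop = &ramfs_file_operations;          /* read/write/mmap */
}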

/*****************************************************************************/
/*
 * add a contiguous set of pages into a ramfs inode when it's truncated from
 * size 0 on the assumption that it's going to be used for an mmap of shared
 * memory
 */
int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
{
        unsigned long npages, xpages, loop;
        struct page *pages;
        unsigned order;
        void *data;
        int ret;

        /* make various checks */
        order = get_order(newsize);
        if (unlikely(order >= MAX_ORDER))
                return -EFBIG;

        ret = inode_newsize_ok(inode, newsize);
        if (ret)
                return ret;

        i_size_write(inode, newsize);

        /* allocate enough contiguous pages to be able to satisfy the
         * request */
        pages = alloc_pages(mapping_gfp_mask(inode->i_mapping), order);
        if (!pages)
                return -ENOMEM;

        /* split the high-order page into an array of single pages */
        xpages = 1UL << order;
        npages = (newsize + PAGE_SIZE - 1) >> PAGE_SHIFT;

        split_page(pages, order);

        /* trim off any pages we don't actually require */
        for (loop = npages; loop < xpages; loop++)
                __free_page(pages + loop);

        /* clear the memory we allocated */
        newsize = PAGE_SIZE * npages;
        data = page_address(pages);
        memset(data, 0, newsize);

        /* attach all the pages to the inode's address space */
        for (loop = 0; loop < npages; loop++) {
                struct page *page = pages + loop;

                ret = add_to_page_cache_lru(page, inode->i_mapping, loop,
                                        GFP_KERNEL);
                if (ret < 0)
                        goto add_error;

                /* prevent the page from being discarded on memory pressure */
                SetPageDirty(page);

                unlock_page(page);
        }

        return 0;

add_error:
        while (loop < npages)
                __free_page(pages + loop++);
        return ret;
}
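
The sizing arithmetic in ramfs_nommu_expand_for_mapping() is easiest to see with numbers. The userspace model below assumes 4 KiB pages and mimics get_order() with a loop; the EX_* macros and everything else in it are invented for the example. For a 12288-byte (three-page) truncate it prints npages=3, order=2, xpages=4, trimmed=1: four contiguous pages are allocated, split into singles, and one surplus page is handed straight back with __free_page().

#include <stdio.h>

#define EX_PAGE_SHIFT 12                        /* assume 4 KiB pages */
#define EX_PAGE_SIZE  (1UL << EX_PAGE_SHIFT)

int main(void)
{
        unsigned long newsize = 12288;          /* e.g. a three-page truncate */
        unsigned long npages = (newsize + EX_PAGE_SIZE - 1) >> EX_PAGE_SHIFT;
        unsigned int order = 0;

        /* model of get_order(): smallest order with 2^order pages >= npages */
        while ((1UL << order) < npages)
                order++;

        /* alloc_pages() returns 2^order pages; the surplus gets freed */
        printf("npages=%lu order=%u xpages=%lu trimmed=%lu\n",
               npages, order, 1UL << order, (1UL << order) - npages);
        return 0;
}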

/*****************************************************************************/
/*
 * handle a request to change the size of the file's backing store
 */
static int ramfs_nommu_resize(struct inode *inode, loff_t newsize, loff_t size)
{
        int ret;

        /* assume a truncate from zero size is going to be for the purposes of
         * shared mmap */
        if (size == 0) {
                if (unlikely(newsize >> 32))
                        return -EFBIG;

                return ramfs_nommu_expand_for_mapping(inode, newsize);
        }

        /* check that a decrease in size doesn't cut off any shared mappings */
        if (newsize < size) {
                ret = nommu_shrink_inode_mappings(inode, size, newsize);
                if (ret < 0)
                        return ret;
        }

        truncate_setsize(inode, newsize);
        return 0;
}

/*****************************************************************************/
/*
 * handle a change of attributes
 * - we're specifically interested in a change of size
 */
static int ramfs_nommu_setattr(struct dentry *dentry, struct iattr *ia)
{
        struct inode *inode = dentry->d_inode;
        unsigned int old_ia_valid = ia->ia_valid;
        int ret = 0;

        /* POSIX UID/GID verification for setting inode attributes */
        ret = inode_change_ok(inode, ia);
        if (ret)
                return ret;

        /* pick out size-changing events */
        if (ia->ia_valid & ATTR_SIZE) {
                loff_t size = inode->i_size;

                if (ia->ia_size != size) {
                        ret = ramfs_nommu_resize(inode, ia->ia_size, size);
                        if (ret < 0 || ia->ia_valid == ATTR_SIZE)
                                goto out;
                } else {
                        /* we skipped the truncate but must still update
                         * timestamps
                         */
                        ia->ia_valid |= ATTR_MTIME|ATTR_CTIME;
                }
        }

        setattr_copy(inode, ia);
 out:
        ia->ia_valid = old_ia_valid;
        return ret;
}
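
For reference, a size-only attribute change arrives at this handler shaped roughly like the sketch below; the struct iattr field names are real, the values are illustrative, and real callers such as truncate() usually set timestamp bits as well, which is why the ia_valid == ATTR_SIZE test above only skips setattr_copy() when the size is the sole attribute being changed.

/* sketch only: a size-only change request as seen by ->setattr() */
struct iattr size_only_change = {
        .ia_valid = ATTR_SIZE,
        .ia_size  = 8192,               /* requested new length in bytes */
};
/* the VFS passes this to ramfs_nommu_setattr(), which hands the size
 * change on to ramfs_nommu_resize() */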

/*****************************************************************************/
/*
 * try to determine where a shared mapping can be made
 * - we require that:
 *   - the pages to be mapped must exist
 *   - the pages be physically contiguous in sequence
 */
unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
                                            unsigned long addr, unsigned long len,
                                            unsigned long pgoff, unsigned long flags)
{
        unsigned long maxpages, lpages, nr, loop, ret;
        struct inode *inode = file->f_path.dentry->d_inode;
        struct page **pages = NULL, **ptr, *page;
        loff_t isize;

        if (!(flags & MAP_SHARED))
                return addr;

        /* the mapping mustn't extend beyond the EOF */
        lpages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        isize = i_size_read(inode);

        ret = -EINVAL;
        maxpages = (isize + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (pgoff >= maxpages)
                goto out;

        if (maxpages - pgoff < lpages)
                goto out;

        /* gang-find the pages */
        ret = -ENOMEM;
        pages = kzalloc(lpages * sizeof(struct page *), GFP_KERNEL);
        if (!pages)
                goto out_free;

        nr = find_get_pages(inode->i_mapping, pgoff, lpages, pages);
        if (nr != lpages)
                goto out_free_pages; /* leave if some pages were missing */

        /* check the pages for physical adjacency */
        ptr = pages;
        page = *ptr++;
        page++;
        for (loop = lpages; loop > 1; loop--)
                if (*ptr++ != page++)
                        goto out_free_pages;

        /* okay - all conditions fulfilled */
        ret = (unsigned long) page_address(pages[0]);

out_free_pages:
        ptr = pages;
        for (loop = nr; loop > 0; loop--)
                put_page(*ptr++);
out_free:
        kfree(pages);
out:
        return ret;
}
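
The adjacency walk above relies on consecutive struct page pointers implying physically consecutive page frames, which holds when the struct page entries for adjacent frames are themselves adjacent in memory (as with a flat mem_map). A userspace model of the same pointer walk, with invented names, is:

#include <stddef.h>

/* Returns 1 if ptrs[0..n-1] point at consecutive elements of an array whose
 * elements are 'stride' bytes apart; in the kernel code above the elements
 * are struct page entries and the stride is sizeof(struct page). */
static int pointers_are_adjacent(void *const ptrs[], size_t n, size_t stride)
{
        size_t i;

        for (i = 1; i < n; i++)
                if ((char *)ptrs[i] != (char *)ptrs[i - 1] + stride)
                        return 0;
        return 1;
}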

/*****************************************************************************/
/*
 * set up a mapping for shared memory segments
 */
int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma)
{
        /* only shared mappings can be mapped directly over the backing pages;
         * -ENOSYS tells the no-MMU mmap code to fall back to making a private
         * copy of the data instead */
        if (!(vma->vm_flags & VM_SHARED))
                return -ENOSYS;

        file_accessed(file);
        vma->vm_ops = &generic_file_vm_ops;
        return 0;
}
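
Putting the pieces together, a userspace program on a no-MMU target can drive this file roughly as follows. The mount point and file name are hypothetical and 4 KiB pages are assumed; the ftruncate() from size zero reaches ramfs_nommu_setattr() -> ramfs_nommu_resize() -> ramfs_nommu_expand_for_mapping(), and the MAP_SHARED mmap() is then placed by ramfs_nommu_get_unmapped_area() and ramfs_nommu_mmap() directly over the contiguous backing pages.

#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        size_t len = 3 * 4096;          /* three pages, assuming 4 KiB pages */
        int fd = open("/mnt/ramfs/shm", O_RDWR | O_CREAT, 0600);
        char *p;

        if (fd < 0 || ftruncate(fd, len) < 0)
                return 1;

        p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED)
                return 1;

        memcpy(p, "hello", 6);          /* writes land straight in the page cache */

        munmap(p, len);
        close(fd);
        return 0;
}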