linux/drivers/staging/lustre/lustre/llite/llite_mmap.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/uaccess.h>

#include <linux/fs.h>
#include <linux/pagemap.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include "llite_internal.h"

static const struct vm_operations_struct ll_file_vm_ops;

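/*
 * policy_from_vma() below converts a user address range of a mapped
 * Lustre file into a byte extent of the backing file, suitable for an
 * ldlm extent lock.  A worked example (illustrative only), assuming
 * 4096-byte pages, addr - vma->vm_start = 0x1234, vma->vm_pgoff = 2 and
 * count = 1:
 *
 *      l_extent.start = (0x1234 & PAGE_MASK) + (2 << PAGE_SHIFT)
 *                     = 0x1000 + 0x2000 = 0x3000
 *      l_extent.end   = (0x3000 + 1 - 1) | ~PAGE_MASK = 0x3fff
 *
 * i.e. the extent is rounded out to cover the whole faulting page.
 */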
void policy_from_vma(ldlm_policy_data_t *policy,
                     struct vm_area_struct *vma, unsigned long addr,
                     size_t count)
{
        policy->l_extent.start = ((addr - vma->vm_start) & PAGE_MASK) +
                                 (vma->vm_pgoff << PAGE_SHIFT);
        policy->l_extent.end = (policy->l_extent.start + count - 1) |
                               ~PAGE_MASK;
}

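/*
 * Illustrative use of our_vma() (a sketch, not a call site in this
 * file): a caller already holding mm->mmap_sem can check whether a user
 * buffer [addr, addr + count) overlaps a shared Lustre mapping and, if
 * so, derive the lock extent for it:
 *
 *      vma = our_vma(mm, addr, count);
 *      if (vma)
 *              policy_from_vma(&policy, vma, addr, count);
 *
 * Only vmas backed by ll_file_vm_ops and mapped with VM_SHARED are
 * returned; private or foreign mappings are skipped.
 */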
struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
                               size_t count)
{
        struct vm_area_struct *vma, *ret = NULL;

        /* mmap_sem must have been held by the caller. */
        LASSERT(!down_write_trylock(&mm->mmap_sem));

        for (vma = find_vma(mm, addr);
            vma && vma->vm_start < (addr + count); vma = vma->vm_next) {
                if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops &&
                    vma->vm_flags & VM_SHARED) {
                        ret = vma;
                        break;
                }
        }
        return ret;
}

/**
 * API-independent part of page fault initialization.
 * \param vma - virtual memory area addressed by the page fault
 * \param env - corresponding lu_env for processing
 * \param nest - nesting level
 * \param index - page index corresponding to the fault.
 * \param ra_flags - vma readahead flags.
 *
 * \return allocated and initialized cl_io for the fault operation
 *         (the env is returned via \a env_ret).
 * \retval -EINVAL if the env can't be allocated
 * \return other error codes from cl_io_init.
 */
static struct cl_io *
ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret,
                 struct cl_env_nest *nest, pgoff_t index,
                 unsigned long *ra_flags)
{
        struct file            *file = vma->vm_file;
        struct inode           *inode = file_inode(file);
        struct cl_io           *io;
        struct cl_fault_io     *fio;
        struct lu_env          *env;
        int                     rc;

        *env_ret = NULL;
        if (ll_file_nolock(file))
                return ERR_PTR(-EOPNOTSUPP);

        /*
         * A page fault can occur while Lustre I/O is already active for
         * the current thread, e.g., when doing read/write against a
         * user-level buffer that is itself mmapped from a Lustre file.
         * To avoid stomping on the existing context, optionally force
         * allocation of a new one.
         */
        env = cl_env_nested_get(nest);
        if (IS_ERR(env))
                return ERR_PTR(-EINVAL);

        *env_ret = env;

restart:
        io = vvp_env_thread_io(env);
        io->ci_obj = ll_i2info(inode)->lli_clob;
        LASSERT(io->ci_obj);

        fio = &io->u.ci_fault;
        fio->ft_index      = index;
        fio->ft_executable = vma->vm_flags & VM_EXEC;

        /*
         * Disable VM_SEQ_READ and use VM_RAND_READ to make sure that
         * the kernel will not read other pages not covered by ldlm in
         * filemap_nopage. We do our readahead in ll_readpage.
         */
        if (ra_flags)
                *ra_flags = vma->vm_flags & (VM_RAND_READ | VM_SEQ_READ);
        vma->vm_flags &= ~VM_SEQ_READ;
        vma->vm_flags |= VM_RAND_READ;

        CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags,
               fio->ft_index, fio->ft_executable);

        rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
        if (rc == 0) {
                struct vvp_io *vio = vvp_env_io(env);
                struct ll_file_data *fd = LUSTRE_FPRIVATE(file);

                LASSERT(vio->vui_cl.cis_io == io);

                /* mmap lock must be MANDATORY as it has to cache pages. */
                io->ci_lockreq = CILR_MANDATORY;
                vio->vui_fd = fd;
        } else {
                LASSERT(rc < 0);
                cl_io_fini(env, io);
                if (io->ci_need_restart)
                        goto restart;

                cl_env_nested_put(nest, env);
                io = ERR_PTR(rc);
        }

        return io;
}

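/*
 * Typical use of ll_fault_io_init() by the fault handlers below (a
 * sketch only, error handling elided):
 *
 *      io = ll_fault_io_init(vma, &env, &nest, index, &ra_flags);
 *      if (IS_ERR(io))
 *              return ...;     (env and nest were already cleaned up)
 *      if (io->ci_result == 0) {
 *              vio = vvp_env_io(env);
 *              ... fill in vio->u.fault ...
 *              result = cl_io_loop(env, io);
 *      }
 *      cl_io_fini(env, io);
 *      cl_env_nested_put(&nest, env);
 */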
/* Shared code for the page_mkwrite method (RHEL5 and RHEL6 variants). */
static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
                            bool *retry)
{
        struct lu_env           *env;
        struct cl_io            *io;
        struct vvp_io           *vio;
        struct cl_env_nest       nest;
        int                      result;
        sigset_t                 set;
        struct inode            *inode;
        struct ll_inode_info    *lli;

        io = ll_fault_io_init(vma, &env, &nest, vmpage->index, NULL);
        if (IS_ERR(io)) {
                result = PTR_ERR(io);
                goto out;
        }

        result = io->ci_result;
        if (result < 0)
                goto out_io;

        io->u.ci_fault.ft_mkwrite = 1;
        io->u.ci_fault.ft_writable = 1;

        vio = vvp_env_io(env);
        vio->u.fault.ft_vma    = vma;
        vio->u.fault.ft_vmpage = vmpage;

        set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));

        inode = vvp_object_inode(io->ci_obj);
        lli = ll_i2info(inode);

        result = cl_io_loop(env, io);

        cfs_restore_sigs(set);

        if (result == 0) {
                struct inode *inode = file_inode(vma->vm_file);
                struct ll_inode_info *lli = ll_i2info(inode);

                lock_page(vmpage);
                if (!vmpage->mapping) {
                        unlock_page(vmpage);

                        /* The page was truncated and the lock was cancelled;
                         * return -ENODATA so that VM_FAULT_NOPAGE will be
                         * returned to handle_mm_fault().
                         */
                        if (result == 0)
                                result = -ENODATA;
                } else if (!PageDirty(vmpage)) {
                        /* Race: the page was cleaned by ptlrpcd after it was
                         * unlocked, so it has to be added to the dirty cache
                         * again.  Otherwise this soon-to-be-dirty page won't
                         * consume any grants; even worse, if the page is
                         * being transferred it will break the RPC checksum.
                         */
                        unlock_page(vmpage);

                        CDEBUG(D_MMAP, "Race on page_mkwrite %p/%lu, page has been written out, retry.\n",
                               vmpage, vmpage->index);

                        *retry = true;
                        result = -EAGAIN;
                }

                if (result == 0) {
                        spin_lock(&lli->lli_lock);
                        lli->lli_flags |= LLIF_DATA_MODIFIED;
                        spin_unlock(&lli->lli_lock);
                }
        }

out_io:
        cl_io_fini(env, io);
        cl_env_nested_put(&nest, env);
out:
        CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
        LASSERT(ergo(result == 0, PageLocked(vmpage)));

        return result;
}

static inline int to_fault_error(int result)
{
        switch (result) {
        case 0:
                result = VM_FAULT_LOCKED;
                break;
        case -EFAULT:
                result = VM_FAULT_NOPAGE;
                break;
        case -ENOMEM:
                result = VM_FAULT_OOM;
                break;
        default:
                result = VM_FAULT_SIGBUS;
                break;
        }
        return result;
}

/**
 * Lustre implementation of the vm_operations_struct::fault() method, called
 * by the VM to serve a page fault (both in kernel and user space).
 *
 * \param vma - virtual memory area related to the page fault
 * \param vmf - structure describing the fault type and the faulting address
 *
 * \return allocated and filled _locked_ page for the address
 * \retval VM_FAULT_ERROR on general error
 * \retval NOPAGE_OOM if there is no memory to allocate a new page
 */
static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct lu_env           *env;
        struct cl_io            *io;
        struct vvp_io           *vio = NULL;
        struct page             *vmpage;
        unsigned long            ra_flags;
        struct cl_env_nest       nest;
        int                      result;
        int                      fault_ret = 0;

        io = ll_fault_io_init(vma, &env, &nest, vmf->pgoff, &ra_flags);
        if (IS_ERR(io))
                return to_fault_error(PTR_ERR(io));

        result = io->ci_result;
        if (result == 0) {
                vio = vvp_env_io(env);
                vio->u.fault.ft_vma         = vma;
                vio->u.fault.ft_vmpage      = NULL;
                vio->u.fault.ft_vmf         = vmf;
                vio->u.fault.ft_flags       = 0;
                vio->u.fault.ft_flags_valid = false;

                /* May call ll_readpage() */
                ll_cl_add(vma->vm_file, env, io);

                result = cl_io_loop(env, io);

                ll_cl_remove(vma->vm_file, env);

                /* ft_flags are only valid if we reached
                 * the call to filemap_fault
                 */
                if (vio->u.fault.ft_flags_valid)
                        fault_ret = vio->u.fault.ft_flags;

                vmpage = vio->u.fault.ft_vmpage;
                if (result != 0 && vmpage) {
                        put_page(vmpage);
                        vmf->page = NULL;
                }
        }
        cl_io_fini(env, io);
        cl_env_nested_put(&nest, env);

        vma->vm_flags |= ra_flags;
        if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
                fault_ret |= to_fault_error(result);

        CDEBUG(D_MMAP, "%s fault %d/%d\n",
               current->comm, fault_ret, result);
        return fault_ret;
}

static int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        int count = 0;
        bool printed = false;
        int result;
        sigset_t set;

        /* Only SIGKILL and SIGTERM are allowed for fault/nopage/mkwrite,
         * so that the process can be killed by the admin but other
         * signals do not cause a segfault.
         */
        set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));

restart:
        result = ll_fault0(vma, vmf);
        LASSERT(!(result & VM_FAULT_LOCKED));
        if (result == 0) {
                struct page *vmpage = vmf->page;

                /* check if this page has been truncated */
                lock_page(vmpage);
                if (unlikely(!vmpage->mapping)) { /* unlucky */
                        unlock_page(vmpage);
                        put_page(vmpage);
                        vmf->page = NULL;

                        if (!printed && ++count > 16) {
                                CWARN("the page is under heavy contention, maybe your app(%s) needs revising :-)\n",
                                      current->comm);
                                printed = true;
                        }

                        goto restart;
                }

                result = VM_FAULT_LOCKED;
        }
        cfs_restore_sigs(set);
        return result;
}

static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        int count = 0;
        bool printed = false;
        bool retry;
        int result;

        do {
                retry = false;
                result = ll_page_mkwrite0(vma, vmf->page, &retry);

                if (!printed && ++count > 16) {
                        const struct dentry *de = vma->vm_file->f_path.dentry;

                        CWARN("app(%s): the page %lu of file "DFID" is under heavy contention\n",
                              current->comm, vmf->pgoff,
                              PFID(ll_inode2fid(de->d_inode)));
                        printed = true;
                }
        } while (retry);

        switch (result) {
        case 0:
                LASSERT(PageLocked(vmf->page));
                result = VM_FAULT_LOCKED;
                break;
        case -ENODATA:
        case -EFAULT:
                result = VM_FAULT_NOPAGE;
                break;
        case -ENOMEM:
                result = VM_FAULT_OOM;
                break;
        case -EAGAIN:
                result = VM_FAULT_RETRY;
                break;
        default:
                result = VM_FAULT_SIGBUS;
                break;
        }

        return result;
}

/**
 * To avoid cancelling the locks covering a mmapped region under lock cache
 * pressure, we track the number of mapped vmas in vvp_object::vob_mmap_cnt.
 */
static void ll_vm_open(struct vm_area_struct *vma)
{
        struct inode *inode    = file_inode(vma->vm_file);
        struct vvp_object *vob = cl_inode2vvp(inode);

        LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
        atomic_inc(&vob->vob_mmap_cnt);
}

/**
 * Dual to ll_vm_open().
 */
static void ll_vm_close(struct vm_area_struct *vma)
{
        struct inode      *inode = file_inode(vma->vm_file);
        struct vvp_object *vob   = cl_inode2vvp(inode);

        atomic_dec(&vob->vob_mmap_cnt);
        LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
}

/* XXX put nice comment here.  talk about __free_pte -> dirty pages and
 * nopage's reference passing to the pte
 */
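/*
 * A worked example for the unmap_mapping_range() call below
 * (illustrative only), with 4096-byte pages and a page-aligned range
 * first = 0, last = 8191:
 *
 *      unmap_mapping_range(mapping, 4095, 8192, 0);
 *
 * unmap_mapping_range() rounds the start offset down to a page boundary
 * and the length up, so both pages covering [0, 8191] are unmapped.
 * Passing first + PAGE_SIZE - 1 as the start means that a first page
 * only partially covered by [first, last] stays mapped.
 */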
int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
{
        int rc = -ENOENT;

        LASSERTF(last > first, "last %llu first %llu\n", last, first);
        if (mapping_mapped(mapping)) {
                rc = 0;
                unmap_mapping_range(mapping, first + PAGE_SIZE - 1,
                                    last - first + 1, 0);
        }

        return rc;
}

static const struct vm_operations_struct ll_file_vm_ops = {
        .fault                  = ll_fault,
        .page_mkwrite           = ll_page_mkwrite,
        .open                   = ll_vm_open,
        .close                  = ll_vm_close,
};

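/*
 * How this path is reached (an illustrative sketch, not part of the
 * driver): a shared user mapping of a Lustre file, e.g.
 *
 *      fd = open("/mnt/lustre/file", O_RDWR);
 *      p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * ends up in ll_file_mmap() via file->f_op->mmap.  generic_file_mmap()
 * sets up the page-cache mapping, and vma->vm_ops is then switched to
 * ll_file_vm_ops above so that later faults and mkwrite requests are
 * served by ll_fault() and ll_page_mkwrite().
 */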
int ll_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file_inode(file);
        int rc;

        if (ll_file_nolock(file))
                return -EOPNOTSUPP;

        ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MAP, 1);
        rc = generic_file_mmap(file, vma);
        if (rc == 0) {
                vma->vm_ops = &ll_file_vm_ops;
                vma->vm_ops->open(vma);
                /* update the inode's size and mtime */
                rc = ll_glimpse_size(inode);
        }

        return rc;
}