linux/drivers/staging/lustre/lustre/llite/llite_mmap.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/uaccess.h>

#include <linux/fs.h>
#include <linux/pagemap.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include "llite_internal.h"

static const struct vm_operations_struct ll_file_vm_ops;

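/*
 * Fill @policy with the byte extent of the backing file covered by the
 * @count bytes that start at user address @addr inside @vma, rounded out to
 * page boundaries.
 */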
void policy_from_vma(union ldlm_policy_data *policy,
                     struct vm_area_struct *vma, unsigned long addr,
                     size_t count)
{
        policy->l_extent.start = ((addr - vma->vm_start) & PAGE_MASK) +
                                 (vma->vm_pgoff << PAGE_SHIFT);
        policy->l_extent.end = (policy->l_extent.start + count - 1) |
                               ~PAGE_MASK;
}

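/*
 * Return the first vma in @mm that overlaps [addr, addr + count), is backed
 * by ll_file_vm_ops and is mapped shared; NULL if there is no such vma.
 * The caller must hold mmap_sem.
 */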
struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
                               size_t count)
{
        struct vm_area_struct *vma, *ret = NULL;

        /* mmap_sem must have been held by caller. */
        LASSERT(!down_write_trylock(&mm->mmap_sem));

        for (vma = find_vma(mm, addr);
            vma && vma->vm_start < (addr + count); vma = vma->vm_next) {
                if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops &&
                    vma->vm_flags & VM_SHARED) {
                        ret = vma;
                        break;
                }
        }
        return ret;
}

/**
 * API independent part for page fault initialization.
 * \param env - corresponding lu_env for processing
 * \param vma - virtual memory area addressed by the page fault
 * \param index - page index corresponding to the fault
 * \param ra_flags - vma readahead flags
 *
 * \return error codes from cl_io_init.
 */
static struct cl_io *
ll_fault_io_init(struct lu_env *env, struct vm_area_struct *vma,
                 pgoff_t index, unsigned long *ra_flags)
{
        struct file            *file = vma->vm_file;
        struct inode           *inode = file_inode(file);
        struct cl_io           *io;
        struct cl_fault_io     *fio;
        int                     rc;

        if (ll_file_nolock(file))
                return ERR_PTR(-EOPNOTSUPP);

restart:
        io = vvp_env_thread_io(env);
        io->ci_obj = ll_i2info(inode)->lli_clob;
        LASSERT(io->ci_obj);

        fio = &io->u.ci_fault;
        fio->ft_index      = index;
        fio->ft_executable = vma->vm_flags & VM_EXEC;

        /*
         * Disable VM_SEQ_READ and use VM_RAND_READ to make sure that
         * the kernel will not read other pages not covered by ldlm in
         * filemap_nopage; we do our readahead in ll_readpage.
         */
        if (ra_flags)
                *ra_flags = vma->vm_flags & (VM_RAND_READ | VM_SEQ_READ);
        vma->vm_flags &= ~VM_SEQ_READ;
        vma->vm_flags |= VM_RAND_READ;

        CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags,
               fio->ft_index, fio->ft_executable);

        rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
        if (rc == 0) {
                struct vvp_io *vio = vvp_env_io(env);
                struct ll_file_data *fd = LUSTRE_FPRIVATE(file);

                LASSERT(vio->vui_cl.cis_io == io);

                /* mmap lock must be MANDATORY because it has to cache pages. */
                io->ci_lockreq = CILR_MANDATORY;
                vio->vui_fd = fd;
        } else {
                LASSERT(rc < 0);
                cl_io_fini(env, io);
                if (io->ci_need_restart)
                        goto restart;

                io = ERR_PTR(rc);
        }

        return io;
}

/* Code shared by the page_mkwrite method for rhel5 and rhel6 */
static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
                            bool *retry)
{
        struct lu_env           *env;
        struct cl_io            *io;
        struct vvp_io           *vio;
        int                      result;
        u16 refcheck;
        sigset_t                 set;
        struct inode             *inode;
        struct ll_inode_info     *lli;

        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                return PTR_ERR(env);

        io = ll_fault_io_init(env, vma, vmpage->index, NULL);
        if (IS_ERR(io)) {
                result = PTR_ERR(io);
                goto out;
        }

        result = io->ci_result;
        if (result < 0)
                goto out_io;

        io->u.ci_fault.ft_mkwrite = 1;
        io->u.ci_fault.ft_writable = 1;

        vio = vvp_env_io(env);
        vio->u.fault.ft_vma    = vma;
        vio->u.fault.ft_vmpage = vmpage;

        set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));

        inode = vvp_object_inode(io->ci_obj);
        lli = ll_i2info(inode);

        result = cl_io_loop(env, io);

        cfs_restore_sigs(set);

        if (result == 0) {
                struct inode *inode = file_inode(vma->vm_file);
                struct ll_inode_info *lli = ll_i2info(inode);

                lock_page(vmpage);
                if (!vmpage->mapping) {
                        unlock_page(vmpage);

                        /* page was truncated and lock was cancelled, return
                         * ENODATA so that VM_FAULT_NOPAGE will be returned
                         * to handle_mm_fault().
                         */
                        if (result == 0)
                                result = -ENODATA;
                } else if (!PageDirty(vmpage)) {
                        /* Race: the page has been cleaned by ptlrpcd after
                         * it was unlocked, so it has to be added to the dirty
                         * cache again; otherwise this soon-to-be-dirty page
                         * won't consume any grants, and even worse, if the
                         * page is being transferred it will break the RPC
                         * checksum.
                         */
                        unlock_page(vmpage);

                        CDEBUG(D_MMAP, "Race on page_mkwrite %p/%lu, page has been written out, retry.\n",
                               vmpage, vmpage->index);

                        *retry = true;
                        result = -EAGAIN;
                }

                if (!result)
                        set_bit(LLIF_DATA_MODIFIED, &lli->lli_flags);
        }

out_io:
        cl_io_fini(env, io);
out:
        cl_env_put(env, &refcheck);
        CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
        LASSERT(ergo(result == 0, PageLocked(vmpage)));

        return result;
}

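/* Translate a cl_io result code into the matching VM_FAULT_* value. */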
static inline int to_fault_error(int result)
{
        switch (result) {
        case 0:
                result = VM_FAULT_LOCKED;
                break;
        case -EFAULT:
                result = VM_FAULT_NOPAGE;
                break;
        case -ENOMEM:
                result = VM_FAULT_OOM;
                break;
        default:
                result = VM_FAULT_SIGBUS;
                break;
        }
        return result;
}

/**
 * Lustre implementation of a vm_operations_struct::fault() method, called by
 * the VM to serve a page fault (both in kernel and user space).
 *
 * \param vma - virtual memory area related to the page fault
 * \param vmf - structure describing the type and address of the fault
 *
 * \return allocated and filled _locked_ page for the faulting address
 * \retval VM_FAULT_ERROR on general error
 * \retval NOPAGE_OOM if there is not enough memory to allocate a new page
 */
static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct lu_env      *env;
        struct cl_io       *io;
        struct vvp_io      *vio = NULL;
        struct page        *vmpage;
        unsigned long       ra_flags;
        int                 result = 0;
        int                 fault_ret = 0;
        u16 refcheck;

        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                return PTR_ERR(env);

        io = ll_fault_io_init(env, vma, vmf->pgoff, &ra_flags);
        if (IS_ERR(io)) {
                result = to_fault_error(PTR_ERR(io));
                goto out;
        }

        result = io->ci_result;
        if (result == 0) {
                vio = vvp_env_io(env);
                vio->u.fault.ft_vma    = vma;
                vio->u.fault.ft_vmpage = NULL;
                vio->u.fault.ft_vmf = vmf;
                vio->u.fault.ft_flags = 0;
                vio->u.fault.ft_flags_valid = false;

                /* May call ll_readpage() */
                ll_cl_add(vma->vm_file, env, io);

                result = cl_io_loop(env, io);

                ll_cl_remove(vma->vm_file, env);

                /* ft_flags are only valid if we reached
                 * the call to filemap_fault
                 */
                if (vio->u.fault.ft_flags_valid)
                        fault_ret = vio->u.fault.ft_flags;

                vmpage = vio->u.fault.ft_vmpage;
                if (result != 0 && vmpage) {
                        put_page(vmpage);
                        vmf->page = NULL;
                }
        }
        cl_io_fini(env, io);

        vma->vm_flags |= ra_flags;

out:
        cl_env_put(env, &refcheck);
        if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
                fault_ret |= to_fault_error(result);

        CDEBUG(D_MMAP, "%s fault %d/%d\n", current->comm, fault_ret, result);
        return fault_ret;
}

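/*
 * vm_operations_struct::fault() entry point: run ll_fault0() with only
 * SIGKILL and SIGTERM deliverable, and retry the fault if the returned page
 * was truncated before it could be locked.
 */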
static int ll_fault(struct vm_fault *vmf)
{
        int count = 0;
        bool printed = false;
        int result;
        sigset_t set;

        /* Only SIGKILL and SIGTERM are allowed for fault/nopage/mkwrite
         * so that the process can be killed by the admin but other signals
         * do not cause a segfault.
         */
        set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));

restart:
        result = ll_fault0(vmf->vma, vmf);
        LASSERT(!(result & VM_FAULT_LOCKED));
        if (result == 0) {
                struct page *vmpage = vmf->page;

                /* check if this page has been truncated */
                lock_page(vmpage);
                if (unlikely(!vmpage->mapping)) { /* unlucky */
                        unlock_page(vmpage);
                        put_page(vmpage);
                        vmf->page = NULL;

                        if (!printed && ++count > 16) {
                                CWARN("the page is under heavy contention, maybe your app(%s) needs revising :-)\n",
                                      current->comm);
                                printed = true;
                        }

                        goto restart;
                }

                result = VM_FAULT_LOCKED;
        }
        cfs_restore_sigs(set);
        return result;
}

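/*
 * vm_operations_struct::page_mkwrite() entry point: make the faulting page
 * writable, retrying while ll_page_mkwrite0() reports a race with writeback,
 * then translate the result into a VM_FAULT_* value.
 */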
static int ll_page_mkwrite(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        int count = 0;
        bool printed = false;
        bool retry;
        int result;

        file_update_time(vma->vm_file);
        do {
                retry = false;
                result = ll_page_mkwrite0(vma, vmf->page, &retry);

                if (!printed && ++count > 16) {
                        const struct dentry *de = vma->vm_file->f_path.dentry;

                        CWARN("app(%s): the page %lu of file " DFID " is under heavy contention\n",
                              current->comm, vmf->pgoff,
                              PFID(ll_inode2fid(de->d_inode)));
                        printed = true;
                }
        } while (retry);

        switch (result) {
        case 0:
                LASSERT(PageLocked(vmf->page));
                result = VM_FAULT_LOCKED;
                break;
        case -ENODATA:
        case -EAGAIN:
        case -EFAULT:
                result = VM_FAULT_NOPAGE;
                break;
        case -ENOMEM:
                result = VM_FAULT_OOM;
                break;
        default:
                result = VM_FAULT_SIGBUS;
                break;
        }

        return result;
}

/**
 * To avoid cancelling the locks that cover a mmapped region under lock cache
 * pressure, we track the number of mapped vmas in vvp_object::vob_mmap_cnt.
 */
static void ll_vm_open(struct vm_area_struct *vma)
{
        struct inode *inode    = file_inode(vma->vm_file);
        struct vvp_object *vob = cl_inode2vvp(inode);

        LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
        atomic_inc(&vob->vob_mmap_cnt);
}

/**
 * Dual to ll_vm_open().
 */
static void ll_vm_close(struct vm_area_struct *vma)
{
        struct inode      *inode = file_inode(vma->vm_file);
        struct vvp_object *vob   = cl_inode2vvp(inode);

        atomic_dec(&vob->vob_mmap_cnt);
        LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
}

/* XXX put nice comment here.  talk about __free_pte -> dirty pages and
 * nopage's reference passing to the pte
 */
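/*
 * Unmap from all user address spaces the pages of @mapping that fall within
 * the byte range [first, last]; returns 0 if the file was mapped at all,
 * -ENOENT otherwise.
 */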
int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
{
        int rc = -ENOENT;

        LASSERTF(last > first, "last %llu first %llu\n", last, first);
        if (mapping_mapped(mapping)) {
                rc = 0;
                unmap_mapping_range(mapping, first + PAGE_SIZE - 1,
                                    last - first + 1, 0);
        }

        return rc;
}

static const struct vm_operations_struct ll_file_vm_ops = {
        .fault                  = ll_fault,
        .page_mkwrite           = ll_page_mkwrite,
        .open                   = ll_vm_open,
        .close                  = ll_vm_close,
};

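/*
 * The llite mmap() method: refuse no-lock files, let generic_file_mmap() set
 * up the mapping, install ll_file_vm_ops and glimpse the file size so the
 * mapping starts out with up-to-date attributes.
 */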
int ll_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file_inode(file);
        int rc;

        if (ll_file_nolock(file))
                return -EOPNOTSUPP;

        ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MAP, 1);
        rc = generic_file_mmap(file, vma);
        if (rc == 0) {
                vma->vm_ops = &ll_file_vm_ops;
                vma->vm_ops->open(vma);
                /* update the inode's size and mtime */
                rc = ll_glimpse_size(inode);
        }

        return rc;
}