linux/drivers/staging/lustre/lustre/llite/rw26.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/lustre/llite/rw26.c
 *
 * Lustre Lite I/O page cache routines for the 2.5/2.6 kernel version
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/uaccess.h>

#include <linux/migrate.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include "llite_internal.h"

/**
 * Implements the Linux VM address_space::invalidatepage() method. This method
 * is called when a page is truncated from a file, either as a result of an
 * explicit truncate, or when the inode is removed from memory (as a result of
 * a final iput(), umount, or memory-pressure-induced icache shrinking).
 *
 * [0, offset] bytes of the page remain valid (this covers the case of a
 * truncate that is not page aligned). Lustre leaves the partially truncated
 * page in the cache, relying on struct inode::i_size to limit further
 * accesses.
 */
static void ll_invalidatepage(struct page *vmpage, unsigned int offset,
                              unsigned int length)
{
        struct inode     *inode;
        struct lu_env    *env;
        struct cl_page   *page;
        struct cl_object *obj;

        int refcheck;

        LASSERT(PageLocked(vmpage));
        LASSERT(!PageWriteback(vmpage));

        /*
         * It is safe to not check anything in invalidatepage/releasepage
         * below, because both are run with the page locked and all of our
         * I/O happens with the page locked as well.
         */
        if (offset == 0 && length == PAGE_SIZE) {
                env = cl_env_get(&refcheck);
                if (!IS_ERR(env)) {
                        inode = vmpage->mapping->host;
                        obj = ll_i2info(inode)->lli_clob;
                        if (obj) {
                                page = cl_vmpage_page(vmpage, obj);
                                if (page) {
                                        cl_page_delete(env, page);
                                        cl_page_put(env, page);
                                }
                        } else {
                                LASSERT(vmpage->private == 0);
                        }
                        cl_env_put(env, &refcheck);
                }
        }
}

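/* Implements the address_space::releasepage() method. Called by the VM when
 * it wants to free a clean page. Returns 1 (after deleting the attached
 * cl_page) if the page can be released, or 0 if it is still dirty, under
 * writeback, or referenced.
 */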
static int ll_releasepage(struct page *vmpage, gfp_t gfp_mask)
{
        struct lu_env     *env;
        void                    *cookie;
        struct cl_object  *obj;
        struct cl_page    *page;
        struct address_space *mapping;
        int result = 0;

        LASSERT(PageLocked(vmpage));
        if (PageWriteback(vmpage) || PageDirty(vmpage))
                return 0;

        mapping = vmpage->mapping;
        if (!mapping)
                return 1;

        obj = ll_i2info(mapping->host)->lli_clob;
        if (!obj)
                return 1;

        /* 1 for caller, 1 for cl_page and 1 for page cache */
        if (page_count(vmpage) > 3)
                return 0;

        page = cl_vmpage_page(vmpage, obj);
        if (!page)
                return 1;

        cookie = cl_env_reenter();
        env = cl_env_percpu_get();
        LASSERT(!IS_ERR(env));

        if (!cl_page_in_use(page)) {
                result = 1;
                cl_page_delete(env, page);
        }

        /* To use the percpu env array, the call path must not be rescheduled;
         * otherwise the percpu array will be corrupted if ll_releasepage() is
         * called again on the same CPU.
         *
         * If this page holds the last reference on the cl_object, the
         * following call path may cause a reschedule:
         *   cl_page_put -> cl_page_free -> cl_object_put ->
         *     lu_object_put -> lu_object_free -> lov_delete_raid0.
         *
         * However, the kernel can't get rid of this inode until all pages have
         * been cleaned up. Since we hold the page lock here, it is safe to
         * assume that we won't enter the object deletion path.
         */
        LASSERT(cl_object_refc(obj) > 1);
        cl_page_put(env, page);

        cl_env_percpu_put(env);
        cl_env_reexit(cookie);
        return result;
}

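/* Arbitrary cap (2 GiB) on a single direct I/O transfer, used to keep the
 * page-count arithmetic in ll_get_user_pages() from overflowing.
 */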
#define MAX_DIRECTIO_SIZE (2 * 1024 * 1024 * 1024UL)

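/* Pin the user pages backing [user_addr, user_addr + size). On success the
 * number of pinned pages is returned and *pages points to an allocated array
 * holding them; on failure a negative errno (or 0) is returned and the array
 * is freed.
 */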
static inline int ll_get_user_pages(int rw, unsigned long user_addr,
                                    size_t size, struct page ***pages,
                                    int *max_pages)
{
        int result = -ENOMEM;

        /* set an arbitrary limit to prevent arithmetic overflow */
        if (size > MAX_DIRECTIO_SIZE) {
                *pages = NULL;
                return -EFBIG;
        }

        *max_pages = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        *max_pages -= user_addr >> PAGE_SHIFT;

        *pages = libcfs_kvzalloc(*max_pages * sizeof(**pages), GFP_NOFS);
        if (*pages) {
                result = get_user_pages_fast(user_addr, *max_pages,
                                             (rw == READ), *pages);
                if (unlikely(result <= 0))
                        kvfree(*pages);
        }

        return result;
}

/*  ll_free_user_pages - tear down page struct array
 *  @pages: array of page struct pointers underlying target buffer
 */
static void ll_free_user_pages(struct page **pages, int npages, int do_dirty)
{
        int i;

        for (i = 0; i < npages; i++) {
                if (do_dirty)
                        set_page_dirty_lock(pages[i]);
                put_page(pages[i]);
        }
        kvfree(pages);
}

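/* Build transient cl_pages for the user pages described by @pv and submit
 * them as a single synchronous read or write. Returns the number of bytes
 * submitted (pv->ldp_size) on success, or a negative errno on failure.
 */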
ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
                           int rw, struct inode *inode,
                           struct ll_dio_pages *pv)
{
        struct cl_page    *clp;
        struct cl_2queue  *queue;
        struct cl_object  *obj = io->ci_obj;
        int i;
        ssize_t rc = 0;
        loff_t file_offset  = pv->ldp_start_offset;
        size_t size = pv->ldp_size;
        int page_count      = pv->ldp_nr;
        struct page **pages = pv->ldp_pages;
        size_t page_size = cl_page_size(obj);
        bool do_io;
        int  io_pages       = 0;

        queue = &io->ci_queue;
        cl_2queue_init(queue);
        for (i = 0; i < page_count; i++) {
                if (pv->ldp_offsets)
                        file_offset = pv->ldp_offsets[i];

                LASSERT(!(file_offset & (page_size - 1)));
                clp = cl_page_find(env, obj, cl_index(obj, file_offset),
                                   pv->ldp_pages[i], CPT_TRANSIENT);
                if (IS_ERR(clp)) {
                        rc = PTR_ERR(clp);
                        break;
                }

                rc = cl_page_own(env, io, clp);
                if (rc) {
                        LASSERT(clp->cp_state == CPS_FREEING);
                        cl_page_put(env, clp);
                        break;
                }

                do_io = true;

                /* check the page type: if the page is a cached host page,
                 * copy the data directly
                 */
                if (clp->cp_type == CPT_CACHEABLE) {
                        struct page *vmpage = cl_page_vmpage(clp);
                        struct page *src_page;
                        struct page *dst_page;
                        void       *src;
                        void       *dst;

                        src_page = (rw == WRITE) ? pages[i] : vmpage;
                        dst_page = (rw == WRITE) ? vmpage : pages[i];

                        src = kmap_atomic(src_page);
                        dst = kmap_atomic(dst_page);
                        memcpy(dst, src, min(page_size, size));
                        kunmap_atomic(dst);
                        kunmap_atomic(src);

                        /* make sure the page will be added to the transfer by
                         * cl_io_submit()->...->vvp_page_prep_write().
                         */
                        if (rw == WRITE)
                                set_page_dirty(vmpage);

                        if (rw == READ) {
                                /* do not issue the page for read, since it
                                 * may re-read a readahead page that does not
                                 * have the uptodate bit set.
                                 */
                                cl_page_disown(env, io, clp);
                                do_io = false;
                        }
                }

                if (likely(do_io)) {
                        /*
                         * Add the page to the incoming page list of the
                         * 2-queue.
                         */
                        cl_page_list_add(&queue->c2_qin, clp);

                        /*
                         * Set the page clip to tell the transfer formation
                         * engine that the page has to be sent even if it is
                         * beyond KMS.
                         */
                        cl_page_clip(env, clp, 0, min(size, page_size));

                        ++io_pages;
                }

                /* drop the reference count taken by cl_page_find */
                cl_page_put(env, clp);
                size -= page_size;
                file_offset += page_size;
        }

        if (rc == 0 && io_pages) {
                rc = cl_io_submit_sync(env, io,
                                       rw == READ ? CRT_READ : CRT_WRITE,
                                       queue, 0);
        }
        if (rc == 0)
                rc = pv->ldp_size;

        cl_2queue_discard(env, io, queue);
        cl_2queue_disown(env, io, queue);
        cl_2queue_fini(env, queue);
        return rc;
}
EXPORT_SYMBOL(ll_direct_rw_pages);

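/* Wrap one segment of a direct I/O request in an ll_dio_pages descriptor and
 * hand it to ll_direct_rw_pages().
 */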
static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io,
                                   int rw, struct inode *inode,
                                   struct address_space *mapping,
                                   size_t size, loff_t file_offset,
                                   struct page **pages, int page_count)
{
        struct ll_dio_pages pvec = {
                .ldp_pages      = pages,
                .ldp_nr         = page_count,
                .ldp_size       = size,
                .ldp_offsets    = NULL,
                .ldp_start_offset = file_offset
        };

        return ll_direct_rw_pages(env, io, rw, inode, &pvec);
}

/* This is the maximum size of a single O_DIRECT request, based on the
 * kmalloc limit.  We need to fit all of the brw_page structs, each one
 * representing PAGE_SIZE worth of user data, into a single buffer, and
 * then round that down to a multiple of the full RPC size.  For 4kB
 * PAGE_SIZE this is up to 22MB for a 128kB kmalloc and up to 682MB for a
 * 4MB kmalloc.
 */
#define MAX_DIO_SIZE ((KMALLOC_MAX_SIZE / sizeof(struct brw_page) *       \
                       PAGE_SIZE) & ~(DT_MAX_BRW_SIZE - 1))
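
/* Implements the address_space::direct_IO() method: pin the user pages
 * backing each chunk of the iov_iter, transfer them via
 * ll_direct_IO_26_seg(), and retry with a smaller chunk size on -ENOMEM.
 */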
static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter)
{
        struct lu_env *env;
        struct cl_io *io;
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        loff_t file_offset = iocb->ki_pos;
        ssize_t count = iov_iter_count(iter);
        ssize_t tot_bytes = 0, result = 0;
        struct ll_inode_info *lli = ll_i2info(inode);
        long size = MAX_DIO_SIZE;
        int refcheck;

        if (!lli->lli_has_smd)
                return -EBADF;

        /* FIXME: io smaller than PAGE_SIZE is broken on ia64 ??? */
        if ((file_offset & ~PAGE_MASK) || (count & ~PAGE_MASK))
                return -EINVAL;

        CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), size=%zd (max %lu), offset=%lld=%llx, pages %zd (max %lu)\n",
               PFID(ll_inode2fid(inode)), inode, count, MAX_DIO_SIZE,
               file_offset, file_offset, count >> PAGE_SHIFT,
               MAX_DIO_SIZE >> PAGE_SHIFT);

        /* Check that all user buffers are aligned as well */
        if (iov_iter_alignment(iter) & ~PAGE_MASK)
                return -EINVAL;

        env = cl_env_get(&refcheck);
        LASSERT(!IS_ERR(env));
        io = vvp_env_io(env)->vui_cl.cis_io;
        LASSERT(io);

        while (iov_iter_count(iter)) {
                struct page **pages;
                size_t offs;

                count = min_t(size_t, iov_iter_count(iter), size);
                if (iov_iter_rw(iter) == READ) {
                        if (file_offset >= i_size_read(inode))
                                break;
                        if (file_offset + count > i_size_read(inode))
                                count = i_size_read(inode) - file_offset;
                }

                result = iov_iter_get_pages_alloc(iter, &pages, count, &offs);
                if (likely(result > 0)) {
                        int n = DIV_ROUND_UP(result + offs, PAGE_SIZE);

                        result = ll_direct_IO_26_seg(env, io, iov_iter_rw(iter),
                                                     inode, file->f_mapping,
                                                     result, file_offset, pages,
                                                     n);
                        ll_free_user_pages(pages, n, iov_iter_rw(iter) == READ);
                }
                if (unlikely(result <= 0)) {
                        /* If we can't allocate a large enough buffer
                         * for the request, shrink it to a smaller
                         * PAGE_SIZE multiple and try again.
                         * We should always be able to kmalloc for a
                         * page worth of page pointers = 4MB on i386.
                         */
                        if (result == -ENOMEM &&
                            size > (PAGE_SIZE / sizeof(*pages)) *
                            PAGE_SIZE) {
                                size = ((((size / 2) - 1) |
                                         ~PAGE_MASK) + 1) &
                                        PAGE_MASK;
                                CDEBUG(D_VFSTRACE, "DIO size now %lu\n",
                                       size);
                                continue;
                        }

                        goto out;
                }
                iov_iter_advance(iter, result);
                tot_bytes += result;
                file_offset += result;
        }
out:
        if (tot_bytes > 0) {
                struct vvp_io *vio = vvp_env_io(env);

                /* no commit async for direct IO */
                vio->u.write.vui_written += tot_bytes;
        }

        cl_env_put(env, &refcheck);
        return tot_bytes ? tot_bytes : result;
}

/**
 * Prepare a partially written-to page for a write.
 */
static int ll_prepare_partial_page(const struct lu_env *env, struct cl_io *io,
                                   struct cl_page *pg)
{
        struct cl_attr *attr   = vvp_env_thread_attr(env);
        struct cl_object *obj  = io->ci_obj;
        struct vvp_page *vpg   = cl_object_page_slice(obj, pg);
        loff_t          offset = cl_offset(obj, vvp_index(vpg));
        int             result;

        cl_object_attr_lock(obj);
        result = cl_object_attr_get(env, obj, attr);
        cl_object_attr_unlock(obj);
        if (result == 0) {
                /*
                 * If we are writing to a new page, there is no need to read
                 * the old data.  The extent locking will have updated the
                 * KMS, and for our purposes here we can treat it like i_size.
                 */
                if (attr->cat_kms <= offset) {
                        char *kaddr = kmap_atomic(vpg->vpg_page);

                        memset(kaddr, 0, cl_page_size(obj));
                        kunmap_atomic(kaddr);
                } else if (vpg->vpg_defer_uptodate) {
                        vpg->vpg_ra_used = 1;
                } else {
                        result = ll_page_sync_io(env, io, pg, CRT_READ);
                }
        }
        return result;
}

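/* Implements the address_space::write_begin() method: find or create the
 * cache page covering @pos, attach a cl_page to it, and bring a partially
 * overwritten page up to date before the data is copied in.
 */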
static int ll_write_begin(struct file *file, struct address_space *mapping,
                          loff_t pos, unsigned len, unsigned flags,
                          struct page **pagep, void **fsdata)
{
        struct ll_cl_context *lcc;
        const struct lu_env  *env;
        struct cl_io   *io;
        struct cl_page *page;
        struct cl_object *clob = ll_i2info(mapping->host)->lli_clob;
        pgoff_t index = pos >> PAGE_SHIFT;
        struct page *vmpage = NULL;
        unsigned int from = pos & (PAGE_SIZE - 1);
        unsigned int to = from + len;
        int result = 0;

        CDEBUG(D_VFSTRACE, "Writing %lu of %d to %d bytes\n", index, from, len);

        lcc = ll_cl_find(file);
        if (!lcc) {
                result = -EIO;
                goto out;
        }

        env = lcc->lcc_env;
        io  = lcc->lcc_io;

        /* To avoid deadlock, try to lock the page first. */
        vmpage = grab_cache_page_nowait(mapping, index);
        if (unlikely(!vmpage || PageDirty(vmpage) || PageWriteback(vmpage))) {
                struct vvp_io *vio = vvp_env_io(env);
                struct cl_page_list *plist = &vio->u.write.vui_queue;

                /* if the page is already in the dirty cache, we have to
                 * commit the queued pages right now; otherwise it may cause
                 * a deadlock, because this thread holds the page lock of a
                 * dirty page while requesting more grants. It is okay for
                 * the dirty page to be the first one in the commit page
                 * list, though.
                 */
                if (vmpage && plist->pl_nr > 0) {
                        unlock_page(vmpage);
                        put_page(vmpage);
                        vmpage = NULL;
                }

                /* commit pages and then wait for page lock */
                result = vvp_io_write_commit(env, io);
                if (result < 0)
                        goto out;

                if (!vmpage) {
                        vmpage = grab_cache_page_write_begin(mapping, index,
                                                             flags);
                        if (!vmpage) {
                                result = -ENOMEM;
                                goto out;
                        }
                }
        }

        page = cl_page_find(env, clob, vmpage->index, vmpage, CPT_CACHEABLE);
        if (IS_ERR(page)) {
                result = PTR_ERR(page);
                goto out;
        }

        lcc->lcc_page = page;
        lu_ref_add(&page->cp_reference, "cl_io", io);

        cl_page_assume(env, io, page);
        if (!PageUptodate(vmpage)) {
                /*
                 * We're completely overwriting an existing page,
                 * so _don't_ set it up to date until commit_write
                 */
                if (from == 0 && to == PAGE_SIZE) {
                        CL_PAGE_HEADER(D_PAGE, env, page, "full page write\n");
                        POISON_PAGE(vmpage, 0x11);
                } else {
                        /* TODO: this can be optimized at the OSC layer by
                         * checking whether it is a lockless I/O. In that
                         * case, it is not necessary to read the data.
                         */
                        result = ll_prepare_partial_page(env, io, page);
                        if (result == 0)
                                SetPageUptodate(vmpage);
                }
        }
        if (result < 0)
                cl_page_unassume(env, io, page);
out:
        if (result < 0) {
                if (vmpage) {
                        unlock_page(vmpage);
                        put_page(vmpage);
                }
        } else {
                *pagep = vmpage;
                *fsdata = lcc;
        }
        return result;
}

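/* Implements the address_space::write_end() method: queue the just-written
 * page on the vvp write queue, or drop it if nothing was copied, and commit
 * the queue once it is full, non-contiguous, or the file is synchronous.
 */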
static int ll_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *vmpage, void *fsdata)
{
        struct ll_cl_context *lcc = fsdata;
        const struct lu_env *env;
        struct cl_io *io;
        struct vvp_io *vio;
        struct cl_page *page;
        unsigned from = pos & (PAGE_SIZE - 1);
        bool unplug = false;
        int result = 0;

        put_page(vmpage);

        env  = lcc->lcc_env;
        page = lcc->lcc_page;
        io   = lcc->lcc_io;
        vio  = vvp_env_io(env);

        LASSERT(cl_page_is_owned(page, io));
        if (copied > 0) {
                struct cl_page_list *plist = &vio->u.write.vui_queue;

                lcc->lcc_page = NULL; /* page will be queued */

                /* Add it into the write queue */
                cl_page_list_add(plist, page);
                if (plist->pl_nr == 1) /* first page */
                        vio->u.write.vui_from = from;
                else
                        LASSERT(from == 0);
                vio->u.write.vui_to = from + copied;

                /*
                 * Commit eagerly to avoid the deadlock in
                 * balance_dirty_pages(), where this dirty page may be
                 * written back in the same thread.
                 */
                if (PageDirty(vmpage))
                        unplug = true;

                /* We may have a full RPC's worth of pages, commit it soon */
                if (plist->pl_nr >= PTLRPC_MAX_BRW_PAGES)
                        unplug = true;

                CL_PAGE_DEBUG(D_VFSTRACE, env, page,
                              "queued page: %d.\n", plist->pl_nr);
        } else {
                cl_page_disown(env, io, page);

                lcc->lcc_page = NULL;
                lu_ref_del(&page->cp_reference, "cl_io", io);
                cl_page_put(env, page);

                /* the page list is no longer contiguous, commit it now */
                unplug = true;
        }

        if (unplug ||
            file->f_flags & O_SYNC || IS_SYNC(file_inode(file)))
                result = vvp_io_write_commit(env, io);

        return result >= 0 ? copied : result;
}

#ifdef CONFIG_MIGRATION
static int ll_migratepage(struct address_space *mapping,
                          struct page *newpage, struct page *page,
                          enum migrate_mode mode)
{
        /* Always fail page migration until we have a proper implementation */
        return -EIO;
}
#endif

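/* Address space operations for regular Lustre files. */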
const struct address_space_operations ll_aops = {
        .readpage       = ll_readpage,
        .direct_IO      = ll_direct_IO_26,
        .writepage      = ll_writepage,
        .writepages     = ll_writepages,
        .set_page_dirty = __set_page_dirty_nobuffers,
        .write_begin    = ll_write_begin,
        .write_end      = ll_write_end,
        .invalidatepage = ll_invalidatepage,
        .releasepage    = (void *)ll_releasepage,
#ifdef CONFIG_MIGRATION
        .migratepage    = ll_migratepage,
#endif
};